problem_id (stringlengths 18-21) | source (stringclasses 1) | task_type (stringclasses 1) | in_source_id (stringlengths 13-54) | prompt (stringlengths 1.28k-64.2k) | golden_diff (stringlengths 166-811) | verification_info (stringlengths 604-118k) |
---|---|---|---|---|---|---|
gh_patches_debug_1300 | rasdani/github-patches | git_diff | sopel-irc__sopel-1339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
raw logging defaults to true...necessary?
sopel [by default](https://github.com/sopel-irc/sopel/blob/master/sopel/config/core_section.py#L134) does raw logging.
You would need to manually add `log_raw = False` to the config file to disable it.
Just wondering if having it on by default is really that necessary?
--- END ISSUE ---
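For illustration, here is a minimal standalone sketch of what a default of `True` means in practice. It uses plain `configparser` rather than Sopel's actual config machinery, so the section and option handling are assumptions for demonstration only: when the option is absent, raw logging stays enabled, and the operator has to opt out explicitly.
```python
# Standalone illustration only; not Sopel's real config loader.
import configparser

parser = configparser.ConfigParser()
parser.read_string("[core]\nnick = Sopel\n")  # a config with no log_raw entry

# With a default of True, an absent option means raw logging is on:
print(parser.getboolean("core", "log_raw", fallback=True))  # True

# The operator must add the option explicitly to turn it off:
parser.read_string("[core]\nlog_raw = False\n")
print(parser.getboolean("core", "log_raw", fallback=True))  # False
```
Flipping the default to `False` inverts this: doing nothing leaves raw logging off, which is the behavior the issue questions.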
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/config/core_section.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals, absolute_import, print_function, division
4
5 import os.path
6
7 from sopel.config.types import (
8 StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,
9 FilenameAttribute, NO_DEFAULT
10 )
11 from sopel.tools import Identifier
12
13
14 def _find_certs():
15 """
16 Find the TLS root CA store.
17
18 :returns: str (path to file)
19 """
20 # check if the root CA store is at a known location
21 locations = [
22 '/etc/pki/tls/cert.pem', # best first guess
23 '/etc/ssl/certs/ca-certificates.crt', # Debian
24 '/etc/ssl/cert.pem', # FreeBSD base OpenSSL
25 '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL
26 '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora
27 '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS
28 '/etc/pki/tls/cacert.pem', # OpenELEC
29 '/etc/ssl/ca-bundle.pem', # OpenSUSE
30 ]
31 for certs in locations:
32 if os.path.isfile(certs):
33 return certs
34 return None
35
36
37 def configure(config):
38 config.core.configure_setting('nick', 'Enter the nickname for your bot.')
39 config.core.configure_setting('host', 'Enter the server to connect to.')
40 config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')
41 if config.core.use_ssl:
42 default_port = 6697
43 else:
44 default_port = 6667
45 config.core.configure_setting('port', 'Enter the port to connect on.',
46 default=default_port)
47 config.core.configure_setting(
48 'owner', "Enter your own IRC name (or that of the bot's owner)")
49 config.core.configure_setting(
50 'channels',
51 'Enter the channels to connect to at startup, separated by commas.'
52 )
53
54
55 class CoreSection(StaticSection):
56 """The config section used for configuring the bot itself."""
57 admins = ListAttribute('admins')
58 """The list of people (other than the owner) who can administer the bot"""
59
60 admin_accounts = ListAttribute('admin_accounts')
61 """The list of accounts (other than the owner's) who can administer the bot.
62
63 This should not be set for networks that do not support IRCv3 account
64 capabilities."""
65
66 auth_method = ChoiceAttribute('auth_method', choices=[
67 'nickserv', 'authserv', 'Q', 'sasl', 'server'])
68 """The method to use to authenticate with the server.
69
70 Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server``."""
71
72 auth_password = ValidatedAttribute('auth_password')
73 """The password to use to authenticate with the server."""
74
75 auth_target = ValidatedAttribute('auth_target')
76 """The user to use for nickserv authentication, or the SASL mechanism.
77
78 May not apply, depending on ``auth_method``. Defaults to NickServ for
79 nickserv auth, and PLAIN for SASL auth."""
80
81 auth_username = ValidatedAttribute('auth_username')
82 """The username/account to use to authenticate with the server.
83
84 May not apply, depending on ``auth_method``."""
85
86 bind_host = ValidatedAttribute('bind_host')
87 """Bind the connection to a specific IP"""
88
89 ca_certs = FilenameAttribute('ca_certs', default=_find_certs())
90 """The path of the CA certs pem file"""
91
92 channels = ListAttribute('channels')
93 """List of channels for the bot to join when it connects"""
94
95 db_filename = ValidatedAttribute('db_filename')
96 """The filename for Sopel's database."""
97
98 default_time_format = ValidatedAttribute('default_time_format',
99 default='%Y-%m-%d - %T%Z')
100 """The default format to use for time in messages."""
101
102 default_timezone = ValidatedAttribute('default_timezone')
103 """The default timezone to use for time in messages."""
104
105 enable = ListAttribute('enable')
106 """A whitelist of the only modules you want to enable."""
107
108 exclude = ListAttribute('exclude')
109 """A list of modules which should not be loaded."""
110
111 extra = ListAttribute('extra')
112 """A list of other directories you'd like to include modules from."""
113
114 help_prefix = ValidatedAttribute('help_prefix', default='.')
115 """The prefix to use in help"""
116
117 @property
118 def homedir(self):
119 """The directory in which various files are stored at runtime.
120
121 By default, this is the same directory as the config. It can not be
122 changed at runtime.
123 """
124 return self._parent.homedir
125
126 host = ValidatedAttribute('host', default='irc.dftba.net')
127 """The server to connect to."""
128
129 host_blocks = ListAttribute('host_blocks')
130 """A list of hostmasks which Sopel should ignore.
131
132 Regular expression syntax is used"""
133
134 log_raw = ValidatedAttribute('log_raw', bool, default=True)
135 """Whether a log of raw lines as sent and received should be kept."""
136
137 logdir = FilenameAttribute('logdir', directory=True, default='logs')
138 """Directory in which to place logs."""
139
140 logging_channel = ValidatedAttribute('logging_channel', Identifier)
141 """The channel to send logging messages to."""
142
143 logging_level = ChoiceAttribute('logging_level',
144 ['CRITICAL', 'ERROR', 'WARNING', 'INFO',
145 'DEBUG'],
146 'WARNING')
147 """The lowest severity of logs to display."""
148
149 modes = ValidatedAttribute('modes', default='B')
150 """User modes to be set on connection."""
151
152 name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')
153 """The "real name" of your bot for WHOIS responses."""
154
155 nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))
156 """The nickname for the bot"""
157
158 nick_blocks = ListAttribute('nick_blocks')
159 """A list of nicks which Sopel should ignore.
160
161 Regular expression syntax is used."""
162
163 not_configured = ValidatedAttribute('not_configured', bool, default=False)
164 """For package maintainers. Not used in normal configurations.
165
166 This allows software packages to install a default config file, with this
167 set to true, so that the bot will not run until it has been properly
168 configured."""
169
170 owner = ValidatedAttribute('owner', default=NO_DEFAULT)
171 """The IRC name of the owner of the bot."""
172
173 owner_account = ValidatedAttribute('owner_account')
174 """The services account name of the owner of the bot.
175
176 This should only be set on networks which support IRCv3 account
177 capabilities.
178 """
179
180 pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')
181 """The directory in which to put the file Sopel uses to track its process ID.
182
183 You probably do not need to change this unless you're managing Sopel with
184 systemd or similar."""
185
186 port = ValidatedAttribute('port', int, default=6667)
187 """The port to connect on."""
188
189 prefix = ValidatedAttribute('prefix', default='\.')
190 """The prefix to add to the beginning of commands.
191
192 It is a regular expression (so the default, ``\.``, means commands start
193 with a period), though using capturing groups will create problems."""
194
195 reply_errors = ValidatedAttribute('reply_errors', bool, default=True)
196 """Whether to message the sender of a message that triggered an error with the exception."""
197
198 throttle_join = ValidatedAttribute('throttle_join', int)
199 """Slow down the initial join of channels to prevent getting kicked.
200
201 Sopel will only join this many channels at a time, sleeping for a second
202 between each batch. This is unnecessary on most networks."""
203
204 timeout = ValidatedAttribute('timeout', int, default=120)
205 """The amount of time acceptable between pings before timing out."""
206
207 use_ssl = ValidatedAttribute('use_ssl', bool, default=False)
208 """Whether to use a SSL secured connection."""
209
210 user = ValidatedAttribute('user', default='sopel')
211 """The "user" for your bot (the part before the @ in the hostname)."""
212
213 verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)
214 """Whether to require a trusted SSL certificate for SSL connections."""
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/config/core_section.py b/sopel/config/core_section.py
--- a/sopel/config/core_section.py
+++ b/sopel/config/core_section.py
@@ -131,7 +131,7 @@
Regular expression syntax is used"""
- log_raw = ValidatedAttribute('log_raw', bool, default=True)
+ log_raw = ValidatedAttribute('log_raw', bool, default=False)
"""Whether a log of raw lines as sent and received should be kept."""
logdir = FilenameAttribute('logdir', directory=True, default='logs')
| {"golden_diff": "diff --git a/sopel/config/core_section.py b/sopel/config/core_section.py\n--- a/sopel/config/core_section.py\n+++ b/sopel/config/core_section.py\n@@ -131,7 +131,7 @@\n \n Regular expression syntax is used\"\"\"\n \n- log_raw = ValidatedAttribute('log_raw', bool, default=True)\n+ log_raw = ValidatedAttribute('log_raw', bool, default=False)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n \n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n", "issue": "raw logging defaults to true...necessary?\nsopel [by default](https://github.com/sopel-irc/sopel/blob/master/sopel/config/core_section.py#L134) does raw logging. \r\nYou would need to manually add log_raw = False to config file to disable it.\r\nJust wondering if having it on by default is really that necessary?\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,\n FilenameAttribute, NO_DEFAULT\n)\nfrom sopel.tools import Identifier\n\n\ndef _find_certs():\n \"\"\"\n Find the TLS root CA store.\n\n :returns: str (path to file)\n \"\"\"\n # check if the root CA store is at a known location\n locations = [\n '/etc/pki/tls/cert.pem', # best first guess\n '/etc/ssl/certs/ca-certificates.crt', # Debian\n '/etc/ssl/cert.pem', # FreeBSD base OpenSSL\n '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL\n '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora\n '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS\n '/etc/pki/tls/cacert.pem', # OpenELEC\n '/etc/ssl/ca-bundle.pem', # OpenSUSE\n ]\n for certs in locations:\n if os.path.isfile(certs):\n return certs\n return None\n\n\ndef configure(config):\n config.core.configure_setting('nick', 'Enter the nickname for your bot.')\n config.core.configure_setting('host', 'Enter the server to connect to.')\n config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')\n if config.core.use_ssl:\n default_port = 6697\n else:\n default_port = 6667\n config.core.configure_setting('port', 'Enter the port to connect on.',\n default=default_port)\n config.core.configure_setting(\n 'owner', \"Enter your own IRC name (or that of the bot's owner)\")\n config.core.configure_setting(\n 'channels',\n 'Enter the channels to connect to at startup, separated by commas.'\n )\n\n\nclass CoreSection(StaticSection):\n \"\"\"The config section used for configuring the bot itself.\"\"\"\n admins = ListAttribute('admins')\n \"\"\"The list of people (other than the owner) who can administer the bot\"\"\"\n\n admin_accounts = ListAttribute('admin_accounts')\n \"\"\"The list of accounts (other than the owner's) who can administer the bot.\n\n This should not be set for networks that do not support IRCv3 account\n capabilities.\"\"\"\n\n auth_method = ChoiceAttribute('auth_method', choices=[\n 'nickserv', 'authserv', 'Q', 'sasl', 'server'])\n \"\"\"The method to use to authenticate with the server.\n\n Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server``.\"\"\"\n\n auth_password = ValidatedAttribute('auth_password')\n \"\"\"The password to use to authenticate with the server.\"\"\"\n\n auth_target = ValidatedAttribute('auth_target')\n \"\"\"The user to use for nickserv authentication, or the SASL mechanism.\n\n May not apply, depending on ``auth_method``. 
Defaults to NickServ for\n nickserv auth, and PLAIN for SASL auth.\"\"\"\n\n auth_username = ValidatedAttribute('auth_username')\n \"\"\"The username/account to use to authenticate with the server.\n\n May not apply, depending on ``auth_method``.\"\"\"\n\n bind_host = ValidatedAttribute('bind_host')\n \"\"\"Bind the connection to a specific IP\"\"\"\n\n ca_certs = FilenameAttribute('ca_certs', default=_find_certs())\n \"\"\"The path of the CA certs pem file\"\"\"\n\n channels = ListAttribute('channels')\n \"\"\"List of channels for the bot to join when it connects\"\"\"\n\n db_filename = ValidatedAttribute('db_filename')\n \"\"\"The filename for Sopel's database.\"\"\"\n\n default_time_format = ValidatedAttribute('default_time_format',\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n\n default_timezone = ValidatedAttribute('default_timezone')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n\n enable = ListAttribute('enable')\n \"\"\"A whitelist of the only modules you want to enable.\"\"\"\n\n exclude = ListAttribute('exclude')\n \"\"\"A list of modules which should not be loaded.\"\"\"\n\n extra = ListAttribute('extra')\n \"\"\"A list of other directories you'd like to include modules from.\"\"\"\n\n help_prefix = ValidatedAttribute('help_prefix', default='.')\n \"\"\"The prefix to use in help\"\"\"\n\n @property\n def homedir(self):\n \"\"\"The directory in which various files are stored at runtime.\n\n By default, this is the same directory as the config. It can not be\n changed at runtime.\n \"\"\"\n return self._parent.homedir\n\n host = ValidatedAttribute('host', default='irc.dftba.net')\n \"\"\"The server to connect to.\"\"\"\n\n host_blocks = ListAttribute('host_blocks')\n \"\"\"A list of hostmasks which Sopel should ignore.\n\n Regular expression syntax is used\"\"\"\n\n log_raw = ValidatedAttribute('log_raw', bool, default=True)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n\n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n \"\"\"Directory in which to place logs.\"\"\"\n\n logging_channel = ValidatedAttribute('logging_channel', Identifier)\n \"\"\"The channel to send logging messages to.\"\"\"\n\n logging_level = ChoiceAttribute('logging_level',\n ['CRITICAL', 'ERROR', 'WARNING', 'INFO',\n 'DEBUG'],\n 'WARNING')\n \"\"\"The lowest severity of logs to display.\"\"\"\n\n modes = ValidatedAttribute('modes', default='B')\n \"\"\"User modes to be set on connection.\"\"\"\n\n name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')\n \"\"\"The \"real name\" of your bot for WHOIS responses.\"\"\"\n\n nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))\n \"\"\"The nickname for the bot\"\"\"\n\n nick_blocks = ListAttribute('nick_blocks')\n \"\"\"A list of nicks which Sopel should ignore.\n\n Regular expression syntax is used.\"\"\"\n\n not_configured = ValidatedAttribute('not_configured', bool, default=False)\n \"\"\"For package maintainers. 
Not used in normal configurations.\n\n This allows software packages to install a default config file, with this\n set to true, so that the bot will not run until it has been properly\n configured.\"\"\"\n\n owner = ValidatedAttribute('owner', default=NO_DEFAULT)\n \"\"\"The IRC name of the owner of the bot.\"\"\"\n\n owner_account = ValidatedAttribute('owner_account')\n \"\"\"The services account name of the owner of the bot.\n\n This should only be set on networks which support IRCv3 account\n capabilities.\n \"\"\"\n\n pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')\n \"\"\"The directory in which to put the file Sopel uses to track its process ID.\n\n You probably do not need to change this unless you're managing Sopel with\n systemd or similar.\"\"\"\n\n port = ValidatedAttribute('port', int, default=6667)\n \"\"\"The port to connect on.\"\"\"\n\n prefix = ValidatedAttribute('prefix', default='\\.')\n \"\"\"The prefix to add to the beginning of commands.\n\n It is a regular expression (so the default, ``\\.``, means commands start\n with a period), though using capturing groups will create problems.\"\"\"\n\n reply_errors = ValidatedAttribute('reply_errors', bool, default=True)\n \"\"\"Whether to message the sender of a message that triggered an error with the exception.\"\"\"\n\n throttle_join = ValidatedAttribute('throttle_join', int)\n \"\"\"Slow down the initial join of channels to prevent getting kicked.\n\n Sopel will only join this many channels at a time, sleeping for a second\n between each batch. This is unnecessary on most networks.\"\"\"\n\n timeout = ValidatedAttribute('timeout', int, default=120)\n \"\"\"The amount of time acceptable between pings before timing out.\"\"\"\n\n use_ssl = ValidatedAttribute('use_ssl', bool, default=False)\n \"\"\"Whether to use a SSL secured connection.\"\"\"\n\n user = ValidatedAttribute('user', default='sopel')\n \"\"\"The \"user\" for your bot (the part before the @ in the hostname).\"\"\"\n\n verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)\n \"\"\"Whether to require a trusted SSL certificate for SSL connections.\"\"\"\n", "path": "sopel/config/core_section.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,\n FilenameAttribute, NO_DEFAULT\n)\nfrom sopel.tools import Identifier\n\n\ndef _find_certs():\n \"\"\"\n Find the TLS root CA store.\n\n :returns: str (path to file)\n \"\"\"\n # check if the root CA store is at a known location\n locations = [\n '/etc/pki/tls/cert.pem', # best first guess\n '/etc/ssl/certs/ca-certificates.crt', # Debian\n '/etc/ssl/cert.pem', # FreeBSD base OpenSSL\n '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL\n '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora\n '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS\n '/etc/pki/tls/cacert.pem', # OpenELEC\n '/etc/ssl/ca-bundle.pem', # OpenSUSE\n ]\n for certs in locations:\n if os.path.isfile(certs):\n return certs\n return None\n\n\ndef configure(config):\n config.core.configure_setting('nick', 'Enter the nickname for your bot.')\n config.core.configure_setting('host', 'Enter the server to connect to.')\n config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')\n if config.core.use_ssl:\n default_port = 6697\n else:\n default_port = 6667\n 
config.core.configure_setting('port', 'Enter the port to connect on.',\n default=default_port)\n config.core.configure_setting(\n 'owner', \"Enter your own IRC name (or that of the bot's owner)\")\n config.core.configure_setting(\n 'channels',\n 'Enter the channels to connect to at startup, separated by commas.'\n )\n\n\nclass CoreSection(StaticSection):\n \"\"\"The config section used for configuring the bot itself.\"\"\"\n admins = ListAttribute('admins')\n \"\"\"The list of people (other than the owner) who can administer the bot\"\"\"\n\n admin_accounts = ListAttribute('admin_accounts')\n \"\"\"The list of accounts (other than the owner's) who can administer the bot.\n\n This should not be set for networks that do not support IRCv3 account\n capabilities.\"\"\"\n\n auth_method = ChoiceAttribute('auth_method', choices=[\n 'nickserv', 'authserv', 'Q', 'sasl', 'server'])\n \"\"\"The method to use to authenticate with the server.\n\n Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server``.\"\"\"\n\n auth_password = ValidatedAttribute('auth_password')\n \"\"\"The password to use to authenticate with the server.\"\"\"\n\n auth_target = ValidatedAttribute('auth_target')\n \"\"\"The user to use for nickserv authentication, or the SASL mechanism.\n\n May not apply, depending on ``auth_method``. Defaults to NickServ for\n nickserv auth, and PLAIN for SASL auth.\"\"\"\n\n auth_username = ValidatedAttribute('auth_username')\n \"\"\"The username/account to use to authenticate with the server.\n\n May not apply, depending on ``auth_method``.\"\"\"\n\n bind_host = ValidatedAttribute('bind_host')\n \"\"\"Bind the connection to a specific IP\"\"\"\n\n ca_certs = FilenameAttribute('ca_certs', default=_find_certs())\n \"\"\"The path of the CA certs pem file\"\"\"\n\n channels = ListAttribute('channels')\n \"\"\"List of channels for the bot to join when it connects\"\"\"\n\n db_filename = ValidatedAttribute('db_filename')\n \"\"\"The filename for Sopel's database.\"\"\"\n\n default_time_format = ValidatedAttribute('default_time_format',\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n\n default_timezone = ValidatedAttribute('default_timezone')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n\n enable = ListAttribute('enable')\n \"\"\"A whitelist of the only modules you want to enable.\"\"\"\n\n exclude = ListAttribute('exclude')\n \"\"\"A list of modules which should not be loaded.\"\"\"\n\n extra = ListAttribute('extra')\n \"\"\"A list of other directories you'd like to include modules from.\"\"\"\n\n help_prefix = ValidatedAttribute('help_prefix', default='.')\n \"\"\"The prefix to use in help\"\"\"\n\n @property\n def homedir(self):\n \"\"\"The directory in which various files are stored at runtime.\n\n By default, this is the same directory as the config. 
It can not be\n changed at runtime.\n \"\"\"\n return self._parent.homedir\n\n host = ValidatedAttribute('host', default='irc.dftba.net')\n \"\"\"The server to connect to.\"\"\"\n\n host_blocks = ListAttribute('host_blocks')\n \"\"\"A list of hostmasks which Sopel should ignore.\n\n Regular expression syntax is used\"\"\"\n\n log_raw = ValidatedAttribute('log_raw', bool, default=False)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n\n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n \"\"\"Directory in which to place logs.\"\"\"\n\n logging_channel = ValidatedAttribute('logging_channel', Identifier)\n \"\"\"The channel to send logging messages to.\"\"\"\n\n logging_level = ChoiceAttribute('logging_level',\n ['CRITICAL', 'ERROR', 'WARNING', 'INFO',\n 'DEBUG'],\n 'WARNING')\n \"\"\"The lowest severity of logs to display.\"\"\"\n\n modes = ValidatedAttribute('modes', default='B')\n \"\"\"User modes to be set on connection.\"\"\"\n\n name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')\n \"\"\"The \"real name\" of your bot for WHOIS responses.\"\"\"\n\n nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))\n \"\"\"The nickname for the bot\"\"\"\n\n nick_blocks = ListAttribute('nick_blocks')\n \"\"\"A list of nicks which Sopel should ignore.\n\n Regular expression syntax is used.\"\"\"\n\n not_configured = ValidatedAttribute('not_configured', bool, default=False)\n \"\"\"For package maintainers. Not used in normal configurations.\n\n This allows software packages to install a default config file, with this\n set to true, so that the bot will not run until it has been properly\n configured.\"\"\"\n\n owner = ValidatedAttribute('owner', default=NO_DEFAULT)\n \"\"\"The IRC name of the owner of the bot.\"\"\"\n\n owner_account = ValidatedAttribute('owner_account')\n \"\"\"The services account name of the owner of the bot.\n\n This should only be set on networks which support IRCv3 account\n capabilities.\n \"\"\"\n\n pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')\n \"\"\"The directory in which to put the file Sopel uses to track its process ID.\n\n You probably do not need to change this unless you're managing Sopel with\n systemd or similar.\"\"\"\n\n port = ValidatedAttribute('port', int, default=6667)\n \"\"\"The port to connect on.\"\"\"\n\n prefix = ValidatedAttribute('prefix', default='\\.')\n \"\"\"The prefix to add to the beginning of commands.\n\n It is a regular expression (so the default, ``\\.``, means commands start\n with a period), though using capturing groups will create problems.\"\"\"\n\n reply_errors = ValidatedAttribute('reply_errors', bool, default=True)\n \"\"\"Whether to message the sender of a message that triggered an error with the exception.\"\"\"\n\n throttle_join = ValidatedAttribute('throttle_join', int)\n \"\"\"Slow down the initial join of channels to prevent getting kicked.\n\n Sopel will only join this many channels at a time, sleeping for a second\n between each batch. 
This is unnecessary on most networks.\"\"\"\n\n timeout = ValidatedAttribute('timeout', int, default=120)\n \"\"\"The amount of time acceptable between pings before timing out.\"\"\"\n\n use_ssl = ValidatedAttribute('use_ssl', bool, default=False)\n \"\"\"Whether to use a SSL secured connection.\"\"\"\n\n user = ValidatedAttribute('user', default='sopel')\n \"\"\"The \"user\" for your bot (the part before the @ in the hostname).\"\"\"\n\n verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)\n \"\"\"Whether to require a trusted SSL certificate for SSL connections.\"\"\"\n", "path": "sopel/config/core_section.py"}]} |
gh_patches_debug_1301 | rasdani/github-patches | git_diff | Nitrate__Nitrate-1040 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSRF error when adding cases to test runs
Reproduce steps:
* Go to a plan page
* Select a few cases
* Click "Add cases to runs"
* Select at least one test run and click the Update button
* Confirm with yes, and this error occurs:
Forbidden (403)
CSRF verification failed. Request aborted.
--- END ISSUE ---
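As background on the 403, here is a minimal generic Django sketch (an assumption for illustration; it is not Nitrate's code or its actual fix, and the view name and URL are hypothetical). Any confirmation page that triggers a follow-up POST must send the CSRF token back, otherwise `CsrfViewMiddleware` rejects the request with exactly the "CSRF verification failed" page quoted above.
```python
# Generic Django illustration; hypothetical view name and URL, not Nitrate code.
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import get_token


def confirm_add_to_runs(request: HttpRequest) -> HttpResponse:
    # get_token() ensures a CSRF cookie is set and returns the value that the
    # follow-up POST must include as the "csrfmiddlewaretoken" form field.
    token = get_token(request)
    html = (
        '<form method="post" action="/runs/update/">'
        f'<input type="hidden" name="csrfmiddlewaretoken" value="{token}">'
        '<button type="submit">Yes</button>'
        "</form>"
    )
    return HttpResponse(html)
```
A confirmation step assembled on the client side (for example a form built in JavaScript) that omits this hidden field reproduces the same failure.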
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/tcms/testplans/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import functools
5 import itertools
6 import json
7 import urllib
8 from operator import add, itemgetter
9 from typing import List, Optional, Set
10
11 from django.conf import settings
12 from django.contrib.auth.decorators import login_required, permission_required
13 from django.contrib.auth.mixins import PermissionRequiredMixin
14 from django.core.exceptions import ObjectDoesNotExist
15 from django.http import (
16 Http404,
17 HttpRequest,
18 HttpResponse,
19 HttpResponseBadRequest,
20 HttpResponsePermanentRedirect,
21 HttpResponseRedirect,
22 JsonResponse,
23 )
24 from django.shortcuts import get_object_or_404, render
25 from django.template.loader import get_template
26 from django.urls import reverse
27 from django.utils.decorators import method_decorator
28 from django.views.decorators.csrf import csrf_protect
29 from django.views.decorators.http import require_GET, require_http_methods, require_POST
30 from django.views.generic import View
31 from django.views.generic.base import TemplateView
32 from uuslug import slugify
33
34 from tcms.core.db import SQLExecution
35 from tcms.core.models import TCMSLog
36 from tcms.core.responses import JsonResponseBadRequest, JsonResponseNotFound
37 from tcms.core.utils import DataTableResult, checksum
38 from tcms.core.views import prompt
39 from tcms.management.models import Component, TCMSEnvGroup
40 from tcms.testcases.data import get_exported_cases_and_related_data
41 from tcms.testcases.forms import QuickSearchCaseForm, SearchCaseForm
42 from tcms.testcases.models import TestCase, TestCasePlan, TestCaseStatus
43 from tcms.testcases.views import get_selected_testcases
44 from tcms.testplans import sqls
45 from tcms.testplans.forms import (
46 ClonePlanForm,
47 EditPlanForm,
48 ImportCasesViaXMLForm,
49 NewPlanForm,
50 PlanComponentForm,
51 SearchPlanForm,
52 )
53 from tcms.testplans.models import TestPlan, TestPlanComponent
54 from tcms.testruns.models import TestCaseRun, TestRun
55
56 MODULE_NAME = "testplans"
57
58 # _____________________________________________________________________________
59 # helper functons
60
61
62 def update_plan_email_settings(tp, form):
63 """Update testplan's email settings"""
64 tp.email_settings.notify_on_plan_update = form.cleaned_data["notify_on_plan_update"]
65 tp.email_settings.notify_on_plan_delete = form.cleaned_data["notify_on_plan_delete"]
66 tp.email_settings.notify_on_case_update = form.cleaned_data["notify_on_case_update"]
67 tp.email_settings.auto_to_plan_owner = form.cleaned_data["auto_to_plan_owner"]
68 tp.email_settings.auto_to_plan_author = form.cleaned_data["auto_to_plan_author"]
69 tp.email_settings.auto_to_case_owner = form.cleaned_data["auto_to_case_owner"]
70 tp.email_settings.auto_to_case_default_tester = form.cleaned_data["auto_to_case_default_tester"]
71 tp.email_settings.save()
72
73
74 # _____________________________________________________________________________
75 # view functions
76
77
78 class CreateNewPlanView(PermissionRequiredMixin, View):
79 """Create a new test plan view"""
80
81 sub_module_name = "new_plan"
82 template_name = "plan/new.html"
83 permission_required = (
84 "testplans.add_testplan",
85 "testplans.add_testplantext",
86 "testplans.add_tcmsenvplanmap",
87 )
88
89 def make_response(self, form):
90 return render(
91 self.request,
92 self.template_name,
93 context={
94 "module": MODULE_NAME,
95 "sub_module": self.sub_module_name,
96 "form": form,
97 },
98 )
99
100 def get(self, request):
101 return self.make_response(NewPlanForm())
102
103 @method_decorator(csrf_protect)
104 def post(self, request):
105 form = NewPlanForm(request.POST, request.FILES)
106 form.populate(product_id=request.POST.get("product"))
107
108 if not form.is_valid():
109 return self.make_response(form)
110
111 # Process the upload plan document
112 if form.cleaned_data.get("upload_plan_text"):
113 # A document is uploaded to provide the document content. Load the
114 # page again in order to show the content.
115 initial_data = {
116 "name": form.cleaned_data["name"],
117 "type": form.cleaned_data["type"].pk,
118 "product": form.cleaned_data["product"].pk,
119 "product_version": form.cleaned_data["product_version"].pk,
120 "extra_link": form.cleaned_data["extra_link"],
121 "text": form.cleaned_data["text"],
122 }
123 if form.cleaned_data["env_group"]:
124 initial_data["env_group"] = form.cleaned_data["env_group"].pk
125 return self.make_response(NewPlanForm(initial=initial_data))
126
127 # Process the test plan submit to the form
128 tp = TestPlan.objects.create(
129 product=form.cleaned_data["product"],
130 author=request.user,
131 owner=request.user,
132 product_version=form.cleaned_data["product_version"],
133 type=form.cleaned_data["type"],
134 name=form.cleaned_data["name"],
135 create_date=datetime.datetime.now(),
136 extra_link=form.cleaned_data["extra_link"],
137 parent=form.cleaned_data["parent"],
138 )
139
140 tp.add_text(author=request.user, plan_text=form.cleaned_data["text"])
141
142 # Add test plan environment groups
143 if request.POST.get("env_group"):
144 env_groups = TCMSEnvGroup.objects.filter(id__in=request.POST.getlist("env_group"))
145
146 for env_group in env_groups:
147 tp.add_env_group(env_group=env_group)
148
149 return HttpResponseRedirect(reverse("plan-get", args=[tp.plan_id]))
150
151
152 @require_GET
153 @permission_required("testplans.delete_testplan")
154 def delete(request, plan_id):
155 """Delete testplan"""
156 if request.GET.get("sure", "no") == "no":
157 # TODO: rewrite the response
158 plan_delete_url = reverse("plan-delete", args=[plan_id])
159 return HttpResponse(
160 "<script>"
161 "if (confirm('Are you sure you want to delete this plan %s?\\n\\n"
162 "Click OK to delete or cancel to come back'))"
163 "{ window.location.href='%s?sure=yes' }"
164 "else { history.go(-1) }"
165 "</script>" % (plan_id, plan_delete_url)
166 )
167 elif request.GET.get("sure") == "yes":
168 tp = get_object_or_404(TestPlan, plan_id=plan_id)
169
170 try:
171 tp.delete()
172 return HttpResponse(
173 "<script>window.location.href='%s'</script>" % reverse("tcms.testplans.views.all")
174 )
175 except Exception:
176 return prompt.info(request, "Delete failed.")
177 else:
178 return prompt.info(request, "Nothing yet.")
179
180
181 class SimplePlansFilterView(TemplateView):
182 """Providing base plans filter functionaity"""
183
184 # Subclass should provide a concrete template to render the final content.
185 # Or, pass the template path to argument template_name of View.as_view()
186 template_name = None
187
188 def filter_plans(self):
189 search_form = SearchPlanForm(self.request.GET)
190 product_id = self.request.GET.get("product")
191 search_form.populate(int(product_id) if product_id else None)
192
193 plans = TestPlan.objects.none()
194
195 if search_form.is_valid():
196 # Determine the query is the user's plans and change the sub module value
197 author = self.request.GET.get("author__email__startswith")
198 req_user = self.request.user
199 if req_user.is_authenticated and author in (
200 req_user.username,
201 req_user.email,
202 ):
203 self.SUB_MODULE_NAME = "my_plans"
204
205 plans = (
206 TestPlan.list(search_form.cleaned_data)
207 .select_related("author", "type", "product")
208 .order_by("-create_date")
209 )
210
211 plans = TestPlan.apply_subtotal(
212 plans,
213 cases_count=True,
214 runs_count=True,
215 children_count=True,
216 )
217
218 return search_form, plans
219
220 def get_context_data(self, **kwargs):
221 context = super().get_context_data(**kwargs)
222 context["search_plan_form"], context["plans"] = self.filter_plans()
223 return context
224
225
226 class SearchPlansView(SimplePlansFilterView):
227 """Used to filter test plans"""
228
229 SUB_MODULE_NAME = "plans"
230 template_name = "plan/all.html"
231
232 def get(self, request, *args, **kwargs):
233 context = self.get_context_data(**kwargs)
234 return self.render_to_response(context)
235
236 def get_context_data(self, **kwargs):
237 context = super().get_context_data(**kwargs)
238 context.update(
239 {
240 "module": MODULE_NAME,
241 "sub_module": self.SUB_MODULE_NAME,
242 "object_list": context["plans"][0:20],
243 "plans_count": context["plans"].count(),
244 }
245 )
246 return context
247
248
249 class SearchPlansPagesView(SimplePlansFilterView):
250
251 template_name = "plan/common/json_plans.txt"
252 column_names = [
253 "",
254 "plan_id",
255 "name",
256 "author__username",
257 "owner__username",
258 "product",
259 "product_version",
260 "type",
261 "cases_count",
262 "runs_count",
263 "",
264 ]
265
266 def get(self, request, *args, **kwargs):
267 _, plans = self.filter_plans()
268 dt = DataTableResult(request.GET, plans, self.column_names)
269 data = dt.get_response_data()
270 resp_data = get_template(self.template_name).render(data, request)
271 return JsonResponse(json.loads(resp_data))
272
273
274 def get(request, plan_id, slug=None, template_name="plan/get.html"):
275 """Display the plan details."""
276 SUB_MODULE_NAME = "plans"
277
278 try:
279 tp = TestPlan.objects.select_related().get(plan_id=plan_id)
280 tp.latest_text = tp.latest_text()
281 except ObjectDoesNotExist:
282 raise Http404
283
284 # redirect if has a cheated slug
285 if slug != slugify(tp.name):
286 return HttpResponsePermanentRedirect(tp.get_absolute_url())
287
288 # Initial the case counter
289 confirm_status_name = "CONFIRMED"
290 tp.run_case = tp.case.filter(case_status__name=confirm_status_name)
291 tp.review_case = tp.case.exclude(case_status__name=confirm_status_name)
292
293 context_data = {
294 "module": MODULE_NAME,
295 "sub_module": SUB_MODULE_NAME,
296 "test_plan": tp,
297 "xml_form": ImportCasesViaXMLForm(),
298 }
299 return render(request, template_name, context=context_data)
300
301
302 class AddCasesToRunsView(PermissionRequiredMixin, View):
303 """View of adding cases to runs"""
304
305 SUB_MODULE_NAME = "runs"
306 permission_required = "testruns.change_testrun"
307 template_name = "plan/choose_testrun.html"
308
309 def get(self, request, plan_id):
310 plan = TestPlan.objects.filter(pk=int(plan_id)).defer("product_version").first()
311 if plan is None:
312 raise Http404
313
314 # TODO: replace with plan.run.values(...)
315 runs = TestRun.objects.filter(plan=plan_id).values(
316 "pk", "summary", "build__name", "manager__username"
317 )
318
319 cases = get_selected_testcases(request).values(
320 "pk",
321 "summary",
322 "author__username",
323 "create_date",
324 "category__name",
325 "priority__value",
326 )
327
328 return render(
329 request,
330 self.template_name,
331 context={
332 "module": MODULE_NAME,
333 "sub_module": self.SUB_MODULE_NAME,
334 "plan_id": plan_id,
335 "plan": plan,
336 "test_runs": runs.iterator(),
337 "test_cases": cases,
338 },
339 )
340
341 def post(self, request, plan_id):
342 choosed_testrun_ids = request.POST.getlist("testrun_ids")
343 to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist("case_ids"))
344
345 plan_url = reverse("plan-get", args=[plan_id])
346
347 # cases and runs are required in this process
348 if not len(choosed_testrun_ids) or not len(to_be_added_cases):
349 return prompt.info(
350 request,
351 "At least one test run and one case is required to add cases to runs.",
352 plan_url,
353 )
354
355 # Adding cases to runs by recursion
356 for tr_id in choosed_testrun_ids:
357 testrun = get_object_or_404(TestRun, run_id=tr_id)
358 cases = TestCaseRun.objects.filter(run=tr_id)
359 exist_cases_id = cases.values_list("case", flat=True)
360
361 for testcase in to_be_added_cases:
362 if testcase.case_id not in exist_cases_id:
363 testrun.add_case_run(case=testcase)
364
365 estimated_time = functools.reduce(add, [nc.estimated_time for nc in to_be_added_cases])
366 testrun.estimated_time = testrun.estimated_time + estimated_time
367 testrun.save()
368
369 return HttpResponseRedirect(plan_url)
370
371
372 @require_http_methods(["GET", "POST"])
373 @permission_required("testplans.change_testplan")
374 def edit(request, plan_id, template_name="plan/edit.html"):
375 """Edit test plan view"""
376 # Define the default sub module
377 SUB_MODULE_NAME = "plans"
378
379 try:
380 tp = TestPlan.objects.select_related().get(plan_id=plan_id)
381 except ObjectDoesNotExist:
382 raise Http404
383
384 # If the form is submitted
385 if request.method == "POST":
386 form = EditPlanForm(request.POST, request.FILES)
387 if request.POST.get("product"):
388 form.populate(product_id=request.POST["product"])
389 else:
390 form.populate()
391
392 # FIXME: Error handle
393 if form.is_valid():
394 if form.cleaned_data.get("upload_plan_text"):
395 # Set the summary form field to the uploaded text
396 form.data["text"] = form.cleaned_data["text"]
397
398 # Generate the form
399 context_data = {
400 "module": MODULE_NAME,
401 "sub_module": SUB_MODULE_NAME,
402 "form": form,
403 "test_plan": tp,
404 }
405 return render(request, template_name, context=context_data)
406
407 if request.user.has_perm("testplans.change_testplan"):
408 tp.name = form.cleaned_data["name"]
409 tp.parent = form.cleaned_data["parent"]
410 tp.product = form.cleaned_data["product"]
411 tp.product_version = form.cleaned_data["product_version"]
412 tp.type = form.cleaned_data["type"]
413 tp.is_active = form.cleaned_data["is_active"]
414 tp.extra_link = form.cleaned_data["extra_link"]
415 tp.owner = form.cleaned_data["owner"]
416 # IMPORTANT! tp.current_user is an instance attribute,
417 # added so that in post_save, current logged-in user info
418 # can be accessed.
419 # Instance attribute is usually not a desirable solution.
420 tp.current_user = request.user
421 tp.save()
422
423 if request.user.has_perm("testplans.add_testplantext"):
424 new_text = request.POST.get("text")
425 text_checksum = checksum(new_text)
426
427 if not tp.text_exist() or text_checksum != tp.text_checksum():
428 tp.add_text(
429 author=request.user,
430 plan_text=request.POST.get("text"),
431 text_checksum=text_checksum,
432 )
433
434 if request.user.has_perm("management.change_tcmsenvplanmap"):
435 tp.clear_env_groups()
436
437 if request.POST.get("env_group"):
438 env_groups = TCMSEnvGroup.objects.filter(
439 id__in=request.POST.getlist("env_group")
440 )
441
442 for env_group in env_groups:
443 tp.add_env_group(env_group=env_group)
444 # Update plan email settings
445 update_plan_email_settings(tp, form)
446 return HttpResponseRedirect(reverse("plan-get", args=[plan_id, slugify(tp.name)]))
447 else:
448 # Generate a blank form
449 # Temporary use one environment group in this case
450 if tp.env_group.all():
451 for env_group in tp.env_group.all():
452 env_group_id = env_group.id
453 break
454 else:
455 env_group_id = None
456
457 form = EditPlanForm(
458 initial={
459 "name": tp.name,
460 "product": tp.product_id,
461 "product_version": tp.product_version_id,
462 "type": tp.type_id,
463 "text": tp.latest_text() and tp.latest_text().plan_text or "",
464 "parent": tp.parent_id,
465 "env_group": env_group_id,
466 "is_active": tp.is_active,
467 "extra_link": tp.extra_link,
468 "owner": tp.owner,
469 "auto_to_plan_owner": tp.email_settings.auto_to_plan_owner,
470 "auto_to_plan_author": tp.email_settings.auto_to_plan_author,
471 "auto_to_case_owner": tp.email_settings.auto_to_case_owner,
472 "auto_to_case_default_tester": tp.email_settings.auto_to_case_default_tester,
473 "notify_on_plan_update": tp.email_settings.notify_on_plan_update,
474 "notify_on_case_update": tp.email_settings.notify_on_case_update,
475 "notify_on_plan_delete": tp.email_settings.notify_on_plan_delete,
476 }
477 )
478 form.populate(product_id=tp.product_id)
479
480 context_data = {
481 "module": MODULE_NAME,
482 "sub_module": SUB_MODULE_NAME,
483 "test_plan": tp,
484 "form": form,
485 }
486 return render(request, template_name, context=context_data)
487
488
489 @require_http_methods(["GET", "POST"])
490 @permission_required("testplans.add_testplan")
491 def clone(request, template_name="plan/clone.html"):
492 """Clone testplan"""
493 SUB_MODULE_NAME = "plans"
494
495 req_data = request.GET or request.POST
496 if "plan" not in req_data:
497 return prompt.info(
498 request,
499 "At least one plan is required by clone function.",
500 )
501
502 tps = TestPlan.objects.filter(pk__in=req_data.getlist("plan")).order_by("-pk")
503
504 if not tps:
505 return prompt.info(
506 request,
507 "The plan you specify does not exist in database.",
508 )
509
510 # Clone the plan if the form is submitted
511 if request.method == "POST":
512 clone_form = ClonePlanForm(request.POST)
513 clone_form.populate(product_id=request.POST.get("product_id"))
514
515 if clone_form.is_valid():
516 clone_options = clone_form.cleaned_data
517
518 # Create new test plan.
519 for tp in tps:
520
521 new_name = len(tps) == 1 and clone_options["name"] or None
522
523 clone_params = {
524 # Cloned plan properties
525 "new_name": new_name,
526 "product": clone_options["product"],
527 "version": clone_options["product_version"],
528 "set_parent": clone_options["set_parent"],
529 # Related data
530 "copy_texts": clone_options["copy_texts"],
531 "copy_attachments": clone_options["copy_attachements"],
532 "copy_environment_group": clone_options["copy_environment_group"],
533 # Link or copy cases
534 "link_cases": clone_options["link_testcases"],
535 "copy_cases": clone_options["copy_testcases"],
536 "default_component_initial_owner": request.user,
537 }
538
539 assign_me_as_plan_author = not clone_options["keep_orignal_author"]
540 if assign_me_as_plan_author:
541 clone_params["new_original_author"] = request.user
542
543 assign_me_as_copied_case_author = (
544 clone_options["copy_testcases"]
545 and not clone_options["maintain_case_orignal_author"]
546 )
547 if assign_me_as_copied_case_author:
548 clone_params["new_case_author"] = request.user
549
550 assign_me_as_copied_case_default_tester = (
551 clone_options["copy_testcases"]
552 and not clone_options["keep_case_default_tester"]
553 )
554 if assign_me_as_copied_case_default_tester:
555 clone_params["new_case_default_tester"] = request.user
556
557 assign_me_as_text_author = not clone_options["copy_texts"]
558 if assign_me_as_text_author:
559 clone_params["default_text_author"] = request.user
560
561 cloned_plan = tp.clone(**clone_params)
562
563 if len(tps) == 1:
564 return HttpResponseRedirect(reverse("plan-get", args=[cloned_plan.plan_id]))
565 else:
566 args = {
567 "action": "search",
568 "product": clone_form.cleaned_data["product"].id,
569 "product_version": clone_form.cleaned_data["product_version"].id,
570 }
571 url_args = urllib.parse.urlencode(args)
572 return HttpResponseRedirect("{}?{}".format(reverse("plans-all"), url_args))
573 else:
574 # Generate the default values for the form
575 if len(tps) == 1:
576 clone_form = ClonePlanForm(
577 initial={
578 "product": tps[0].product_id,
579 "product_version": tps[0].product_version_id,
580 "set_parent": True,
581 "copy_texts": True,
582 "copy_attachements": True,
583 "copy_environment_group": True,
584 "link_testcases": True,
585 "copy_testcases": False,
586 "maintain_case_orignal_author": True,
587 "keep_case_default_tester": False,
588 "name": tps[0].make_cloned_name(),
589 }
590 )
591 clone_form.populate(product_id=tps[0].product.id)
592 else:
593 clone_form = ClonePlanForm(
594 initial={
595 "set_parent": True,
596 "copy_texts": True,
597 "copy_attachements": True,
598 "link_testcases": True,
599 "copy_testcases": False,
600 "maintain_case_orignal_author": True,
601 "keep_case_default_tester": True,
602 }
603 )
604
605 context_data = {
606 "module": MODULE_NAME,
607 "sub_module": SUB_MODULE_NAME,
608 "testplans": tps,
609 "clone_form": clone_form,
610 }
611 return render(request, template_name, context=context_data)
612
613
614 def attachment(request, plan_id, template_name="plan/attachment.html"):
615 """Manage attached files"""
616 SUB_MODULE_NAME = "plans"
617
618 file_size_limit = settings.MAX_UPLOAD_SIZE
619 limit_readable = int(file_size_limit) / 2 ** 20 # Mb
620
621 tp = get_object_or_404(TestPlan, plan_id=plan_id)
622 context_data = {
623 "module": MODULE_NAME,
624 "sub_module": SUB_MODULE_NAME,
625 "test_plan": tp,
626 "limit": file_size_limit,
627 "limit_readable": str(limit_readable) + "Mb",
628 }
629 return render(request, template_name, context=context_data)
630
631
632 @require_GET
633 def text_history(request, plan_id, template_name="plan/history.html"):
634 """View test plan text history"""
635 SUB_MODULE_NAME = "plans"
636
637 tp = get_object_or_404(TestPlan, plan_id=int(plan_id))
638 tptxts = tp.text.select_related("author").only(
639 "plan", "create_date", "plan_text", "plan_text_version", "author__email"
640 )
641 selected_plan_text_version = int(request.GET.get("plan_text_version", 0))
642 context_data = {
643 "module": MODULE_NAME,
644 "sub_module": SUB_MODULE_NAME,
645 "testplan": tp,
646 "test_plan_texts": tptxts,
647 "select_plan_text_version": selected_plan_text_version,
648 }
649 return render(request, template_name, context=context_data)
650
651
652 class ReorderCasesView(View):
653 """Reorder cases"""
654
655 http_method_names = ["post"]
656
657 def post(self, request, plan_id):
658 # Current we should rewrite all of cases belong to the plan.
659 # Because the cases sortkey in database is chaos,
660 # Most of them are None.
661
662 if "case" not in request.POST:
663 return JsonResponseBadRequest({"message": "At least one case is required to re-order."})
664
665 plan = get_object_or_404(TestPlan, pk=int(plan_id))
666
667 case_ids = [int(id) for id in request.POST.getlist("case")]
668 cases = TestCase.objects.filter(pk__in=case_ids).only("pk")
669
670 for case in cases:
671 new_sort_key = (case_ids.index(case.pk) + 1) * 10
672 TestCasePlan.objects.filter(plan=plan, case=case).update(sortkey=new_sort_key)
673
674 return JsonResponse({})
675
676
677 class LinkCasesView(View):
678 """Link cases to plan"""
679
680 permission_required = "testcases.add_testcaseplan"
681
682 def post(self, request, plan_id):
683 plan = get_object_or_404(TestPlan.objects.only("pk"), pk=int(plan_id))
684 case_ids = [int(id) for id in request.POST.getlist("case")]
685 cases = TestCase.objects.filter(case_id__in=case_ids).only("pk")
686 for case in cases:
687 plan.add_case(case)
688 return HttpResponseRedirect(reverse("plan-get", args=[plan_id]))
689
690
691 class LinkCasesSearchView(View):
692 """Search cases for linking to plan"""
693
694 template_name = "plan/search_case.html"
695 SUB_MODULE_NAME = "plans"
696
697 def get(self, request, plan_id):
698 plan = get_object_or_404(TestPlan, pk=int(plan_id))
699
700 normal_form = SearchCaseForm(
701 initial={
702 "product": plan.product_id,
703 "product_version": plan.product_version_id,
704 "case_status_id": TestCaseStatus.get("CONFIRMED"),
705 }
706 )
707 quick_form = QuickSearchCaseForm()
708 return render(
709 self.request,
710 self.template_name,
711 {
712 "module": MODULE_NAME,
713 "sub_module": self.SUB_MODULE_NAME,
714 "search_form": normal_form,
715 "quick_form": quick_form,
716 "test_plan": plan,
717 },
718 )
719
720 def post(self, request, plan_id):
721 plan = get_object_or_404(TestPlan, pk=int(plan_id))
722
723 search_mode = request.POST.get("search_mode")
724 if search_mode == "quick":
725 form = quick_form = QuickSearchCaseForm(request.POST)
726 normal_form = SearchCaseForm()
727 else:
728 form = normal_form = SearchCaseForm(request.POST)
729 form.populate(product_id=request.POST.get("product"))
730 quick_form = QuickSearchCaseForm()
731
732 if form.is_valid():
733 cases = TestCase.list(form.cleaned_data)
734 cases = (
735 cases.select_related("author", "default_tester", "case_status", "priority")
736 .only(
737 "pk",
738 "summary",
739 "create_date",
740 "author__email",
741 "default_tester__email",
742 "case_status__name",
743 "priority__value",
744 )
745 .exclude(case_id__in=plan.case.values_list("case_id", flat=True))
746 )
747
748 context = {
749 "module": MODULE_NAME,
750 "sub_module": self.SUB_MODULE_NAME,
751 "test_plan": plan,
752 "test_cases": cases,
753 "search_form": normal_form,
754 "quick_form": quick_form,
755 "search_mode": search_mode,
756 }
757 return render(request, self.template_name, context=context)
758
759
760 class ImportCasesView(PermissionRequiredMixin, View):
761 """Import cases to a plan"""
762
763 permission_required = "testcases.add_testcaseplan"
764
765 def post(self, request, plan_id):
766 plan = get_object_or_404(TestPlan.objects.only("pk"), pk=int(plan_id))
767 next_url = reverse("plan-get", args=[plan_id]) + "#testcases"
768 xml_form = ImportCasesViaXMLForm(request.POST, request.FILES)
769 if xml_form.is_valid():
770 plan.import_cases(xml_form.cleaned_data["xml_file"])
771 return HttpResponseRedirect(next_url)
772 else:
773 return prompt.alert(request, xml_form.errors, next_url)
774
775
776 class DeleteCasesView(View):
777 """Delete selected cases from plan"""
778
779 def post(self, request, plan_id):
780 plan = get_object_or_404(TestPlan.objects.only("pk"), pk=int(plan_id))
781
782 if "case" not in request.POST:
783 return JsonResponseBadRequest({"message": "At least one case is required to delete."})
784
785 cases = get_selected_testcases(request).only("pk")
786
787 # Log Action
788 plan_log = TCMSLog(model=plan)
789 for case in cases:
790 plan_log.make(who=request.user, new_value=f"Remove case {case.pk} from plan {plan.pk}")
791 case.log_action(who=request.user, new_value=f"Remove from plan {plan.pk}")
792 plan.delete_case(case=case)
793
794 return JsonResponse({})
795
796
797 class PlanComponentsActionView(View):
798 """Manage a plan's components"""
799
800 template_name = "plan/get_component.html"
801
802 def get(self, request):
803 if "plan" not in request.GET:
804 return HttpResponseBadRequest("Plan ID is not in request.")
805 plans = TestPlan.objects.filter(pk=int(request.GET["plan"]))
806 if not plans:
807 return Http404("Plan ID {} does not exist.".format(", ".join(plans)))
808
809 action = request.GET.get("a", "get_component_list").lower()
810
811 if action == "get_form":
812 return self.get_manage_form(request, plans)
813 elif action == "get_component_list":
814 return self.get_default_component_list(request, plans[0])
815 elif action == "add":
816 return self.add(request, plans[0], self._get_components())
817 elif action == "remove":
818 components = self._get_components()
819 return self.remove_components_from_plan(request, plans[0], components)
820 elif action == "update":
821 return self.update_components(request, plans[0])
822
823 def _get_components(self):
824 if "component" not in self.request.GET:
825 return HttpResponseBadRequest("Component ID is not in request.")
826 component_ids = [int(id) for id in self.request.GET.getlist("component")]
827 return Component.objects.filter(pk__in=component_ids)
828
829 @method_decorator(permission_required("testplans.add_testplancomponent"))
830 def add(self, request, plan, components):
831 """Add components to given plans"""
832 list(map(plan.add_component, components))
833
834 @method_decorator(permission_required("testplans.delete_testplancomponent"))
835 def remove_components_from_plan(self, request, plan, components=None):
836 """Remove existing components from plans
837
838 :param plan: instance of TestPlan, from which to remove components
839 from this plan.
840 :param components: instances of Component, which will be removed.
841 """
842 if components is None:
843 TestPlanComponent.objects.filter(plan=plan).delete()
844 else:
845 list(map(plan.remove_component, components))
846
847 return self.get_default_component_list(request, plan)
848
849 def update_components(self, request, plan):
850 self.remove_components_from_plan(request, plan)
851 self.add(request, plan, self._get_components())
852 return self.get_default_component_list(request, plan)
853
854 def get_manage_form(self, request, plans):
855 """Return form content in order to select components"""
856 plan_comps = TestPlanComponent.objects.filter(plan__in=plans)
857
858 form = PlanComponentForm(
859 tps=plans,
860 initial={
861 "component": plan_comps.values_list("component_id", flat=True),
862 },
863 )
864
865 q_format = request.GET.get("format", "p")
866 html = getattr(form, "as_" + q_format)
867
868 return HttpResponse(html())
869
870 def get_default_component_list(self, request, plan):
871 return render(request, self.template_name, context={"test_plan": plan})
872
873
874 @require_GET
875 def printable(request, template_name="plan/printable.html"):
876 """Create the printable copy for plan"""
877 plan_pks = request.GET.getlist("plan")
878
879 if not plan_pks:
880 return prompt.info(request, "At least one target is required.")
881
882 tps = TestPlan.objects.filter(pk__in=plan_pks).only("pk", "name")
883
884 def plan_generator():
885 repeat = len(plan_pks)
886 params_sql = ",".join(itertools.repeat("%s", repeat))
887 sql = sqls.TP_PRINTABLE_CASE_TEXTS % (params_sql, params_sql)
888 result_set = SQLExecution(sql, plan_pks * 2)
889 group_data = itertools.groupby(result_set.rows, itemgetter("plan_id"))
890 cases_dict = {key: list(values) for key, values in group_data}
891 for tp in tps:
892 tp.result_set = cases_dict.get(tp.plan_id, None)
893 yield tp
894
895 context_data = {
896 "test_plans": plan_generator(),
897 }
898
899 return render(request, template_name, context=context_data)
900
901
902 @require_GET
903 def export(request, template_name="case/export.xml"):
904 """Export the plan"""
905 plan_pks = list(map(int, request.GET.getlist("plan")))
906
907 if not plan_pks:
908 return prompt.info(request, "At least one target is required.")
909
910 context_data = {
911 "cases_info": get_exported_cases_and_related_data(plan_pks),
912 }
913
914 timestamp = datetime.datetime.now()
915 timestamp_str = "%02i-%02i-%02i" % (timestamp.year, timestamp.month, timestamp.day)
916
917 response = render(request, template_name, context=context_data)
918 filename = f"tcms-testcases-{timestamp_str}.xml"
919 response["Content-Disposition"] = f"attachment; filename={filename}"
920 return response
921
922
923 @require_GET
924 def construct_plans_treeview(request, plan_id):
925 """Construct a plan's tree view"""
926 plan = get_object_or_404(TestPlan, pk=plan_id)
927
928 tree_plan_ids = plan.get_ancestor_ids() + plan.get_descendant_ids()
929 tree_plan_ids.append(plan.pk)
930
931 plans = (
932 TestPlan.objects.filter(pk__in=tree_plan_ids)
933 .only("pk", "name", "parent_id")
934 .order_by("parent_id", "pk")
935 )
936
937 plans = TestPlan.apply_subtotal(plans, cases_count=True, runs_count=True, children_count=True)
938
939 return render(
940 request,
941 "plan/get_treeview.html",
942 context={"current_plan_id": plan_id, "plans": plans},
943 )
944
945
946 @login_required
947 @require_POST
948 def treeview_add_child_plans(request: HttpRequest, plan_id: int):
949 plan = TestPlan.objects.filter(pk=plan_id).only("pk").first()
950 if plan is None:
951 return JsonResponseNotFound({"message": f"Plan {plan_id} does not exist."})
952
953 child_plan_ids: List[str] = request.POST.getlist("children")
954 child_plans: List[TestPlan] = []
955
956 ancestor_ids = plan.get_ancestor_ids()
957 descendant_ids = plan.get_descendant_ids()
958
959 for child_plan_id in child_plan_ids:
960 if not child_plan_id.isdigit():
961 return JsonResponseBadRequest(
962 {"message": f"Child plan id {child_plan_id} is not a number."}
963 )
964 child_plan: TestPlan = TestPlan.objects.filter(pk=int(child_plan_id)).only("pk").first()
965 if child_plan is None:
966 return JsonResponseBadRequest(
967 {"message": f"Child plan {child_plan_id} does not exist."}
968 )
969 if child_plan.pk in ancestor_ids:
970 return JsonResponseBadRequest(
971 {"message": f"Plan {child_plan_id} is an ancestor of " f"plan {plan_id} already."}
972 )
973 if child_plan.pk in descendant_ids:
974 return JsonResponseBadRequest(
975 {"message": f"Plan {child_plan_id} is a descendant of " f"plan {plan_id} already."}
976 )
977
978 child_plans.append(child_plan)
979
980 for child_plan in child_plans:
981 child_plan.parent = plan
982 child_plan.save(update_fields=["parent"])
983
984 return JsonResponse(
985 {"parent_plan": plan.pk, "children_plans": [plan.pk for plan in child_plans]}
986 )
987
988
989 @login_required
990 @require_POST
991 def treeview_remove_child_plans(request, plan_id: int):
992 plan: TestPlan = TestPlan.objects.filter(pk=plan_id).only("pk").first()
993 if plan is None:
994 return JsonResponseNotFound({"message": f"Plan {plan_id} does not exist."})
995
996 child_plan_ids: Set[int] = set(map(int, request.POST.getlist("children")))
997 direct_descendants = set(plan.get_descendant_ids(True))
998 ids_to_remove = child_plan_ids & direct_descendants
999
1000 if ids_to_remove:
1001 TestPlan.objects.filter(pk__in=ids_to_remove).update(parent=None)
1002
1003 return JsonResponse(
1004 {
1005 "parent_plan": plan.pk,
1006 "removed": sorted(ids_to_remove),
1007 "non_descendants": sorted(child_plan_ids - direct_descendants),
1008 }
1009 )
1010
1011
1012 class PlanTreeChangeParentView(PermissionRequiredMixin, View):
1013 """Plan tree view to change a plan's parent"""
1014
1015 permission_required = "testplans.change_testplan"
1016
1017 def handle_no_permission(self):
1018 return JsonResponseBadRequest(
1019 {"message": "You do not have permission to change the parent plan."}
1020 )
1021
1022 def patch(self, request, *args, **kwargs):
1023 plan: TestPlan = TestPlan.objects.filter(pk=self.kwargs["plan_id"]).only("pk").first()
1024 if plan is None:
1025 return JsonResponseNotFound(
1026 {
1027 "message": f"Cannot change parent of plan, "
1028 f"whose id {self.kwargs['plan_id']} does not exist."
1029 }
1030 )
1031
1032 data = json.loads(request.body)
1033 user_input: Optional[str] = data.get("parent")
1034 if user_input is None:
1035 return JsonResponseBadRequest({"message": "Missing parent plan id."})
1036 if not isinstance(user_input, int):
1037 return JsonResponseBadRequest(
1038 {"message": f'The given parent plan id "{user_input}" is not a positive integer.'}
1039 )
1040 parent_id = int(user_input)
1041 new_parent = TestPlan.objects.filter(pk=parent_id).only("parent").first()
1042 if new_parent is None:
1043 return JsonResponseBadRequest(
1044 {"message": f"The parent plan id {parent_id} does not exist."}
1045 )
1046
1047 descendant_ids = plan.get_descendant_ids()
1048 if parent_id in descendant_ids:
1049 return JsonResponseBadRequest(
1050 {
1051 "message": f"The parent plan {parent_id} is a descendant of plan {plan.pk} already."
1052 }
1053 )
1054
1055 original_value = plan.parent.pk if plan.parent else "None"
1056
1057 plan.parent = new_parent
1058 plan.save(update_fields=["parent"])
1059 plan.log_action(
1060 who=request.user,
1061 field="parent",
1062 original_value=original_value,
1063 new_value=str(new_parent.pk),
1064 )
1065
1066 return JsonResponse({})
1067
1068
1069 class SetPlanActiveView(PermissionRequiredMixin, View):
1070 """Set a test plan active or inactive"""
1071
1072 permission_required = "testplans.change_testplan"
1073 raise_exception = True
1074 enable: bool = True
1075
1076 def patch(self, request, *args, **kwargs):
1077 plan_id = self.kwargs["plan_id"]
1078 plan: TestPlan = TestPlan.objects.filter(pk=plan_id).only("is_active").first()
1079 if not plan:
1080 return JsonResponseNotFound({"message": f"Plan id {plan_id} does not exist."})
1081 original_value: str = str(plan.is_active)
1082 plan.is_active = self.enable
1083 plan.save(update_fields=["is_active"])
1084 plan.log_action(
1085 who=request.user,
1086 field="is_active",
1087 original_value=original_value,
1088 new_value=str(plan.is_active),
1089 )
1090 return JsonResponse({})
1091
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/tcms/testplans/views.py b/src/tcms/testplans/views.py
--- a/src/tcms/testplans/views.py
+++ b/src/tcms/testplans/views.py
@@ -339,8 +339,8 @@
         )
 
     def post(self, request, plan_id):
-        choosed_testrun_ids = request.POST.getlist("testrun_ids")
-        to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist("case_ids"))
+        choosed_testrun_ids = request.POST.getlist("run")
+        to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist("case"))
 
         plan_url = reverse("plan-get", args=[plan_id])
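For readers comparing the patch with the view source above: the fix makes `AddCasesToRunsView.post` read the multi-value POST keys `run` and `case` instead of `testrun_ids` and `case_ids`, presumably to match the field names the plan page form actually submits. With the old keys, `getlist()` returned empty lists and the request fell into the "At least one test run and one case is required" branch shown in the view. Below is a minimal standalone sketch of that `getlist()` behaviour; the query string and primary keys are illustrative assumptions, not values taken from this record.

```python
from django.conf import settings

# Configure bare default settings so the sketch can run outside a project checkout.
if not settings.configured:
    settings.configure()

from django.http import QueryDict

# Roughly what an "Add cases to runs" submission carries (the pks are made up).
posted = QueryDict("run=2&run=5&case=30&case=31")

print(posted.getlist("run"))          # ['2', '5']   -> runs selected on the form
print(posted.getlist("case"))         # ['30', '31'] -> cases to add to each run
print(posted.getlist("testrun_ids"))  # []           -> the old key matches nothing,
                                      #                 which emptied both lists
```

Since the rest of the method already iterates over whatever `getlist()` returns, renaming the keys is the whole of the change in the diff above.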
| {"golden_diff": "diff --git a/src/tcms/testplans/views.py b/src/tcms/testplans/views.py\n--- a/src/tcms/testplans/views.py\n+++ b/src/tcms/testplans/views.py\n@@ -339,8 +339,8 @@\n )\n \n def post(self, request, plan_id):\n- choosed_testrun_ids = request.POST.getlist(\"testrun_ids\")\n- to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist(\"case_ids\"))\n+ choosed_testrun_ids = request.POST.getlist(\"run\")\n+ to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist(\"case\"))\n \n plan_url = reverse(\"plan-get\", args=[plan_id])\n", "issue": "CSRF error when add cases to test runs\nReproduce steps:\r\n\r\n* Go to a plan page\r\n* Select a few cases\r\n* Click \"Add cases to runs\"\r\n* Select at least one test run and click Update button\r\n* Confirm yes, then error occurs:\r\n\r\nForbidden (403)\r\nCSRF verification failed. Request aborted.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport functools\nimport itertools\nimport json\nimport urllib\nfrom operator import add, itemgetter\nfrom typing import List, Optional, Set\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import (\n Http404,\n HttpRequest,\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponsePermanentRedirect,\n HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.http import require_GET, require_http_methods, require_POST\nfrom django.views.generic import View\nfrom django.views.generic.base import TemplateView\nfrom uuslug import slugify\n\nfrom tcms.core.db import SQLExecution\nfrom tcms.core.models import TCMSLog\nfrom tcms.core.responses import JsonResponseBadRequest, JsonResponseNotFound\nfrom tcms.core.utils import DataTableResult, checksum\nfrom tcms.core.views import prompt\nfrom tcms.management.models import Component, TCMSEnvGroup\nfrom tcms.testcases.data import get_exported_cases_and_related_data\nfrom tcms.testcases.forms import QuickSearchCaseForm, SearchCaseForm\nfrom tcms.testcases.models import TestCase, TestCasePlan, TestCaseStatus\nfrom tcms.testcases.views import get_selected_testcases\nfrom tcms.testplans import sqls\nfrom tcms.testplans.forms import (\n ClonePlanForm,\n EditPlanForm,\n ImportCasesViaXMLForm,\n NewPlanForm,\n PlanComponentForm,\n SearchPlanForm,\n)\nfrom tcms.testplans.models import TestPlan, TestPlanComponent\nfrom tcms.testruns.models import TestCaseRun, TestRun\n\nMODULE_NAME = \"testplans\"\n\n# _____________________________________________________________________________\n# helper functons\n\n\ndef update_plan_email_settings(tp, form):\n \"\"\"Update testplan's email settings\"\"\"\n tp.email_settings.notify_on_plan_update = form.cleaned_data[\"notify_on_plan_update\"]\n tp.email_settings.notify_on_plan_delete = form.cleaned_data[\"notify_on_plan_delete\"]\n tp.email_settings.notify_on_case_update = form.cleaned_data[\"notify_on_case_update\"]\n tp.email_settings.auto_to_plan_owner = form.cleaned_data[\"auto_to_plan_owner\"]\n tp.email_settings.auto_to_plan_author = form.cleaned_data[\"auto_to_plan_author\"]\n tp.email_settings.auto_to_case_owner = 
form.cleaned_data[\"auto_to_case_owner\"]\n tp.email_settings.auto_to_case_default_tester = form.cleaned_data[\"auto_to_case_default_tester\"]\n tp.email_settings.save()\n\n\n# _____________________________________________________________________________\n# view functions\n\n\nclass CreateNewPlanView(PermissionRequiredMixin, View):\n \"\"\"Create a new test plan view\"\"\"\n\n sub_module_name = \"new_plan\"\n template_name = \"plan/new.html\"\n permission_required = (\n \"testplans.add_testplan\",\n \"testplans.add_testplantext\",\n \"testplans.add_tcmsenvplanmap\",\n )\n\n def make_response(self, form):\n return render(\n self.request,\n self.template_name,\n context={\n \"module\": MODULE_NAME,\n \"sub_module\": self.sub_module_name,\n \"form\": form,\n },\n )\n\n def get(self, request):\n return self.make_response(NewPlanForm())\n\n @method_decorator(csrf_protect)\n def post(self, request):\n form = NewPlanForm(request.POST, request.FILES)\n form.populate(product_id=request.POST.get(\"product\"))\n\n if not form.is_valid():\n return self.make_response(form)\n\n # Process the upload plan document\n if form.cleaned_data.get(\"upload_plan_text\"):\n # A document is uploaded to provide the document content. Load the\n # page again in order to show the content.\n initial_data = {\n \"name\": form.cleaned_data[\"name\"],\n \"type\": form.cleaned_data[\"type\"].pk,\n \"product\": form.cleaned_data[\"product\"].pk,\n \"product_version\": form.cleaned_data[\"product_version\"].pk,\n \"extra_link\": form.cleaned_data[\"extra_link\"],\n \"text\": form.cleaned_data[\"text\"],\n }\n if form.cleaned_data[\"env_group\"]:\n initial_data[\"env_group\"] = form.cleaned_data[\"env_group\"].pk\n return self.make_response(NewPlanForm(initial=initial_data))\n\n # Process the test plan submit to the form\n tp = TestPlan.objects.create(\n product=form.cleaned_data[\"product\"],\n author=request.user,\n owner=request.user,\n product_version=form.cleaned_data[\"product_version\"],\n type=form.cleaned_data[\"type\"],\n name=form.cleaned_data[\"name\"],\n create_date=datetime.datetime.now(),\n extra_link=form.cleaned_data[\"extra_link\"],\n parent=form.cleaned_data[\"parent\"],\n )\n\n tp.add_text(author=request.user, plan_text=form.cleaned_data[\"text\"])\n\n # Add test plan environment groups\n if request.POST.get(\"env_group\"):\n env_groups = TCMSEnvGroup.objects.filter(id__in=request.POST.getlist(\"env_group\"))\n\n for env_group in env_groups:\n tp.add_env_group(env_group=env_group)\n\n return HttpResponseRedirect(reverse(\"plan-get\", args=[tp.plan_id]))\n\n\n@require_GET\n@permission_required(\"testplans.delete_testplan\")\ndef delete(request, plan_id):\n \"\"\"Delete testplan\"\"\"\n if request.GET.get(\"sure\", \"no\") == \"no\":\n # TODO: rewrite the response\n plan_delete_url = reverse(\"plan-delete\", args=[plan_id])\n return HttpResponse(\n \"<script>\"\n \"if (confirm('Are you sure you want to delete this plan %s?\\\\n\\\\n\"\n \"Click OK to delete or cancel to come back'))\"\n \"{ window.location.href='%s?sure=yes' }\"\n \"else { history.go(-1) }\"\n \"</script>\" % (plan_id, plan_delete_url)\n )\n elif request.GET.get(\"sure\") == \"yes\":\n tp = get_object_or_404(TestPlan, plan_id=plan_id)\n\n try:\n tp.delete()\n return HttpResponse(\n \"<script>window.location.href='%s'</script>\" % reverse(\"tcms.testplans.views.all\")\n )\n except Exception:\n return prompt.info(request, \"Delete failed.\")\n else:\n return prompt.info(request, \"Nothing yet.\")\n\n\nclass 
SimplePlansFilterView(TemplateView):\n \"\"\"Providing base plans filter functionaity\"\"\"\n\n # Subclass should provide a concrete template to render the final content.\n # Or, pass the template path to argument template_name of View.as_view()\n template_name = None\n\n def filter_plans(self):\n search_form = SearchPlanForm(self.request.GET)\n product_id = self.request.GET.get(\"product\")\n search_form.populate(int(product_id) if product_id else None)\n\n plans = TestPlan.objects.none()\n\n if search_form.is_valid():\n # Determine the query is the user's plans and change the sub module value\n author = self.request.GET.get(\"author__email__startswith\")\n req_user = self.request.user\n if req_user.is_authenticated and author in (\n req_user.username,\n req_user.email,\n ):\n self.SUB_MODULE_NAME = \"my_plans\"\n\n plans = (\n TestPlan.list(search_form.cleaned_data)\n .select_related(\"author\", \"type\", \"product\")\n .order_by(\"-create_date\")\n )\n\n plans = TestPlan.apply_subtotal(\n plans,\n cases_count=True,\n runs_count=True,\n children_count=True,\n )\n\n return search_form, plans\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"search_plan_form\"], context[\"plans\"] = self.filter_plans()\n return context\n\n\nclass SearchPlansView(SimplePlansFilterView):\n \"\"\"Used to filter test plans\"\"\"\n\n SUB_MODULE_NAME = \"plans\"\n template_name = \"plan/all.html\"\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"object_list\": context[\"plans\"][0:20],\n \"plans_count\": context[\"plans\"].count(),\n }\n )\n return context\n\n\nclass SearchPlansPagesView(SimplePlansFilterView):\n\n template_name = \"plan/common/json_plans.txt\"\n column_names = [\n \"\",\n \"plan_id\",\n \"name\",\n \"author__username\",\n \"owner__username\",\n \"product\",\n \"product_version\",\n \"type\",\n \"cases_count\",\n \"runs_count\",\n \"\",\n ]\n\n def get(self, request, *args, **kwargs):\n _, plans = self.filter_plans()\n dt = DataTableResult(request.GET, plans, self.column_names)\n data = dt.get_response_data()\n resp_data = get_template(self.template_name).render(data, request)\n return JsonResponse(json.loads(resp_data))\n\n\ndef get(request, plan_id, slug=None, template_name=\"plan/get.html\"):\n \"\"\"Display the plan details.\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n try:\n tp = TestPlan.objects.select_related().get(plan_id=plan_id)\n tp.latest_text = tp.latest_text()\n except ObjectDoesNotExist:\n raise Http404\n\n # redirect if has a cheated slug\n if slug != slugify(tp.name):\n return HttpResponsePermanentRedirect(tp.get_absolute_url())\n\n # Initial the case counter\n confirm_status_name = \"CONFIRMED\"\n tp.run_case = tp.case.filter(case_status__name=confirm_status_name)\n tp.review_case = tp.case.exclude(case_status__name=confirm_status_name)\n\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"test_plan\": tp,\n \"xml_form\": ImportCasesViaXMLForm(),\n }\n return render(request, template_name, context=context_data)\n\n\nclass AddCasesToRunsView(PermissionRequiredMixin, View):\n \"\"\"View of adding cases to runs\"\"\"\n\n SUB_MODULE_NAME = \"runs\"\n permission_required = \"testruns.change_testrun\"\n template_name = 
\"plan/choose_testrun.html\"\n\n def get(self, request, plan_id):\n plan = TestPlan.objects.filter(pk=int(plan_id)).defer(\"product_version\").first()\n if plan is None:\n raise Http404\n\n # TODO: replace with plan.run.values(...)\n runs = TestRun.objects.filter(plan=plan_id).values(\n \"pk\", \"summary\", \"build__name\", \"manager__username\"\n )\n\n cases = get_selected_testcases(request).values(\n \"pk\",\n \"summary\",\n \"author__username\",\n \"create_date\",\n \"category__name\",\n \"priority__value\",\n )\n\n return render(\n request,\n self.template_name,\n context={\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"plan_id\": plan_id,\n \"plan\": plan,\n \"test_runs\": runs.iterator(),\n \"test_cases\": cases,\n },\n )\n\n def post(self, request, plan_id):\n choosed_testrun_ids = request.POST.getlist(\"testrun_ids\")\n to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist(\"case_ids\"))\n\n plan_url = reverse(\"plan-get\", args=[plan_id])\n\n # cases and runs are required in this process\n if not len(choosed_testrun_ids) or not len(to_be_added_cases):\n return prompt.info(\n request,\n \"At least one test run and one case is required to add cases to runs.\",\n plan_url,\n )\n\n # Adding cases to runs by recursion\n for tr_id in choosed_testrun_ids:\n testrun = get_object_or_404(TestRun, run_id=tr_id)\n cases = TestCaseRun.objects.filter(run=tr_id)\n exist_cases_id = cases.values_list(\"case\", flat=True)\n\n for testcase in to_be_added_cases:\n if testcase.case_id not in exist_cases_id:\n testrun.add_case_run(case=testcase)\n\n estimated_time = functools.reduce(add, [nc.estimated_time for nc in to_be_added_cases])\n testrun.estimated_time = testrun.estimated_time + estimated_time\n testrun.save()\n\n return HttpResponseRedirect(plan_url)\n\n\n@require_http_methods([\"GET\", \"POST\"])\n@permission_required(\"testplans.change_testplan\")\ndef edit(request, plan_id, template_name=\"plan/edit.html\"):\n \"\"\"Edit test plan view\"\"\"\n # Define the default sub module\n SUB_MODULE_NAME = \"plans\"\n\n try:\n tp = TestPlan.objects.select_related().get(plan_id=plan_id)\n except ObjectDoesNotExist:\n raise Http404\n\n # If the form is submitted\n if request.method == \"POST\":\n form = EditPlanForm(request.POST, request.FILES)\n if request.POST.get(\"product\"):\n form.populate(product_id=request.POST[\"product\"])\n else:\n form.populate()\n\n # FIXME: Error handle\n if form.is_valid():\n if form.cleaned_data.get(\"upload_plan_text\"):\n # Set the summary form field to the uploaded text\n form.data[\"text\"] = form.cleaned_data[\"text\"]\n\n # Generate the form\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"form\": form,\n \"test_plan\": tp,\n }\n return render(request, template_name, context=context_data)\n\n if request.user.has_perm(\"testplans.change_testplan\"):\n tp.name = form.cleaned_data[\"name\"]\n tp.parent = form.cleaned_data[\"parent\"]\n tp.product = form.cleaned_data[\"product\"]\n tp.product_version = form.cleaned_data[\"product_version\"]\n tp.type = form.cleaned_data[\"type\"]\n tp.is_active = form.cleaned_data[\"is_active\"]\n tp.extra_link = form.cleaned_data[\"extra_link\"]\n tp.owner = form.cleaned_data[\"owner\"]\n # IMPORTANT! 
tp.current_user is an instance attribute,\n # added so that in post_save, current logged-in user info\n # can be accessed.\n # Instance attribute is usually not a desirable solution.\n tp.current_user = request.user\n tp.save()\n\n if request.user.has_perm(\"testplans.add_testplantext\"):\n new_text = request.POST.get(\"text\")\n text_checksum = checksum(new_text)\n\n if not tp.text_exist() or text_checksum != tp.text_checksum():\n tp.add_text(\n author=request.user,\n plan_text=request.POST.get(\"text\"),\n text_checksum=text_checksum,\n )\n\n if request.user.has_perm(\"management.change_tcmsenvplanmap\"):\n tp.clear_env_groups()\n\n if request.POST.get(\"env_group\"):\n env_groups = TCMSEnvGroup.objects.filter(\n id__in=request.POST.getlist(\"env_group\")\n )\n\n for env_group in env_groups:\n tp.add_env_group(env_group=env_group)\n # Update plan email settings\n update_plan_email_settings(tp, form)\n return HttpResponseRedirect(reverse(\"plan-get\", args=[plan_id, slugify(tp.name)]))\n else:\n # Generate a blank form\n # Temporary use one environment group in this case\n if tp.env_group.all():\n for env_group in tp.env_group.all():\n env_group_id = env_group.id\n break\n else:\n env_group_id = None\n\n form = EditPlanForm(\n initial={\n \"name\": tp.name,\n \"product\": tp.product_id,\n \"product_version\": tp.product_version_id,\n \"type\": tp.type_id,\n \"text\": tp.latest_text() and tp.latest_text().plan_text or \"\",\n \"parent\": tp.parent_id,\n \"env_group\": env_group_id,\n \"is_active\": tp.is_active,\n \"extra_link\": tp.extra_link,\n \"owner\": tp.owner,\n \"auto_to_plan_owner\": tp.email_settings.auto_to_plan_owner,\n \"auto_to_plan_author\": tp.email_settings.auto_to_plan_author,\n \"auto_to_case_owner\": tp.email_settings.auto_to_case_owner,\n \"auto_to_case_default_tester\": tp.email_settings.auto_to_case_default_tester,\n \"notify_on_plan_update\": tp.email_settings.notify_on_plan_update,\n \"notify_on_case_update\": tp.email_settings.notify_on_case_update,\n \"notify_on_plan_delete\": tp.email_settings.notify_on_plan_delete,\n }\n )\n form.populate(product_id=tp.product_id)\n\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"test_plan\": tp,\n \"form\": form,\n }\n return render(request, template_name, context=context_data)\n\n\n@require_http_methods([\"GET\", \"POST\"])\n@permission_required(\"testplans.add_testplan\")\ndef clone(request, template_name=\"plan/clone.html\"):\n \"\"\"Clone testplan\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n req_data = request.GET or request.POST\n if \"plan\" not in req_data:\n return prompt.info(\n request,\n \"At least one plan is required by clone function.\",\n )\n\n tps = TestPlan.objects.filter(pk__in=req_data.getlist(\"plan\")).order_by(\"-pk\")\n\n if not tps:\n return prompt.info(\n request,\n \"The plan you specify does not exist in database.\",\n )\n\n # Clone the plan if the form is submitted\n if request.method == \"POST\":\n clone_form = ClonePlanForm(request.POST)\n clone_form.populate(product_id=request.POST.get(\"product_id\"))\n\n if clone_form.is_valid():\n clone_options = clone_form.cleaned_data\n\n # Create new test plan.\n for tp in tps:\n\n new_name = len(tps) == 1 and clone_options[\"name\"] or None\n\n clone_params = {\n # Cloned plan properties\n \"new_name\": new_name,\n \"product\": clone_options[\"product\"],\n \"version\": clone_options[\"product_version\"],\n \"set_parent\": clone_options[\"set_parent\"],\n # Related data\n \"copy_texts\": clone_options[\"copy_texts\"],\n 
\"copy_attachments\": clone_options[\"copy_attachements\"],\n \"copy_environment_group\": clone_options[\"copy_environment_group\"],\n # Link or copy cases\n \"link_cases\": clone_options[\"link_testcases\"],\n \"copy_cases\": clone_options[\"copy_testcases\"],\n \"default_component_initial_owner\": request.user,\n }\n\n assign_me_as_plan_author = not clone_options[\"keep_orignal_author\"]\n if assign_me_as_plan_author:\n clone_params[\"new_original_author\"] = request.user\n\n assign_me_as_copied_case_author = (\n clone_options[\"copy_testcases\"]\n and not clone_options[\"maintain_case_orignal_author\"]\n )\n if assign_me_as_copied_case_author:\n clone_params[\"new_case_author\"] = request.user\n\n assign_me_as_copied_case_default_tester = (\n clone_options[\"copy_testcases\"]\n and not clone_options[\"keep_case_default_tester\"]\n )\n if assign_me_as_copied_case_default_tester:\n clone_params[\"new_case_default_tester\"] = request.user\n\n assign_me_as_text_author = not clone_options[\"copy_texts\"]\n if assign_me_as_text_author:\n clone_params[\"default_text_author\"] = request.user\n\n cloned_plan = tp.clone(**clone_params)\n\n if len(tps) == 1:\n return HttpResponseRedirect(reverse(\"plan-get\", args=[cloned_plan.plan_id]))\n else:\n args = {\n \"action\": \"search\",\n \"product\": clone_form.cleaned_data[\"product\"].id,\n \"product_version\": clone_form.cleaned_data[\"product_version\"].id,\n }\n url_args = urllib.parse.urlencode(args)\n return HttpResponseRedirect(\"{}?{}\".format(reverse(\"plans-all\"), url_args))\n else:\n # Generate the default values for the form\n if len(tps) == 1:\n clone_form = ClonePlanForm(\n initial={\n \"product\": tps[0].product_id,\n \"product_version\": tps[0].product_version_id,\n \"set_parent\": True,\n \"copy_texts\": True,\n \"copy_attachements\": True,\n \"copy_environment_group\": True,\n \"link_testcases\": True,\n \"copy_testcases\": False,\n \"maintain_case_orignal_author\": True,\n \"keep_case_default_tester\": False,\n \"name\": tps[0].make_cloned_name(),\n }\n )\n clone_form.populate(product_id=tps[0].product.id)\n else:\n clone_form = ClonePlanForm(\n initial={\n \"set_parent\": True,\n \"copy_texts\": True,\n \"copy_attachements\": True,\n \"link_testcases\": True,\n \"copy_testcases\": False,\n \"maintain_case_orignal_author\": True,\n \"keep_case_default_tester\": True,\n }\n )\n\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"testplans\": tps,\n \"clone_form\": clone_form,\n }\n return render(request, template_name, context=context_data)\n\n\ndef attachment(request, plan_id, template_name=\"plan/attachment.html\"):\n \"\"\"Manage attached files\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n file_size_limit = settings.MAX_UPLOAD_SIZE\n limit_readable = int(file_size_limit) / 2 ** 20 # Mb\n\n tp = get_object_or_404(TestPlan, plan_id=plan_id)\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"test_plan\": tp,\n \"limit\": file_size_limit,\n \"limit_readable\": str(limit_readable) + \"Mb\",\n }\n return render(request, template_name, context=context_data)\n\n\n@require_GET\ndef text_history(request, plan_id, template_name=\"plan/history.html\"):\n \"\"\"View test plan text history\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n tp = get_object_or_404(TestPlan, plan_id=int(plan_id))\n tptxts = tp.text.select_related(\"author\").only(\n \"plan\", \"create_date\", \"plan_text\", \"plan_text_version\", \"author__email\"\n )\n selected_plan_text_version = 
int(request.GET.get(\"plan_text_version\", 0))\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"testplan\": tp,\n \"test_plan_texts\": tptxts,\n \"select_plan_text_version\": selected_plan_text_version,\n }\n return render(request, template_name, context=context_data)\n\n\nclass ReorderCasesView(View):\n \"\"\"Reorder cases\"\"\"\n\n http_method_names = [\"post\"]\n\n def post(self, request, plan_id):\n # Current we should rewrite all of cases belong to the plan.\n # Because the cases sortkey in database is chaos,\n # Most of them are None.\n\n if \"case\" not in request.POST:\n return JsonResponseBadRequest({\"message\": \"At least one case is required to re-order.\"})\n\n plan = get_object_or_404(TestPlan, pk=int(plan_id))\n\n case_ids = [int(id) for id in request.POST.getlist(\"case\")]\n cases = TestCase.objects.filter(pk__in=case_ids).only(\"pk\")\n\n for case in cases:\n new_sort_key = (case_ids.index(case.pk) + 1) * 10\n TestCasePlan.objects.filter(plan=plan, case=case).update(sortkey=new_sort_key)\n\n return JsonResponse({})\n\n\nclass LinkCasesView(View):\n \"\"\"Link cases to plan\"\"\"\n\n permission_required = \"testcases.add_testcaseplan\"\n\n def post(self, request, plan_id):\n plan = get_object_or_404(TestPlan.objects.only(\"pk\"), pk=int(plan_id))\n case_ids = [int(id) for id in request.POST.getlist(\"case\")]\n cases = TestCase.objects.filter(case_id__in=case_ids).only(\"pk\")\n for case in cases:\n plan.add_case(case)\n return HttpResponseRedirect(reverse(\"plan-get\", args=[plan_id]))\n\n\nclass LinkCasesSearchView(View):\n \"\"\"Search cases for linking to plan\"\"\"\n\n template_name = \"plan/search_case.html\"\n SUB_MODULE_NAME = \"plans\"\n\n def get(self, request, plan_id):\n plan = get_object_or_404(TestPlan, pk=int(plan_id))\n\n normal_form = SearchCaseForm(\n initial={\n \"product\": plan.product_id,\n \"product_version\": plan.product_version_id,\n \"case_status_id\": TestCaseStatus.get(\"CONFIRMED\"),\n }\n )\n quick_form = QuickSearchCaseForm()\n return render(\n self.request,\n self.template_name,\n {\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"search_form\": normal_form,\n \"quick_form\": quick_form,\n \"test_plan\": plan,\n },\n )\n\n def post(self, request, plan_id):\n plan = get_object_or_404(TestPlan, pk=int(plan_id))\n\n search_mode = request.POST.get(\"search_mode\")\n if search_mode == \"quick\":\n form = quick_form = QuickSearchCaseForm(request.POST)\n normal_form = SearchCaseForm()\n else:\n form = normal_form = SearchCaseForm(request.POST)\n form.populate(product_id=request.POST.get(\"product\"))\n quick_form = QuickSearchCaseForm()\n\n if form.is_valid():\n cases = TestCase.list(form.cleaned_data)\n cases = (\n cases.select_related(\"author\", \"default_tester\", \"case_status\", \"priority\")\n .only(\n \"pk\",\n \"summary\",\n \"create_date\",\n \"author__email\",\n \"default_tester__email\",\n \"case_status__name\",\n \"priority__value\",\n )\n .exclude(case_id__in=plan.case.values_list(\"case_id\", flat=True))\n )\n\n context = {\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"test_plan\": plan,\n \"test_cases\": cases,\n \"search_form\": normal_form,\n \"quick_form\": quick_form,\n \"search_mode\": search_mode,\n }\n return render(request, self.template_name, context=context)\n\n\nclass ImportCasesView(PermissionRequiredMixin, View):\n \"\"\"Import cases to a plan\"\"\"\n\n permission_required = \"testcases.add_testcaseplan\"\n\n def post(self, request, 
plan_id):\n plan = get_object_or_404(TestPlan.objects.only(\"pk\"), pk=int(plan_id))\n next_url = reverse(\"plan-get\", args=[plan_id]) + \"#testcases\"\n xml_form = ImportCasesViaXMLForm(request.POST, request.FILES)\n if xml_form.is_valid():\n plan.import_cases(xml_form.cleaned_data[\"xml_file\"])\n return HttpResponseRedirect(next_url)\n else:\n return prompt.alert(request, xml_form.errors, next_url)\n\n\nclass DeleteCasesView(View):\n \"\"\"Delete selected cases from plan\"\"\"\n\n def post(self, request, plan_id):\n plan = get_object_or_404(TestPlan.objects.only(\"pk\"), pk=int(plan_id))\n\n if \"case\" not in request.POST:\n return JsonResponseBadRequest({\"message\": \"At least one case is required to delete.\"})\n\n cases = get_selected_testcases(request).only(\"pk\")\n\n # Log Action\n plan_log = TCMSLog(model=plan)\n for case in cases:\n plan_log.make(who=request.user, new_value=f\"Remove case {case.pk} from plan {plan.pk}\")\n case.log_action(who=request.user, new_value=f\"Remove from plan {plan.pk}\")\n plan.delete_case(case=case)\n\n return JsonResponse({})\n\n\nclass PlanComponentsActionView(View):\n \"\"\"Manage a plan's components\"\"\"\n\n template_name = \"plan/get_component.html\"\n\n def get(self, request):\n if \"plan\" not in request.GET:\n return HttpResponseBadRequest(\"Plan ID is not in request.\")\n plans = TestPlan.objects.filter(pk=int(request.GET[\"plan\"]))\n if not plans:\n return Http404(\"Plan ID {} does not exist.\".format(\", \".join(plans)))\n\n action = request.GET.get(\"a\", \"get_component_list\").lower()\n\n if action == \"get_form\":\n return self.get_manage_form(request, plans)\n elif action == \"get_component_list\":\n return self.get_default_component_list(request, plans[0])\n elif action == \"add\":\n return self.add(request, plans[0], self._get_components())\n elif action == \"remove\":\n components = self._get_components()\n return self.remove_components_from_plan(request, plans[0], components)\n elif action == \"update\":\n return self.update_components(request, plans[0])\n\n def _get_components(self):\n if \"component\" not in self.request.GET:\n return HttpResponseBadRequest(\"Component ID is not in request.\")\n component_ids = [int(id) for id in self.request.GET.getlist(\"component\")]\n return Component.objects.filter(pk__in=component_ids)\n\n @method_decorator(permission_required(\"testplans.add_testplancomponent\"))\n def add(self, request, plan, components):\n \"\"\"Add components to given plans\"\"\"\n list(map(plan.add_component, components))\n\n @method_decorator(permission_required(\"testplans.delete_testplancomponent\"))\n def remove_components_from_plan(self, request, plan, components=None):\n \"\"\"Remove existing components from plans\n\n :param plan: instance of TestPlan, from which to remove components\n from this plan.\n :param components: instances of Component, which will be removed.\n \"\"\"\n if components is None:\n TestPlanComponent.objects.filter(plan=plan).delete()\n else:\n list(map(plan.remove_component, components))\n\n return self.get_default_component_list(request, plan)\n\n def update_components(self, request, plan):\n self.remove_components_from_plan(request, plan)\n self.add(request, plan, self._get_components())\n return self.get_default_component_list(request, plan)\n\n def get_manage_form(self, request, plans):\n \"\"\"Return form content in order to select components\"\"\"\n plan_comps = TestPlanComponent.objects.filter(plan__in=plans)\n\n form = PlanComponentForm(\n tps=plans,\n initial={\n 
\"component\": plan_comps.values_list(\"component_id\", flat=True),\n },\n )\n\n q_format = request.GET.get(\"format\", \"p\")\n html = getattr(form, \"as_\" + q_format)\n\n return HttpResponse(html())\n\n def get_default_component_list(self, request, plan):\n return render(request, self.template_name, context={\"test_plan\": plan})\n\n\n@require_GET\ndef printable(request, template_name=\"plan/printable.html\"):\n \"\"\"Create the printable copy for plan\"\"\"\n plan_pks = request.GET.getlist(\"plan\")\n\n if not plan_pks:\n return prompt.info(request, \"At least one target is required.\")\n\n tps = TestPlan.objects.filter(pk__in=plan_pks).only(\"pk\", \"name\")\n\n def plan_generator():\n repeat = len(plan_pks)\n params_sql = \",\".join(itertools.repeat(\"%s\", repeat))\n sql = sqls.TP_PRINTABLE_CASE_TEXTS % (params_sql, params_sql)\n result_set = SQLExecution(sql, plan_pks * 2)\n group_data = itertools.groupby(result_set.rows, itemgetter(\"plan_id\"))\n cases_dict = {key: list(values) for key, values in group_data}\n for tp in tps:\n tp.result_set = cases_dict.get(tp.plan_id, None)\n yield tp\n\n context_data = {\n \"test_plans\": plan_generator(),\n }\n\n return render(request, template_name, context=context_data)\n\n\n@require_GET\ndef export(request, template_name=\"case/export.xml\"):\n \"\"\"Export the plan\"\"\"\n plan_pks = list(map(int, request.GET.getlist(\"plan\")))\n\n if not plan_pks:\n return prompt.info(request, \"At least one target is required.\")\n\n context_data = {\n \"cases_info\": get_exported_cases_and_related_data(plan_pks),\n }\n\n timestamp = datetime.datetime.now()\n timestamp_str = \"%02i-%02i-%02i\" % (timestamp.year, timestamp.month, timestamp.day)\n\n response = render(request, template_name, context=context_data)\n filename = f\"tcms-testcases-{timestamp_str}.xml\"\n response[\"Content-Disposition\"] = f\"attachment; filename={filename}\"\n return response\n\n\n@require_GET\ndef construct_plans_treeview(request, plan_id):\n \"\"\"Construct a plan's tree view\"\"\"\n plan = get_object_or_404(TestPlan, pk=plan_id)\n\n tree_plan_ids = plan.get_ancestor_ids() + plan.get_descendant_ids()\n tree_plan_ids.append(plan.pk)\n\n plans = (\n TestPlan.objects.filter(pk__in=tree_plan_ids)\n .only(\"pk\", \"name\", \"parent_id\")\n .order_by(\"parent_id\", \"pk\")\n )\n\n plans = TestPlan.apply_subtotal(plans, cases_count=True, runs_count=True, children_count=True)\n\n return render(\n request,\n \"plan/get_treeview.html\",\n context={\"current_plan_id\": plan_id, \"plans\": plans},\n )\n\n\n@login_required\n@require_POST\ndef treeview_add_child_plans(request: HttpRequest, plan_id: int):\n plan = TestPlan.objects.filter(pk=plan_id).only(\"pk\").first()\n if plan is None:\n return JsonResponseNotFound({\"message\": f\"Plan {plan_id} does not exist.\"})\n\n child_plan_ids: List[str] = request.POST.getlist(\"children\")\n child_plans: List[TestPlan] = []\n\n ancestor_ids = plan.get_ancestor_ids()\n descendant_ids = plan.get_descendant_ids()\n\n for child_plan_id in child_plan_ids:\n if not child_plan_id.isdigit():\n return JsonResponseBadRequest(\n {\"message\": f\"Child plan id {child_plan_id} is not a number.\"}\n )\n child_plan: TestPlan = TestPlan.objects.filter(pk=int(child_plan_id)).only(\"pk\").first()\n if child_plan is None:\n return JsonResponseBadRequest(\n {\"message\": f\"Child plan {child_plan_id} does not exist.\"}\n )\n if child_plan.pk in ancestor_ids:\n return JsonResponseBadRequest(\n {\"message\": f\"Plan {child_plan_id} is an ancestor of \" f\"plan 
{plan_id} already.\"}\n )\n if child_plan.pk in descendant_ids:\n return JsonResponseBadRequest(\n {\"message\": f\"Plan {child_plan_id} is a descendant of \" f\"plan {plan_id} already.\"}\n )\n\n child_plans.append(child_plan)\n\n for child_plan in child_plans:\n child_plan.parent = plan\n child_plan.save(update_fields=[\"parent\"])\n\n return JsonResponse(\n {\"parent_plan\": plan.pk, \"children_plans\": [plan.pk for plan in child_plans]}\n )\n\n\n@login_required\n@require_POST\ndef treeview_remove_child_plans(request, plan_id: int):\n plan: TestPlan = TestPlan.objects.filter(pk=plan_id).only(\"pk\").first()\n if plan is None:\n return JsonResponseNotFound({\"message\": f\"Plan {plan_id} does not exist.\"})\n\n child_plan_ids: Set[int] = set(map(int, request.POST.getlist(\"children\")))\n direct_descendants = set(plan.get_descendant_ids(True))\n ids_to_remove = child_plan_ids & direct_descendants\n\n if ids_to_remove:\n TestPlan.objects.filter(pk__in=ids_to_remove).update(parent=None)\n\n return JsonResponse(\n {\n \"parent_plan\": plan.pk,\n \"removed\": sorted(ids_to_remove),\n \"non_descendants\": sorted(child_plan_ids - direct_descendants),\n }\n )\n\n\nclass PlanTreeChangeParentView(PermissionRequiredMixin, View):\n \"\"\"Plan tree view to change a plan's parent\"\"\"\n\n permission_required = \"testplans.change_testplan\"\n\n def handle_no_permission(self):\n return JsonResponseBadRequest(\n {\"message\": \"You do not have permission to change the parent plan.\"}\n )\n\n def patch(self, request, *args, **kwargs):\n plan: TestPlan = TestPlan.objects.filter(pk=self.kwargs[\"plan_id\"]).only(\"pk\").first()\n if plan is None:\n return JsonResponseNotFound(\n {\n \"message\": f\"Cannot change parent of plan, \"\n f\"whose id {self.kwargs['plan_id']} does not exist.\"\n }\n )\n\n data = json.loads(request.body)\n user_input: Optional[str] = data.get(\"parent\")\n if user_input is None:\n return JsonResponseBadRequest({\"message\": \"Missing parent plan id.\"})\n if not isinstance(user_input, int):\n return JsonResponseBadRequest(\n {\"message\": f'The given parent plan id \"{user_input}\" is not a positive integer.'}\n )\n parent_id = int(user_input)\n new_parent = TestPlan.objects.filter(pk=parent_id).only(\"parent\").first()\n if new_parent is None:\n return JsonResponseBadRequest(\n {\"message\": f\"The parent plan id {parent_id} does not exist.\"}\n )\n\n descendant_ids = plan.get_descendant_ids()\n if parent_id in descendant_ids:\n return JsonResponseBadRequest(\n {\n \"message\": f\"The parent plan {parent_id} is a descendant of plan {plan.pk} already.\"\n }\n )\n\n original_value = plan.parent.pk if plan.parent else \"None\"\n\n plan.parent = new_parent\n plan.save(update_fields=[\"parent\"])\n plan.log_action(\n who=request.user,\n field=\"parent\",\n original_value=original_value,\n new_value=str(new_parent.pk),\n )\n\n return JsonResponse({})\n\n\nclass SetPlanActiveView(PermissionRequiredMixin, View):\n \"\"\"Set a test plan active or inactive\"\"\"\n\n permission_required = \"testplans.change_testplan\"\n raise_exception = True\n enable: bool = True\n\n def patch(self, request, *args, **kwargs):\n plan_id = self.kwargs[\"plan_id\"]\n plan: TestPlan = TestPlan.objects.filter(pk=plan_id).only(\"is_active\").first()\n if not plan:\n return JsonResponseNotFound({\"message\": f\"Plan id {plan_id} does not exist.\"})\n original_value: str = str(plan.is_active)\n plan.is_active = self.enable\n plan.save(update_fields=[\"is_active\"])\n plan.log_action(\n who=request.user,\n 
field=\"is_active\",\n original_value=original_value,\n new_value=str(plan.is_active),\n )\n return JsonResponse({})\n", "path": "src/tcms/testplans/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport functools\nimport itertools\nimport json\nimport urllib\nfrom operator import add, itemgetter\nfrom typing import List, Optional, Set\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import (\n Http404,\n HttpRequest,\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponsePermanentRedirect,\n HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.http import require_GET, require_http_methods, require_POST\nfrom django.views.generic import View\nfrom django.views.generic.base import TemplateView\nfrom uuslug import slugify\n\nfrom tcms.core.db import SQLExecution\nfrom tcms.core.models import TCMSLog\nfrom tcms.core.responses import JsonResponseBadRequest, JsonResponseNotFound\nfrom tcms.core.utils import DataTableResult, checksum\nfrom tcms.core.views import prompt\nfrom tcms.management.models import Component, TCMSEnvGroup\nfrom tcms.testcases.data import get_exported_cases_and_related_data\nfrom tcms.testcases.forms import QuickSearchCaseForm, SearchCaseForm\nfrom tcms.testcases.models import TestCase, TestCasePlan, TestCaseStatus\nfrom tcms.testcases.views import get_selected_testcases\nfrom tcms.testplans import sqls\nfrom tcms.testplans.forms import (\n ClonePlanForm,\n EditPlanForm,\n ImportCasesViaXMLForm,\n NewPlanForm,\n PlanComponentForm,\n SearchPlanForm,\n)\nfrom tcms.testplans.models import TestPlan, TestPlanComponent\nfrom tcms.testruns.models import TestCaseRun, TestRun\n\nMODULE_NAME = \"testplans\"\n\n# _____________________________________________________________________________\n# helper functons\n\n\ndef update_plan_email_settings(tp, form):\n \"\"\"Update testplan's email settings\"\"\"\n tp.email_settings.notify_on_plan_update = form.cleaned_data[\"notify_on_plan_update\"]\n tp.email_settings.notify_on_plan_delete = form.cleaned_data[\"notify_on_plan_delete\"]\n tp.email_settings.notify_on_case_update = form.cleaned_data[\"notify_on_case_update\"]\n tp.email_settings.auto_to_plan_owner = form.cleaned_data[\"auto_to_plan_owner\"]\n tp.email_settings.auto_to_plan_author = form.cleaned_data[\"auto_to_plan_author\"]\n tp.email_settings.auto_to_case_owner = form.cleaned_data[\"auto_to_case_owner\"]\n tp.email_settings.auto_to_case_default_tester = form.cleaned_data[\"auto_to_case_default_tester\"]\n tp.email_settings.save()\n\n\n# _____________________________________________________________________________\n# view functions\n\n\nclass CreateNewPlanView(PermissionRequiredMixin, View):\n \"\"\"Create a new test plan view\"\"\"\n\n sub_module_name = \"new_plan\"\n template_name = \"plan/new.html\"\n permission_required = (\n \"testplans.add_testplan\",\n \"testplans.add_testplantext\",\n \"testplans.add_tcmsenvplanmap\",\n )\n\n def make_response(self, form):\n return render(\n self.request,\n self.template_name,\n context={\n \"module\": MODULE_NAME,\n \"sub_module\": 
self.sub_module_name,\n \"form\": form,\n },\n )\n\n def get(self, request):\n return self.make_response(NewPlanForm())\n\n @method_decorator(csrf_protect)\n def post(self, request):\n form = NewPlanForm(request.POST, request.FILES)\n form.populate(product_id=request.POST.get(\"product\"))\n\n if not form.is_valid():\n return self.make_response(form)\n\n # Process the upload plan document\n if form.cleaned_data.get(\"upload_plan_text\"):\n # A document is uploaded to provide the document content. Load the\n # page again in order to show the content.\n initial_data = {\n \"name\": form.cleaned_data[\"name\"],\n \"type\": form.cleaned_data[\"type\"].pk,\n \"product\": form.cleaned_data[\"product\"].pk,\n \"product_version\": form.cleaned_data[\"product_version\"].pk,\n \"extra_link\": form.cleaned_data[\"extra_link\"],\n \"text\": form.cleaned_data[\"text\"],\n }\n if form.cleaned_data[\"env_group\"]:\n initial_data[\"env_group\"] = form.cleaned_data[\"env_group\"].pk\n return self.make_response(NewPlanForm(initial=initial_data))\n\n # Process the test plan submit to the form\n tp = TestPlan.objects.create(\n product=form.cleaned_data[\"product\"],\n author=request.user,\n owner=request.user,\n product_version=form.cleaned_data[\"product_version\"],\n type=form.cleaned_data[\"type\"],\n name=form.cleaned_data[\"name\"],\n create_date=datetime.datetime.now(),\n extra_link=form.cleaned_data[\"extra_link\"],\n parent=form.cleaned_data[\"parent\"],\n )\n\n tp.add_text(author=request.user, plan_text=form.cleaned_data[\"text\"])\n\n # Add test plan environment groups\n if request.POST.get(\"env_group\"):\n env_groups = TCMSEnvGroup.objects.filter(id__in=request.POST.getlist(\"env_group\"))\n\n for env_group in env_groups:\n tp.add_env_group(env_group=env_group)\n\n return HttpResponseRedirect(reverse(\"plan-get\", args=[tp.plan_id]))\n\n\n@require_GET\n@permission_required(\"testplans.delete_testplan\")\ndef delete(request, plan_id):\n \"\"\"Delete testplan\"\"\"\n if request.GET.get(\"sure\", \"no\") == \"no\":\n # TODO: rewrite the response\n plan_delete_url = reverse(\"plan-delete\", args=[plan_id])\n return HttpResponse(\n \"<script>\"\n \"if (confirm('Are you sure you want to delete this plan %s?\\\\n\\\\n\"\n \"Click OK to delete or cancel to come back'))\"\n \"{ window.location.href='%s?sure=yes' }\"\n \"else { history.go(-1) }\"\n \"</script>\" % (plan_id, plan_delete_url)\n )\n elif request.GET.get(\"sure\") == \"yes\":\n tp = get_object_or_404(TestPlan, plan_id=plan_id)\n\n try:\n tp.delete()\n return HttpResponse(\n \"<script>window.location.href='%s'</script>\" % reverse(\"tcms.testplans.views.all\")\n )\n except Exception:\n return prompt.info(request, \"Delete failed.\")\n else:\n return prompt.info(request, \"Nothing yet.\")\n\n\nclass SimplePlansFilterView(TemplateView):\n \"\"\"Providing base plans filter functionaity\"\"\"\n\n # Subclass should provide a concrete template to render the final content.\n # Or, pass the template path to argument template_name of View.as_view()\n template_name = None\n\n def filter_plans(self):\n search_form = SearchPlanForm(self.request.GET)\n product_id = self.request.GET.get(\"product\")\n search_form.populate(int(product_id) if product_id else None)\n\n plans = TestPlan.objects.none()\n\n if search_form.is_valid():\n # Determine the query is the user's plans and change the sub module value\n author = self.request.GET.get(\"author__email__startswith\")\n req_user = self.request.user\n if req_user.is_authenticated and author in (\n 
req_user.username,\n req_user.email,\n ):\n self.SUB_MODULE_NAME = \"my_plans\"\n\n plans = (\n TestPlan.list(search_form.cleaned_data)\n .select_related(\"author\", \"type\", \"product\")\n .order_by(\"-create_date\")\n )\n\n plans = TestPlan.apply_subtotal(\n plans,\n cases_count=True,\n runs_count=True,\n children_count=True,\n )\n\n return search_form, plans\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"search_plan_form\"], context[\"plans\"] = self.filter_plans()\n return context\n\n\nclass SearchPlansView(SimplePlansFilterView):\n \"\"\"Used to filter test plans\"\"\"\n\n SUB_MODULE_NAME = \"plans\"\n template_name = \"plan/all.html\"\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"object_list\": context[\"plans\"][0:20],\n \"plans_count\": context[\"plans\"].count(),\n }\n )\n return context\n\n\nclass SearchPlansPagesView(SimplePlansFilterView):\n\n template_name = \"plan/common/json_plans.txt\"\n column_names = [\n \"\",\n \"plan_id\",\n \"name\",\n \"author__username\",\n \"owner__username\",\n \"product\",\n \"product_version\",\n \"type\",\n \"cases_count\",\n \"runs_count\",\n \"\",\n ]\n\n def get(self, request, *args, **kwargs):\n _, plans = self.filter_plans()\n dt = DataTableResult(request.GET, plans, self.column_names)\n data = dt.get_response_data()\n resp_data = get_template(self.template_name).render(data, request)\n return JsonResponse(json.loads(resp_data))\n\n\ndef get(request, plan_id, slug=None, template_name=\"plan/get.html\"):\n \"\"\"Display the plan details.\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n try:\n tp = TestPlan.objects.select_related().get(plan_id=plan_id)\n tp.latest_text = tp.latest_text()\n except ObjectDoesNotExist:\n raise Http404\n\n # redirect if has a cheated slug\n if slug != slugify(tp.name):\n return HttpResponsePermanentRedirect(tp.get_absolute_url())\n\n # Initial the case counter\n confirm_status_name = \"CONFIRMED\"\n tp.run_case = tp.case.filter(case_status__name=confirm_status_name)\n tp.review_case = tp.case.exclude(case_status__name=confirm_status_name)\n\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"test_plan\": tp,\n \"xml_form\": ImportCasesViaXMLForm(),\n }\n return render(request, template_name, context=context_data)\n\n\nclass AddCasesToRunsView(PermissionRequiredMixin, View):\n \"\"\"View of adding cases to runs\"\"\"\n\n SUB_MODULE_NAME = \"runs\"\n permission_required = \"testruns.change_testrun\"\n template_name = \"plan/choose_testrun.html\"\n\n def get(self, request, plan_id):\n plan = TestPlan.objects.filter(pk=int(plan_id)).defer(\"product_version\").first()\n if plan is None:\n raise Http404\n\n # TODO: replace with plan.run.values(...)\n runs = TestRun.objects.filter(plan=plan_id).values(\n \"pk\", \"summary\", \"build__name\", \"manager__username\"\n )\n\n cases = get_selected_testcases(request).values(\n \"pk\",\n \"summary\",\n \"author__username\",\n \"create_date\",\n \"category__name\",\n \"priority__value\",\n )\n\n return render(\n request,\n self.template_name,\n context={\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"plan_id\": plan_id,\n \"plan\": plan,\n \"test_runs\": runs.iterator(),\n \"test_cases\": cases,\n },\n )\n\n def 
post(self, request, plan_id):\n choosed_testrun_ids = request.POST.getlist(\"run\")\n to_be_added_cases = TestCase.objects.filter(pk__in=request.POST.getlist(\"case\"))\n\n plan_url = reverse(\"plan-get\", args=[plan_id])\n\n # cases and runs are required in this process\n if not len(choosed_testrun_ids) or not len(to_be_added_cases):\n return prompt.info(\n request,\n \"At least one test run and one case is required to add cases to runs.\",\n plan_url,\n )\n\n # Adding cases to runs by recursion\n for tr_id in choosed_testrun_ids:\n testrun = get_object_or_404(TestRun, run_id=tr_id)\n cases = TestCaseRun.objects.filter(run=tr_id)\n exist_cases_id = cases.values_list(\"case\", flat=True)\n\n for testcase in to_be_added_cases:\n if testcase.case_id not in exist_cases_id:\n testrun.add_case_run(case=testcase)\n\n estimated_time = functools.reduce(add, [nc.estimated_time for nc in to_be_added_cases])\n testrun.estimated_time = testrun.estimated_time + estimated_time\n testrun.save()\n\n return HttpResponseRedirect(plan_url)\n\n\n@require_http_methods([\"GET\", \"POST\"])\n@permission_required(\"testplans.change_testplan\")\ndef edit(request, plan_id, template_name=\"plan/edit.html\"):\n \"\"\"Edit test plan view\"\"\"\n # Define the default sub module\n SUB_MODULE_NAME = \"plans\"\n\n try:\n tp = TestPlan.objects.select_related().get(plan_id=plan_id)\n except ObjectDoesNotExist:\n raise Http404\n\n # If the form is submitted\n if request.method == \"POST\":\n form = EditPlanForm(request.POST, request.FILES)\n if request.POST.get(\"product\"):\n form.populate(product_id=request.POST[\"product\"])\n else:\n form.populate()\n\n # FIXME: Error handle\n if form.is_valid():\n if form.cleaned_data.get(\"upload_plan_text\"):\n # Set the summary form field to the uploaded text\n form.data[\"text\"] = form.cleaned_data[\"text\"]\n\n # Generate the form\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"form\": form,\n \"test_plan\": tp,\n }\n return render(request, template_name, context=context_data)\n\n if request.user.has_perm(\"testplans.change_testplan\"):\n tp.name = form.cleaned_data[\"name\"]\n tp.parent = form.cleaned_data[\"parent\"]\n tp.product = form.cleaned_data[\"product\"]\n tp.product_version = form.cleaned_data[\"product_version\"]\n tp.type = form.cleaned_data[\"type\"]\n tp.is_active = form.cleaned_data[\"is_active\"]\n tp.extra_link = form.cleaned_data[\"extra_link\"]\n tp.owner = form.cleaned_data[\"owner\"]\n # IMPORTANT! 
tp.current_user is an instance attribute,\n # added so that in post_save, current logged-in user info\n # can be accessed.\n # Instance attribute is usually not a desirable solution.\n tp.current_user = request.user\n tp.save()\n\n if request.user.has_perm(\"testplans.add_testplantext\"):\n new_text = request.POST.get(\"text\")\n text_checksum = checksum(new_text)\n\n if not tp.text_exist() or text_checksum != tp.text_checksum():\n tp.add_text(\n author=request.user,\n plan_text=request.POST.get(\"text\"),\n text_checksum=text_checksum,\n )\n\n if request.user.has_perm(\"management.change_tcmsenvplanmap\"):\n tp.clear_env_groups()\n\n if request.POST.get(\"env_group\"):\n env_groups = TCMSEnvGroup.objects.filter(\n id__in=request.POST.getlist(\"env_group\")\n )\n\n for env_group in env_groups:\n tp.add_env_group(env_group=env_group)\n # Update plan email settings\n update_plan_email_settings(tp, form)\n return HttpResponseRedirect(reverse(\"plan-get\", args=[plan_id, slugify(tp.name)]))\n else:\n # Generate a blank form\n # Temporary use one environment group in this case\n if tp.env_group.all():\n for env_group in tp.env_group.all():\n env_group_id = env_group.id\n break\n else:\n env_group_id = None\n\n form = EditPlanForm(\n initial={\n \"name\": tp.name,\n \"product\": tp.product_id,\n \"product_version\": tp.product_version_id,\n \"type\": tp.type_id,\n \"text\": tp.latest_text() and tp.latest_text().plan_text or \"\",\n \"parent\": tp.parent_id,\n \"env_group\": env_group_id,\n \"is_active\": tp.is_active,\n \"extra_link\": tp.extra_link,\n \"owner\": tp.owner,\n \"auto_to_plan_owner\": tp.email_settings.auto_to_plan_owner,\n \"auto_to_plan_author\": tp.email_settings.auto_to_plan_author,\n \"auto_to_case_owner\": tp.email_settings.auto_to_case_owner,\n \"auto_to_case_default_tester\": tp.email_settings.auto_to_case_default_tester,\n \"notify_on_plan_update\": tp.email_settings.notify_on_plan_update,\n \"notify_on_case_update\": tp.email_settings.notify_on_case_update,\n \"notify_on_plan_delete\": tp.email_settings.notify_on_plan_delete,\n }\n )\n form.populate(product_id=tp.product_id)\n\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"test_plan\": tp,\n \"form\": form,\n }\n return render(request, template_name, context=context_data)\n\n\n@require_http_methods([\"GET\", \"POST\"])\n@permission_required(\"testplans.add_testplan\")\ndef clone(request, template_name=\"plan/clone.html\"):\n \"\"\"Clone testplan\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n req_data = request.GET or request.POST\n if \"plan\" not in req_data:\n return prompt.info(\n request,\n \"At least one plan is required by clone function.\",\n )\n\n tps = TestPlan.objects.filter(pk__in=req_data.getlist(\"plan\")).order_by(\"-pk\")\n\n if not tps:\n return prompt.info(\n request,\n \"The plan you specify does not exist in database.\",\n )\n\n # Clone the plan if the form is submitted\n if request.method == \"POST\":\n clone_form = ClonePlanForm(request.POST)\n clone_form.populate(product_id=request.POST.get(\"product_id\"))\n\n if clone_form.is_valid():\n clone_options = clone_form.cleaned_data\n\n # Create new test plan.\n for tp in tps:\n\n new_name = len(tps) == 1 and clone_options[\"name\"] or None\n\n clone_params = {\n # Cloned plan properties\n \"new_name\": new_name,\n \"product\": clone_options[\"product\"],\n \"version\": clone_options[\"product_version\"],\n \"set_parent\": clone_options[\"set_parent\"],\n # Related data\n \"copy_texts\": clone_options[\"copy_texts\"],\n 
\"copy_attachments\": clone_options[\"copy_attachements\"],\n \"copy_environment_group\": clone_options[\"copy_environment_group\"],\n # Link or copy cases\n \"link_cases\": clone_options[\"link_testcases\"],\n \"copy_cases\": clone_options[\"copy_testcases\"],\n \"default_component_initial_owner\": request.user,\n }\n\n assign_me_as_plan_author = not clone_options[\"keep_orignal_author\"]\n if assign_me_as_plan_author:\n clone_params[\"new_original_author\"] = request.user\n\n assign_me_as_copied_case_author = (\n clone_options[\"copy_testcases\"]\n and not clone_options[\"maintain_case_orignal_author\"]\n )\n if assign_me_as_copied_case_author:\n clone_params[\"new_case_author\"] = request.user\n\n assign_me_as_copied_case_default_tester = (\n clone_options[\"copy_testcases\"]\n and not clone_options[\"keep_case_default_tester\"]\n )\n if assign_me_as_copied_case_default_tester:\n clone_params[\"new_case_default_tester\"] = request.user\n\n assign_me_as_text_author = not clone_options[\"copy_texts\"]\n if assign_me_as_text_author:\n clone_params[\"default_text_author\"] = request.user\n\n cloned_plan = tp.clone(**clone_params)\n\n if len(tps) == 1:\n return HttpResponseRedirect(reverse(\"plan-get\", args=[cloned_plan.plan_id]))\n else:\n args = {\n \"action\": \"search\",\n \"product\": clone_form.cleaned_data[\"product\"].id,\n \"product_version\": clone_form.cleaned_data[\"product_version\"].id,\n }\n url_args = urllib.parse.urlencode(args)\n return HttpResponseRedirect(\"{}?{}\".format(reverse(\"plans-all\"), url_args))\n else:\n # Generate the default values for the form\n if len(tps) == 1:\n clone_form = ClonePlanForm(\n initial={\n \"product\": tps[0].product_id,\n \"product_version\": tps[0].product_version_id,\n \"set_parent\": True,\n \"copy_texts\": True,\n \"copy_attachements\": True,\n \"copy_environment_group\": True,\n \"link_testcases\": True,\n \"copy_testcases\": False,\n \"maintain_case_orignal_author\": True,\n \"keep_case_default_tester\": False,\n \"name\": tps[0].make_cloned_name(),\n }\n )\n clone_form.populate(product_id=tps[0].product.id)\n else:\n clone_form = ClonePlanForm(\n initial={\n \"set_parent\": True,\n \"copy_texts\": True,\n \"copy_attachements\": True,\n \"link_testcases\": True,\n \"copy_testcases\": False,\n \"maintain_case_orignal_author\": True,\n \"keep_case_default_tester\": True,\n }\n )\n\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"testplans\": tps,\n \"clone_form\": clone_form,\n }\n return render(request, template_name, context=context_data)\n\n\ndef attachment(request, plan_id, template_name=\"plan/attachment.html\"):\n \"\"\"Manage attached files\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n file_size_limit = settings.MAX_UPLOAD_SIZE\n limit_readable = int(file_size_limit) / 2 ** 20 # Mb\n\n tp = get_object_or_404(TestPlan, plan_id=plan_id)\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"test_plan\": tp,\n \"limit\": file_size_limit,\n \"limit_readable\": str(limit_readable) + \"Mb\",\n }\n return render(request, template_name, context=context_data)\n\n\n@require_GET\ndef text_history(request, plan_id, template_name=\"plan/history.html\"):\n \"\"\"View test plan text history\"\"\"\n SUB_MODULE_NAME = \"plans\"\n\n tp = get_object_or_404(TestPlan, plan_id=int(plan_id))\n tptxts = tp.text.select_related(\"author\").only(\n \"plan\", \"create_date\", \"plan_text\", \"plan_text_version\", \"author__email\"\n )\n selected_plan_text_version = 
int(request.GET.get(\"plan_text_version\", 0))\n context_data = {\n \"module\": MODULE_NAME,\n \"sub_module\": SUB_MODULE_NAME,\n \"testplan\": tp,\n \"test_plan_texts\": tptxts,\n \"select_plan_text_version\": selected_plan_text_version,\n }\n return render(request, template_name, context=context_data)\n\n\nclass ReorderCasesView(View):\n \"\"\"Reorder cases\"\"\"\n\n http_method_names = [\"post\"]\n\n def post(self, request, plan_id):\n # Current we should rewrite all of cases belong to the plan.\n # Because the cases sortkey in database is chaos,\n # Most of them are None.\n\n if \"case\" not in request.POST:\n return JsonResponseBadRequest({\"message\": \"At least one case is required to re-order.\"})\n\n plan = get_object_or_404(TestPlan, pk=int(plan_id))\n\n case_ids = [int(id) for id in request.POST.getlist(\"case\")]\n cases = TestCase.objects.filter(pk__in=case_ids).only(\"pk\")\n\n for case in cases:\n new_sort_key = (case_ids.index(case.pk) + 1) * 10\n TestCasePlan.objects.filter(plan=plan, case=case).update(sortkey=new_sort_key)\n\n return JsonResponse({})\n\n\nclass LinkCasesView(View):\n \"\"\"Link cases to plan\"\"\"\n\n permission_required = \"testcases.add_testcaseplan\"\n\n def post(self, request, plan_id):\n plan = get_object_or_404(TestPlan.objects.only(\"pk\"), pk=int(plan_id))\n case_ids = [int(id) for id in request.POST.getlist(\"case\")]\n cases = TestCase.objects.filter(case_id__in=case_ids).only(\"pk\")\n for case in cases:\n plan.add_case(case)\n return HttpResponseRedirect(reverse(\"plan-get\", args=[plan_id]))\n\n\nclass LinkCasesSearchView(View):\n \"\"\"Search cases for linking to plan\"\"\"\n\n template_name = \"plan/search_case.html\"\n SUB_MODULE_NAME = \"plans\"\n\n def get(self, request, plan_id):\n plan = get_object_or_404(TestPlan, pk=int(plan_id))\n\n normal_form = SearchCaseForm(\n initial={\n \"product\": plan.product_id,\n \"product_version\": plan.product_version_id,\n \"case_status_id\": TestCaseStatus.get(\"CONFIRMED\"),\n }\n )\n quick_form = QuickSearchCaseForm()\n return render(\n self.request,\n self.template_name,\n {\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"search_form\": normal_form,\n \"quick_form\": quick_form,\n \"test_plan\": plan,\n },\n )\n\n def post(self, request, plan_id):\n plan = get_object_or_404(TestPlan, pk=int(plan_id))\n\n search_mode = request.POST.get(\"search_mode\")\n if search_mode == \"quick\":\n form = quick_form = QuickSearchCaseForm(request.POST)\n normal_form = SearchCaseForm()\n else:\n form = normal_form = SearchCaseForm(request.POST)\n form.populate(product_id=request.POST.get(\"product\"))\n quick_form = QuickSearchCaseForm()\n\n if form.is_valid():\n cases = TestCase.list(form.cleaned_data)\n cases = (\n cases.select_related(\"author\", \"default_tester\", \"case_status\", \"priority\")\n .only(\n \"pk\",\n \"summary\",\n \"create_date\",\n \"author__email\",\n \"default_tester__email\",\n \"case_status__name\",\n \"priority__value\",\n )\n .exclude(case_id__in=plan.case.values_list(\"case_id\", flat=True))\n )\n\n context = {\n \"module\": MODULE_NAME,\n \"sub_module\": self.SUB_MODULE_NAME,\n \"test_plan\": plan,\n \"test_cases\": cases,\n \"search_form\": normal_form,\n \"quick_form\": quick_form,\n \"search_mode\": search_mode,\n }\n return render(request, self.template_name, context=context)\n\n\nclass ImportCasesView(PermissionRequiredMixin, View):\n \"\"\"Import cases to a plan\"\"\"\n\n permission_required = \"testcases.add_testcaseplan\"\n\n def post(self, request, 
plan_id):\n plan = get_object_or_404(TestPlan.objects.only(\"pk\"), pk=int(plan_id))\n next_url = reverse(\"plan-get\", args=[plan_id]) + \"#testcases\"\n xml_form = ImportCasesViaXMLForm(request.POST, request.FILES)\n if xml_form.is_valid():\n plan.import_cases(xml_form.cleaned_data[\"xml_file\"])\n return HttpResponseRedirect(next_url)\n else:\n return prompt.alert(request, xml_form.errors, next_url)\n\n\nclass DeleteCasesView(View):\n \"\"\"Delete selected cases from plan\"\"\"\n\n def post(self, request, plan_id):\n plan = get_object_or_404(TestPlan.objects.only(\"pk\"), pk=int(plan_id))\n\n if \"case\" not in request.POST:\n return JsonResponseBadRequest({\"message\": \"At least one case is required to delete.\"})\n\n cases = get_selected_testcases(request).only(\"pk\")\n\n # Log Action\n plan_log = TCMSLog(model=plan)\n for case in cases:\n plan_log.make(who=request.user, new_value=f\"Remove case {case.pk} from plan {plan.pk}\")\n case.log_action(who=request.user, new_value=f\"Remove from plan {plan.pk}\")\n plan.delete_case(case=case)\n\n return JsonResponse({})\n\n\nclass PlanComponentsActionView(View):\n \"\"\"Manage a plan's components\"\"\"\n\n template_name = \"plan/get_component.html\"\n\n def get(self, request):\n if \"plan\" not in request.GET:\n return HttpResponseBadRequest(\"Plan ID is not in request.\")\n plans = TestPlan.objects.filter(pk=int(request.GET[\"plan\"]))\n if not plans:\n return Http404(\"Plan ID {} does not exist.\".format(\", \".join(plans)))\n\n action = request.GET.get(\"a\", \"get_component_list\").lower()\n\n if action == \"get_form\":\n return self.get_manage_form(request, plans)\n elif action == \"get_component_list\":\n return self.get_default_component_list(request, plans[0])\n elif action == \"add\":\n return self.add(request, plans[0], self._get_components())\n elif action == \"remove\":\n components = self._get_components()\n return self.remove_components_from_plan(request, plans[0], components)\n elif action == \"update\":\n return self.update_components(request, plans[0])\n\n def _get_components(self):\n if \"component\" not in self.request.GET:\n return HttpResponseBadRequest(\"Component ID is not in request.\")\n component_ids = [int(id) for id in self.request.GET.getlist(\"component\")]\n return Component.objects.filter(pk__in=component_ids)\n\n @method_decorator(permission_required(\"testplans.add_testplancomponent\"))\n def add(self, request, plan, components):\n \"\"\"Add components to given plans\"\"\"\n list(map(plan.add_component, components))\n\n @method_decorator(permission_required(\"testplans.delete_testplancomponent\"))\n def remove_components_from_plan(self, request, plan, components=None):\n \"\"\"Remove existing components from plans\n\n :param plan: instance of TestPlan, from which to remove components\n from this plan.\n :param components: instances of Component, which will be removed.\n \"\"\"\n if components is None:\n TestPlanComponent.objects.filter(plan=plan).delete()\n else:\n list(map(plan.remove_component, components))\n\n return self.get_default_component_list(request, plan)\n\n def update_components(self, request, plan):\n self.remove_components_from_plan(request, plan)\n self.add(request, plan, self._get_components())\n return self.get_default_component_list(request, plan)\n\n def get_manage_form(self, request, plans):\n \"\"\"Return form content in order to select components\"\"\"\n plan_comps = TestPlanComponent.objects.filter(plan__in=plans)\n\n form = PlanComponentForm(\n tps=plans,\n initial={\n 
\"component\": plan_comps.values_list(\"component_id\", flat=True),\n },\n )\n\n q_format = request.GET.get(\"format\", \"p\")\n html = getattr(form, \"as_\" + q_format)\n\n return HttpResponse(html())\n\n def get_default_component_list(self, request, plan):\n return render(request, self.template_name, context={\"test_plan\": plan})\n\n\n@require_GET\ndef printable(request, template_name=\"plan/printable.html\"):\n \"\"\"Create the printable copy for plan\"\"\"\n plan_pks = request.GET.getlist(\"plan\")\n\n if not plan_pks:\n return prompt.info(request, \"At least one target is required.\")\n\n tps = TestPlan.objects.filter(pk__in=plan_pks).only(\"pk\", \"name\")\n\n def plan_generator():\n repeat = len(plan_pks)\n params_sql = \",\".join(itertools.repeat(\"%s\", repeat))\n sql = sqls.TP_PRINTABLE_CASE_TEXTS % (params_sql, params_sql)\n result_set = SQLExecution(sql, plan_pks * 2)\n group_data = itertools.groupby(result_set.rows, itemgetter(\"plan_id\"))\n cases_dict = {key: list(values) for key, values in group_data}\n for tp in tps:\n tp.result_set = cases_dict.get(tp.plan_id, None)\n yield tp\n\n context_data = {\n \"test_plans\": plan_generator(),\n }\n\n return render(request, template_name, context=context_data)\n\n\n@require_GET\ndef export(request, template_name=\"case/export.xml\"):\n \"\"\"Export the plan\"\"\"\n plan_pks = list(map(int, request.GET.getlist(\"plan\")))\n\n if not plan_pks:\n return prompt.info(request, \"At least one target is required.\")\n\n context_data = {\n \"cases_info\": get_exported_cases_and_related_data(plan_pks),\n }\n\n timestamp = datetime.datetime.now()\n timestamp_str = \"%02i-%02i-%02i\" % (timestamp.year, timestamp.month, timestamp.day)\n\n response = render(request, template_name, context=context_data)\n filename = f\"tcms-testcases-{timestamp_str}.xml\"\n response[\"Content-Disposition\"] = f\"attachment; filename={filename}\"\n return response\n\n\n@require_GET\ndef construct_plans_treeview(request, plan_id):\n \"\"\"Construct a plan's tree view\"\"\"\n plan = get_object_or_404(TestPlan, pk=plan_id)\n\n tree_plan_ids = plan.get_ancestor_ids() + plan.get_descendant_ids()\n tree_plan_ids.append(plan.pk)\n\n plans = (\n TestPlan.objects.filter(pk__in=tree_plan_ids)\n .only(\"pk\", \"name\", \"parent_id\")\n .order_by(\"parent_id\", \"pk\")\n )\n\n plans = TestPlan.apply_subtotal(plans, cases_count=True, runs_count=True, children_count=True)\n\n return render(\n request,\n \"plan/get_treeview.html\",\n context={\"current_plan_id\": plan_id, \"plans\": plans},\n )\n\n\n@login_required\n@require_POST\ndef treeview_add_child_plans(request: HttpRequest, plan_id: int):\n plan = TestPlan.objects.filter(pk=plan_id).only(\"pk\").first()\n if plan is None:\n return JsonResponseNotFound({\"message\": f\"Plan {plan_id} does not exist.\"})\n\n child_plan_ids: List[str] = request.POST.getlist(\"children\")\n child_plans: List[TestPlan] = []\n\n ancestor_ids = plan.get_ancestor_ids()\n descendant_ids = plan.get_descendant_ids()\n\n for child_plan_id in child_plan_ids:\n if not child_plan_id.isdigit():\n return JsonResponseBadRequest(\n {\"message\": f\"Child plan id {child_plan_id} is not a number.\"}\n )\n child_plan: TestPlan = TestPlan.objects.filter(pk=int(child_plan_id)).only(\"pk\").first()\n if child_plan is None:\n return JsonResponseBadRequest(\n {\"message\": f\"Child plan {child_plan_id} does not exist.\"}\n )\n if child_plan.pk in ancestor_ids:\n return JsonResponseBadRequest(\n {\"message\": f\"Plan {child_plan_id} is an ancestor of \" f\"plan 
{plan_id} already.\"}\n )\n if child_plan.pk in descendant_ids:\n return JsonResponseBadRequest(\n {\"message\": f\"Plan {child_plan_id} is a descendant of \" f\"plan {plan_id} already.\"}\n )\n\n child_plans.append(child_plan)\n\n for child_plan in child_plans:\n child_plan.parent = plan\n child_plan.save(update_fields=[\"parent\"])\n\n return JsonResponse(\n {\"parent_plan\": plan.pk, \"children_plans\": [plan.pk for plan in child_plans]}\n )\n\n\n@login_required\n@require_POST\ndef treeview_remove_child_plans(request, plan_id: int):\n plan: TestPlan = TestPlan.objects.filter(pk=plan_id).only(\"pk\").first()\n if plan is None:\n return JsonResponseNotFound({\"message\": f\"Plan {plan_id} does not exist.\"})\n\n child_plan_ids: Set[int] = set(map(int, request.POST.getlist(\"children\")))\n direct_descendants = set(plan.get_descendant_ids(True))\n ids_to_remove = child_plan_ids & direct_descendants\n\n if ids_to_remove:\n TestPlan.objects.filter(pk__in=ids_to_remove).update(parent=None)\n\n return JsonResponse(\n {\n \"parent_plan\": plan.pk,\n \"removed\": sorted(ids_to_remove),\n \"non_descendants\": sorted(child_plan_ids - direct_descendants),\n }\n )\n\n\nclass PlanTreeChangeParentView(PermissionRequiredMixin, View):\n \"\"\"Plan tree view to change a plan's parent\"\"\"\n\n permission_required = \"testplans.change_testplan\"\n\n def handle_no_permission(self):\n return JsonResponseBadRequest(\n {\"message\": \"You do not have permission to change the parent plan.\"}\n )\n\n def patch(self, request, *args, **kwargs):\n plan: TestPlan = TestPlan.objects.filter(pk=self.kwargs[\"plan_id\"]).only(\"pk\").first()\n if plan is None:\n return JsonResponseNotFound(\n {\n \"message\": f\"Cannot change parent of plan, \"\n f\"whose id {self.kwargs['plan_id']} does not exist.\"\n }\n )\n\n data = json.loads(request.body)\n user_input: Optional[str] = data.get(\"parent\")\n if user_input is None:\n return JsonResponseBadRequest({\"message\": \"Missing parent plan id.\"})\n if not isinstance(user_input, int):\n return JsonResponseBadRequest(\n {\"message\": f'The given parent plan id \"{user_input}\" is not a positive integer.'}\n )\n parent_id = int(user_input)\n new_parent = TestPlan.objects.filter(pk=parent_id).only(\"parent\").first()\n if new_parent is None:\n return JsonResponseBadRequest(\n {\"message\": f\"The parent plan id {parent_id} does not exist.\"}\n )\n\n descendant_ids = plan.get_descendant_ids()\n if parent_id in descendant_ids:\n return JsonResponseBadRequest(\n {\n \"message\": f\"The parent plan {parent_id} is a descendant of plan {plan.pk} already.\"\n }\n )\n\n original_value = plan.parent.pk if plan.parent else \"None\"\n\n plan.parent = new_parent\n plan.save(update_fields=[\"parent\"])\n plan.log_action(\n who=request.user,\n field=\"parent\",\n original_value=original_value,\n new_value=str(new_parent.pk),\n )\n\n return JsonResponse({})\n\n\nclass SetPlanActiveView(PermissionRequiredMixin, View):\n \"\"\"Set a test plan active or inactive\"\"\"\n\n permission_required = \"testplans.change_testplan\"\n raise_exception = True\n enable: bool = True\n\n def patch(self, request, *args, **kwargs):\n plan_id = self.kwargs[\"plan_id\"]\n plan: TestPlan = TestPlan.objects.filter(pk=plan_id).only(\"is_active\").first()\n if not plan:\n return JsonResponseNotFound({\"message\": f\"Plan id {plan_id} does not exist.\"})\n original_value: str = str(plan.is_active)\n plan.is_active = self.enable\n plan.save(update_fields=[\"is_active\"])\n plan.log_action(\n who=request.user,\n 
field=\"is_active\",\n original_value=original_value,\n new_value=str(plan.is_active),\n )\n return JsonResponse({})\n", "path": "src/tcms/testplans/views.py"}]} |
gh_patches_debug_1302 | rasdani/github-patches | git_diff | catalyst-team__catalyst-855 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EarlyStoppingCallback considers first epoch as bad
## 🐛 Bug Report
EarlyStoppingCallback considers the first epoch as bad. For example, this can lead to training always stopping after the first epoch if patience=1.
### How To Reproduce
You can train a model with early stopping and patience=1 and see that it always stops after the first epoch, or you can use the unit test below that I added to the pull request.
#### Code sample
```python
from unittest.mock import MagicMock, PropertyMock
from catalyst.core import EarlyStoppingCallback
def test_patience1():
"""@TODO: Docs. Contribution is welcome."""
early_stop = EarlyStoppingCallback(1)
runner = MagicMock()
type(runner).stage_name = PropertyMock(return_value="training")
type(runner).valid_metrics = PropertyMock(return_value={"loss": 0.001})
stop_mock = PropertyMock(return_value=False)
type(runner).need_early_stop = stop_mock
early_stop.on_epoch_end(runner)
assert stop_mock.mock_calls == []
```
### Expected behavior
Training doesn't stop after the first epoch, and the unit test passes.
### Environment
```bash
Catalyst version: 20.06
PyTorch version: 1.5.1
Is debug build: No
CUDA used to build PyTorch: None
TensorFlow version: N/A
TensorBoard version: 2.2.2
OS: Mac OSX 10.15.5
GCC version: Could not collect
CMake version: version 3.8.0
Python version: 3.7
Is CUDA available: No
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
Versions of relevant libraries:
[pip3] catalyst-codestyle==20.4
[pip3] catalyst-sphinx-theme==1.1.1
[pip3] efficientnet-pytorch==0.6.3
[pip3] numpy==1.18.5
[pip3] segmentation-models-pytorch==0.1.0
[pip3] tensorboard==2.2.2
[pip3] tensorboard-plugin-wit==1.6.0.post3
[pip3] tensorboardX==2.0
[pip3] torch==1.5.1
[pip3] torchvision==0.6.1
[conda] catalyst-codestyle 20.4 <pip>
[conda] catalyst-sphinx-theme 1.1.1 <pip>
[conda] efficientnet-pytorch 0.6.3 <pip>
[conda] numpy 1.18.5 <pip>
[conda] segmentation-models-pytorch 0.1.0 <pip>
[conda] tensorboard 2.2.2 <pip>
[conda] tensorboard-plugin-wit 1.6.0.post3 <pip>
[conda] tensorboardX 2.0 <pip>
[conda] torch 1.5.1 <pip>
[conda] torchvision 0.6.1 <pip>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `catalyst/core/callbacks/early_stop.py`
Content:
```
1 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
2 from catalyst.core.runner import IRunner
3
4
5 class CheckRunCallback(Callback):
6 """@TODO: Docs. Contribution is welcome."""
7
8 def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):
9 """@TODO: Docs. Contribution is welcome."""
10 super().__init__(order=CallbackOrder.external, node=CallbackNode.all)
11 self.num_batch_steps = num_batch_steps
12 self.num_epoch_steps = num_epoch_steps
13
14 def on_epoch_end(self, runner: IRunner):
15 """@TODO: Docs. Contribution is welcome."""
16 if runner.epoch >= self.num_epoch_steps:
17 runner.need_early_stop = True
18
19 def on_batch_end(self, runner: IRunner):
20 """@TODO: Docs. Contribution is welcome."""
21 if runner.loader_batch_step >= self.num_batch_steps:
22 runner.need_early_stop = True
23
24
25 class EarlyStoppingCallback(Callback):
26 """@TODO: Docs. Contribution is welcome."""
27
28 def __init__(
29 self,
30 patience: int,
31 metric: str = "loss",
32 minimize: bool = True,
33 min_delta: float = 1e-6,
34 ):
35 """@TODO: Docs. Contribution is welcome."""
36 super().__init__(order=CallbackOrder.external, node=CallbackNode.all)
37 self.best_score = None
38 self.metric = metric
39 self.patience = patience
40 self.num_bad_epochs = 0
41 self.is_better = None
42
43 if minimize:
44 self.is_better = lambda score, best: score <= (best - min_delta)
45 else:
46 self.is_better = lambda score, best: score >= (best + min_delta)
47
48 def on_epoch_end(self, runner: IRunner) -> None:
49 """@TODO: Docs. Contribution is welcome."""
50 if runner.stage_name.startswith("infer"):
51 return
52
53 score = runner.valid_metrics[self.metric]
54 if self.best_score is None:
55 self.best_score = score
56 if self.is_better(score, self.best_score):
57 self.num_bad_epochs = 0
58 self.best_score = score
59 else:
60 self.num_bad_epochs += 1
61
62 if self.num_bad_epochs >= self.patience:
63 print(f"Early stop at {runner.epoch} epoch")
64 runner.need_early_stop = True
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py
--- a/catalyst/core/callbacks/early_stop.py
+++ b/catalyst/core/callbacks/early_stop.py
@@ -51,9 +51,7 @@
return
score = runner.valid_metrics[self.metric]
- if self.best_score is None:
- self.best_score = score
- if self.is_better(score, self.best_score):
+ if self.best_score is None or self.is_better(score, self.best_score):
self.num_bad_epochs = 0
self.best_score = score
else:
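The patch works because the original `on_epoch_end` seeds `best_score` with the first score and then immediately asks whether that score beats itself; with the default `min_delta=1e-6` the answer is no, so epoch 1 is counted as bad and `patience=1` stops training right away. A minimal sketch of that first-epoch comparison, assuming the callback's default `minimize=True` branch:
```python
min_delta = 1e-6
is_better = lambda score, best: score <= (best - min_delta)  # minimize=True branch

score = 0.001        # first validation loss, as in the reproduction test
best_score = score   # original code: best_score is seeded with the first score
print(is_better(score, best_score))  # False -> num_bad_epochs becomes 1 on epoch 1
```
Merging the `None` check into a single `or` condition makes the first epoch reset `num_bad_epochs` to 0 instead, which is exactly what the one-line change above does.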
| {"golden_diff": "diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py\n--- a/catalyst/core/callbacks/early_stop.py\n+++ b/catalyst/core/callbacks/early_stop.py\n@@ -51,9 +51,7 @@\n return\n \n score = runner.valid_metrics[self.metric]\n- if self.best_score is None:\n- self.best_score = score\n- if self.is_better(score, self.best_score):\n+ if self.best_score is None or self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n", "issue": "EarlyStoppingCallback considers first epoch as bad\n## \ud83d\udc1b Bug Report\r\nEarlyStoppingCallback considers first epoch as bad. This can lead for example to always stopping after first epoch if patience=1.\r\n\r\n\r\n### How To Reproduce\r\nYou can train a model with early stopping and patience=1 and see that it always stops after first epoch. Or you can use the unit test below that I added to pull request.\r\n\r\n#### Code sample\r\n```python\r\nfrom unittest.mock import MagicMock, PropertyMock\r\n\r\nfrom catalyst.core import EarlyStoppingCallback\r\n\r\n\r\ndef test_patience1():\r\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\r\n early_stop = EarlyStoppingCallback(1)\r\n runner = MagicMock()\r\n type(runner).stage_name = PropertyMock(return_value=\"training\")\r\n type(runner).valid_metrics = PropertyMock(return_value={\"loss\": 0.001})\r\n stop_mock = PropertyMock(return_value=False)\r\n type(runner).need_early_stop = stop_mock\r\n\r\n early_stop.on_epoch_end(runner)\r\n\r\n assert stop_mock.mock_calls == []\r\n```\r\n\r\n### Expected behavior\r\nTraining doesn't stop after first epoch. And the unit test passes.\r\n\r\n\r\n### Environment\r\n```bash\r\nCatalyst version: 20.06\r\nPyTorch version: 1.5.1\r\nIs debug build: No\r\nCUDA used to build PyTorch: None\r\nTensorFlow version: N/A\r\nTensorBoard version: 2.2.2\r\n\r\nOS: Mac OSX 10.15.5\r\nGCC version: Could not collect\r\nCMake version: version 3.8.0\r\n\r\nPython version: 3.7\r\nIs CUDA available: No\r\nCUDA runtime version: No CUDA\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\n\r\nVersions of relevant libraries:\r\n[pip3] catalyst-codestyle==20.4\r\n[pip3] catalyst-sphinx-theme==1.1.1\r\n[pip3] efficientnet-pytorch==0.6.3\r\n[pip3] numpy==1.18.5\r\n[pip3] segmentation-models-pytorch==0.1.0\r\n[pip3] tensorboard==2.2.2\r\n[pip3] tensorboard-plugin-wit==1.6.0.post3\r\n[pip3] tensorboardX==2.0\r\n[pip3] torch==1.5.1\r\n[pip3] torchvision==0.6.1\r\n[conda] catalyst-codestyle 20.4 <pip>\r\n[conda] catalyst-sphinx-theme 1.1.1 <pip>\r\n[conda] efficientnet-pytorch 0.6.3 <pip>\r\n[conda] numpy 1.18.5 <pip>\r\n[conda] segmentation-models-pytorch 0.1.0 <pip>\r\n[conda] tensorboard 2.2.2 <pip>\r\n[conda] tensorboard-plugin-wit 1.6.0.post3 <pip>\r\n[conda] tensorboardX 2.0 <pip>\r\n[conda] torch 1.5.1 <pip>\r\n[conda] torchvision 0.6.1 <pip>\r\n```\r\n\n", "before_files": [{"content": "from catalyst.core.callback import Callback, CallbackNode, CallbackOrder\nfrom catalyst.core.runner import IRunner\n\n\nclass CheckRunCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.num_batch_steps = num_batch_steps\n self.num_epoch_steps = num_epoch_steps\n\n def on_epoch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n if runner.epoch >= self.num_epoch_steps:\n runner.need_early_stop = True\n\n def on_batch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.loader_batch_step >= self.num_batch_steps:\n runner.need_early_stop = True\n\n\nclass EarlyStoppingCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(\n self,\n patience: int,\n metric: str = \"loss\",\n minimize: bool = True,\n min_delta: float = 1e-6,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.best_score = None\n self.metric = metric\n self.patience = patience\n self.num_bad_epochs = 0\n self.is_better = None\n\n if minimize:\n self.is_better = lambda score, best: score <= (best - min_delta)\n else:\n self.is_better = lambda score, best: score >= (best + min_delta)\n\n def on_epoch_end(self, runner: IRunner) -> None:\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.stage_name.startswith(\"infer\"):\n return\n\n score = runner.valid_metrics[self.metric]\n if self.best_score is None:\n self.best_score = score\n if self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n self.num_bad_epochs += 1\n\n if self.num_bad_epochs >= self.patience:\n print(f\"Early stop at {runner.epoch} epoch\")\n runner.need_early_stop = True\n", "path": "catalyst/core/callbacks/early_stop.py"}], "after_files": [{"content": "from catalyst.core.callback import Callback, CallbackNode, CallbackOrder\nfrom catalyst.core.runner import IRunner\n\n\nclass CheckRunCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.num_batch_steps = num_batch_steps\n self.num_epoch_steps = num_epoch_steps\n\n def on_epoch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.epoch >= self.num_epoch_steps:\n runner.need_early_stop = True\n\n def on_batch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.loader_batch_step >= self.num_batch_steps:\n runner.need_early_stop = True\n\n\nclass EarlyStoppingCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(\n self,\n patience: int,\n metric: str = \"loss\",\n minimize: bool = True,\n min_delta: float = 1e-6,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.best_score = None\n self.metric = metric\n self.patience = patience\n self.num_bad_epochs = 0\n self.is_better = None\n\n if minimize:\n self.is_better = lambda score, best: score <= (best - min_delta)\n else:\n self.is_better = lambda score, best: score >= (best + min_delta)\n\n def on_epoch_end(self, runner: IRunner) -> None:\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.stage_name.startswith(\"infer\"):\n return\n\n score = runner.valid_metrics[self.metric]\n if self.best_score is None or self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n self.num_bad_epochs += 1\n\n if self.num_bad_epochs >= self.patience:\n print(f\"Early stop at {runner.epoch} epoch\")\n runner.need_early_stop = True\n", "path": "catalyst/core/callbacks/early_stop.py"}]} |
gh_patches_debug_1303 | rasdani/github-patches | git_diff | benoitc__gunicorn-3030 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Odd crash in gthread after page served
I just started to see this after making some small changes to an older app and rebuilding it.
```
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/gunicorn/arbiter.py", line 609, in spawn_worker
worker.init_process()
File "/usr/local/lib/python3.8/site-packages/gunicorn/workers/gthread.py", line 95, in init_process
super().init_process()
File "/usr/local/lib/python3.8/site-packages/gunicorn/workers/base.py", line 142, in init_process
self.run()
File "/usr/local/lib/python3.8/site-packages/gunicorn/workers/gthread.py", line 232, in run
self.murder_keepalived()
File "/usr/local/lib/python3.8/site-packages/gunicorn/workers/gthread.py", line 176, in murder_keepalived
self.poller.unregister(conn.sock)
File "/usr/local/lib/python3.8/selectors.py", line 366, in unregister
key = super().unregister(fileobj)
File "/usr/local/lib/python3.8/selectors.py", line 249, in unregister
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
File "/usr/local/lib/python3.8/selectors.py", line 225, in _fileobj_lookup
return _fileobj_to_fd(fileobj)
File "/usr/local/lib/python3.8/selectors.py", line 42, in _fileobj_to_fd
raise ValueError("Invalid file descriptor: {}".format(fd))
ValueError: Invalid file descriptor: -1
```
I was not sure if I was causing it, so I started up a very simple Flask app and was able to reproduce. It serves the home page fine, but a moment later I see the above and the worker restarts.
Command
`gunicorn --worker-tmp-dir /dev/shm --workers=1 --threads=4 --worker-class=gthread --log-level=debug --access-logfile=gunicorn_access.log --reload --bind 0.0.0.0:4000 frontend:app`
Environment
```
blinker==1.6.2
click==8.1.5
Flask==2.3.2
gunicorn==21.0.1
importlib-metadata==6.8.0
itsdangerous==2.1.2
Jinja2==3.1.2
MarkupSafe==2.1.3
packaging==23.1
Werkzeug==2.3.6
zipp==3.16.2
```
Will revert to a previous version and see if the error persists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/workers/gthread.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 # design:
7 # A threaded worker accepts connections in the main loop, accepted
8 # connections are added to the thread pool as a connection job.
9 # Keepalive connections are put back in the loop waiting for an event.
10 # If no event happen after the keep alive timeout, the connection is
11 # closed.
12 # pylint: disable=no-else-break
13
14 from concurrent import futures
15 import errno
16 import os
17 import selectors
18 import socket
19 import ssl
20 import sys
21 import time
22 from collections import deque
23 from datetime import datetime
24 from functools import partial
25 from threading import RLock
26
27 from . import base
28 from .. import http
29 from .. import util
30 from .. import sock
31 from ..http import wsgi
32
33
34 class TConn(object):
35
36 def __init__(self, cfg, sock, client, server):
37 self.cfg = cfg
38 self.sock = sock
39 self.client = client
40 self.server = server
41
42 self.timeout = None
43 self.parser = None
44 self.initialized = False
45
46 # set the socket to non blocking
47 self.sock.setblocking(False)
48
49 def init(self):
50 self.initialized = True
51 self.sock.setblocking(True)
52
53 if self.parser is None:
54 # wrap the socket if needed
55 if self.cfg.is_ssl:
56 self.sock = sock.ssl_wrap_socket(self.sock, self.cfg)
57
58 # initialize the parser
59 self.parser = http.RequestParser(self.cfg, self.sock, self.client)
60
61 def set_timeout(self):
62 # set the timeout
63 self.timeout = time.time() + self.cfg.keepalive
64
65 def close(self):
66 util.close(self.sock)
67
68
69 class ThreadWorker(base.Worker):
70
71 def __init__(self, *args, **kwargs):
72 super().__init__(*args, **kwargs)
73 self.worker_connections = self.cfg.worker_connections
74 self.max_keepalived = self.cfg.worker_connections - self.cfg.threads
75 # initialise the pool
76 self.tpool = None
77 self.poller = None
78 self._lock = None
79 self.futures = deque()
80 self._keep = deque()
81 self.nr_conns = 0
82
83 @classmethod
84 def check_config(cls, cfg, log):
85 max_keepalived = cfg.worker_connections - cfg.threads
86
87 if max_keepalived <= 0 and cfg.keepalive:
88 log.warning("No keepalived connections can be handled. " +
89 "Check the number of worker connections and threads.")
90
91 def init_process(self):
92 self.tpool = self.get_thread_pool()
93 self.poller = selectors.DefaultSelector()
94 self._lock = RLock()
95 super().init_process()
96
97 def get_thread_pool(self):
98 """Override this method to customize how the thread pool is created"""
99 return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
100
101 def handle_quit(self, sig, frame):
102 self.alive = False
103 # worker_int callback
104 self.cfg.worker_int(self)
105 self.tpool.shutdown(False)
106 time.sleep(0.1)
107 sys.exit(0)
108
109 def _wrap_future(self, fs, conn):
110 fs.conn = conn
111 self.futures.append(fs)
112 fs.add_done_callback(self.finish_request)
113
114 def enqueue_req(self, conn):
115 conn.init()
116 # submit the connection to a worker
117 fs = self.tpool.submit(self.handle, conn)
118 self._wrap_future(fs, conn)
119
120 def accept(self, server, listener):
121 try:
122 sock, client = listener.accept()
123 # initialize the connection object
124 conn = TConn(self.cfg, sock, client, server)
125 # set timeout to ensure it will not be in the loop too long
126 conn.set_timeout()
127
128 self.nr_conns += 1
129 # wait until socket is readable
130 with self._lock:
131 self._keep.append(conn)
132 self.poller.register(conn.sock, selectors.EVENT_READ,
133 partial(self.on_client_socket_readable, conn))
134 except EnvironmentError as e:
135 if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
136 errno.EWOULDBLOCK):
137 raise
138
139 def on_client_socket_readable(self, conn, client):
140 with self._lock:
141 # unregister the client from the poller
142 self.poller.unregister(client)
143
144 if conn.initialized:
145 # remove the connection from keepalive
146 try:
147 self._keep.remove(conn)
148 except ValueError:
149 # race condition
150 return
151
152 # submit the connection to a worker
153 self.enqueue_req(conn)
154
155 def murder_keepalived(self):
156 now = time.time()
157 while True:
158 with self._lock:
159 try:
160 # remove the connection from the queue
161 conn = self._keep.popleft()
162 except IndexError:
163 break
164
165 delta = conn.timeout - now
166 if delta > 0:
167 # add the connection back to the queue
168 with self._lock:
169 self._keep.appendleft(conn)
170 break
171 else:
172 self.nr_conns -= 1
173 # remove the socket from the poller
174 with self._lock:
175 try:
176 self.poller.unregister(conn.sock)
177 except EnvironmentError as e:
178 if e.errno != errno.EBADF:
179 raise
180 except KeyError:
181 # already removed by the system, continue
182 pass
183
184 # close the socket
185 conn.close()
186
187 def is_parent_alive(self):
188 # If our parent changed then we shut down.
189 if self.ppid != os.getppid():
190 self.log.info("Parent changed, shutting down: %s", self)
191 return False
192 return True
193
194 def run(self):
195 # init listeners, add them to the event loop
196 for sock in self.sockets:
197 sock.setblocking(False)
198 # a race condition during graceful shutdown may make the listener
199 # name unavailable in the request handler so capture it once here
200 server = sock.getsockname()
201 acceptor = partial(self.accept, server)
202 self.poller.register(sock, selectors.EVENT_READ, acceptor)
203
204 while self.alive:
205 # notify the arbiter we are alive
206 self.notify()
207
208 # can we accept more connections?
209 if self.nr_conns < self.worker_connections:
210 # wait for an event
211 events = self.poller.select(1.0)
212 for key, _ in events:
213 callback = key.data
214 callback(key.fileobj)
215
216 # check (but do not wait) for finished requests
217 result = futures.wait(self.futures, timeout=0,
218 return_when=futures.FIRST_COMPLETED)
219 else:
220 # wait for a request to finish
221 result = futures.wait(self.futures, timeout=1.0,
222 return_when=futures.FIRST_COMPLETED)
223
224 # clean up finished requests
225 for fut in result.done:
226 self.futures.remove(fut)
227
228 if not self.is_parent_alive():
229 break
230
231 # handle keepalive timeouts
232 self.murder_keepalived()
233
234 self.tpool.shutdown(False)
235 self.poller.close()
236
237 for s in self.sockets:
238 s.close()
239
240 futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
241
242 def finish_request(self, fs):
243 if fs.cancelled():
244 self.nr_conns -= 1
245 fs.conn.close()
246 return
247
248 try:
249 (keepalive, conn) = fs.result()
250 # if the connection should be kept alived add it
251 # to the eventloop and record it
252 if keepalive and self.alive:
253 # flag the socket as non blocked
254 conn.sock.setblocking(False)
255
256 # register the connection
257 conn.set_timeout()
258 with self._lock:
259 self._keep.append(conn)
260
261 # add the socket to the event loop
262 self.poller.register(conn.sock, selectors.EVENT_READ,
263 partial(self.on_client_socket_readable, conn))
264 else:
265 self.nr_conns -= 1
266 conn.close()
267 except Exception:
268 # an exception happened, make sure to close the
269 # socket.
270 self.nr_conns -= 1
271 fs.conn.close()
272
273 def handle(self, conn):
274 keepalive = False
275 req = None
276 try:
277 req = next(conn.parser)
278 if not req:
279 return (False, conn)
280
281 # handle the request
282 keepalive = self.handle_request(req, conn)
283 if keepalive:
284 return (keepalive, conn)
285 except http.errors.NoMoreData as e:
286 self.log.debug("Ignored premature client disconnection. %s", e)
287
288 except StopIteration as e:
289 self.log.debug("Closing connection. %s", e)
290 except ssl.SSLError as e:
291 if e.args[0] == ssl.SSL_ERROR_EOF:
292 self.log.debug("ssl connection closed")
293 conn.sock.close()
294 else:
295 self.log.debug("Error processing SSL request.")
296 self.handle_error(req, conn.sock, conn.client, e)
297
298 except EnvironmentError as e:
299 if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
300 self.log.exception("Socket error processing request.")
301 else:
302 if e.errno == errno.ECONNRESET:
303 self.log.debug("Ignoring connection reset")
304 elif e.errno == errno.ENOTCONN:
305 self.log.debug("Ignoring socket not connected")
306 else:
307 self.log.debug("Ignoring connection epipe")
308 except Exception as e:
309 self.handle_error(req, conn.sock, conn.client, e)
310
311 return (False, conn)
312
313 def handle_request(self, req, conn):
314 environ = {}
315 resp = None
316 try:
317 self.cfg.pre_request(self, req)
318 request_start = datetime.now()
319 resp, environ = wsgi.create(req, conn.sock, conn.client,
320 conn.server, self.cfg)
321 environ["wsgi.multithread"] = True
322 self.nr += 1
323 if self.nr >= self.max_requests:
324 if self.alive:
325 self.log.info("Autorestarting worker after current request.")
326 self.alive = False
327 resp.force_close()
328
329 if not self.alive or not self.cfg.keepalive:
330 resp.force_close()
331 elif len(self._keep) >= self.max_keepalived:
332 resp.force_close()
333
334 respiter = self.wsgi(environ, resp.start_response)
335 try:
336 if isinstance(respiter, environ['wsgi.file_wrapper']):
337 resp.write_file(respiter)
338 else:
339 for item in respiter:
340 resp.write(item)
341
342 resp.close()
343 finally:
344 request_time = datetime.now() - request_start
345 self.log.access(resp, req, environ, request_time)
346 if hasattr(respiter, "close"):
347 respiter.close()
348
349 if resp.should_close():
350 self.log.debug("Closing connection.")
351 return False
352 except EnvironmentError:
353 # pass to next try-except level
354 util.reraise(*sys.exc_info())
355 except Exception:
356 if resp and resp.headers_sent:
357 # If the requests have already been sent, we should close the
358 # connection to indicate the error.
359 self.log.exception("Error handling request")
360 try:
361 conn.sock.shutdown(socket.SHUT_RDWR)
362 conn.sock.close()
363 except EnvironmentError:
364 pass
365 raise StopIteration()
366 raise
367 finally:
368 try:
369 self.cfg.post_request(self, req, environ, resp)
370 except Exception:
371 self.log.exception("Exception in post_request hook")
372
373 return True
374
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/workers/gthread.py b/gunicorn/workers/gthread.py
--- a/gunicorn/workers/gthread.py
+++ b/gunicorn/workers/gthread.py
@@ -180,6 +180,9 @@
except KeyError:
# already removed by the system, continue
pass
+ except ValueError:
+ # already removed by the system continue
+ pass
# close the socket
conn.close()
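The added `except ValueError` matters because the standard-library selector raises `ValueError`, not `KeyError`, when asked to unregister a socket whose file descriptor is already -1, which is what the traceback in the issue shows (`_fileobj_to_fd` is reached before the key lookup). A minimal sketch of that selector behaviour, independent of gunicorn and assuming CPython's `selectors` module:
```python
import selectors
import socket

sel = selectors.DefaultSelector()
left, right = socket.socketpair()
left.close()                # fileno() now returns -1
try:
    sel.unregister(left)    # closed and no longer tracked -> ValueError, not KeyError
except ValueError as exc:
    print(exc)              # Invalid file descriptor: -1
finally:
    right.close()
    sel.close()
```
With the patch, `murder_keepalived` treats that case the same way as an already-removed key and simply closes the connection.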
| {"golden_diff": "diff --git a/gunicorn/workers/gthread.py b/gunicorn/workers/gthread.py\n--- a/gunicorn/workers/gthread.py\n+++ b/gunicorn/workers/gthread.py\n@@ -180,6 +180,9 @@\n except KeyError:\n # already removed by the system, continue\n pass\n+ except ValueError:\n+ # already removed by the system continue\n+ pass\n \n # close the socket\n conn.close()\n", "issue": "Odd crash in gthread after page served\nI just started to see this after rebuilding an older app after making some small changes. \r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/gunicorn/arbiter.py\", line 609, in spawn_worker\r\n worker.init_process()\r\n File \"/usr/local/lib/python3.8/site-packages/gunicorn/workers/gthread.py\", line 95, in init_process\r\n super().init_process()\r\n File \"/usr/local/lib/python3.8/site-packages/gunicorn/workers/base.py\", line 142, in init_process\r\n self.run()\r\n File \"/usr/local/lib/python3.8/site-packages/gunicorn/workers/gthread.py\", line 232, in run\r\n self.murder_keepalived()\r\n File \"/usr/local/lib/python3.8/site-packages/gunicorn/workers/gthread.py\", line 176, in murder_keepalived\r\n self.poller.unregister(conn.sock)\r\n File \"/usr/local/lib/python3.8/selectors.py\", line 366, in unregister\r\n key = super().unregister(fileobj)\r\n File \"/usr/local/lib/python3.8/selectors.py\", line 249, in unregister\r\n key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))\r\n File \"/usr/local/lib/python3.8/selectors.py\", line 225, in _fileobj_lookup\r\n return _fileobj_to_fd(fileobj)\r\n File \"/usr/local/lib/python3.8/selectors.py\", line 42, in _fileobj_to_fd\r\n raise ValueError(\"Invalid file descriptor: {}\".format(fd))\r\nValueError: Invalid file descriptor: -1\r\n```\r\nI was not sure if I was causing it, so I started up a very simple Flask app and was able to reproduce. It serves the home page fine, but a moment later I see the above and the worker restarts.\r\n\r\nCommand\r\n`gunicorn --worker-tmp-dir /dev/shm --workers=1 --threads=4 --worker-class=gthread --log-level=debug --access-logfile=gunicorn_access.log --reload --bind 0.0.0.0:4000 frontend:app`\r\n\r\nEnvironment\r\n```\r\nblinker==1.6.2\r\nclick==8.1.5\r\nFlask==2.3.2\r\ngunicorn==21.0.1\r\nimportlib-metadata==6.8.0\r\nitsdangerous==2.1.2\r\nJinja2==3.1.2\r\nMarkupSafe==2.1.3\r\npackaging==23.1\r\nWerkzeug==2.3.6\r\nzipp==3.16.2\r\n```\r\n\r\nWill revert to a previous version and see if the error persists.\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\n# design:\n# A threaded worker accepts connections in the main loop, accepted\n# connections are added to the thread pool as a connection job.\n# Keepalive connections are put back in the loop waiting for an event.\n# If no event happen after the keep alive timeout, the connection is\n# closed.\n# pylint: disable=no-else-break\n\nfrom concurrent import futures\nimport errno\nimport os\nimport selectors\nimport socket\nimport ssl\nimport sys\nimport time\nfrom collections import deque\nfrom datetime import datetime\nfrom functools import partial\nfrom threading import RLock\n\nfrom . import base\nfrom .. import http\nfrom .. import util\nfrom .. 
import sock\nfrom ..http import wsgi\n\n\nclass TConn(object):\n\n def __init__(self, cfg, sock, client, server):\n self.cfg = cfg\n self.sock = sock\n self.client = client\n self.server = server\n\n self.timeout = None\n self.parser = None\n self.initialized = False\n\n # set the socket to non blocking\n self.sock.setblocking(False)\n\n def init(self):\n self.initialized = True\n self.sock.setblocking(True)\n\n if self.parser is None:\n # wrap the socket if needed\n if self.cfg.is_ssl:\n self.sock = sock.ssl_wrap_socket(self.sock, self.cfg)\n\n # initialize the parser\n self.parser = http.RequestParser(self.cfg, self.sock, self.client)\n\n def set_timeout(self):\n # set the timeout\n self.timeout = time.time() + self.cfg.keepalive\n\n def close(self):\n util.close(self.sock)\n\n\nclass ThreadWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n self.max_keepalived = self.cfg.worker_connections - self.cfg.threads\n # initialise the pool\n self.tpool = None\n self.poller = None\n self._lock = None\n self.futures = deque()\n self._keep = deque()\n self.nr_conns = 0\n\n @classmethod\n def check_config(cls, cfg, log):\n max_keepalived = cfg.worker_connections - cfg.threads\n\n if max_keepalived <= 0 and cfg.keepalive:\n log.warning(\"No keepalived connections can be handled. \" +\n \"Check the number of worker connections and threads.\")\n\n def init_process(self):\n self.tpool = self.get_thread_pool()\n self.poller = selectors.DefaultSelector()\n self._lock = RLock()\n super().init_process()\n\n def get_thread_pool(self):\n \"\"\"Override this method to customize how the thread pool is created\"\"\"\n return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n self.tpool.shutdown(False)\n time.sleep(0.1)\n sys.exit(0)\n\n def _wrap_future(self, fs, conn):\n fs.conn = conn\n self.futures.append(fs)\n fs.add_done_callback(self.finish_request)\n\n def enqueue_req(self, conn):\n conn.init()\n # submit the connection to a worker\n fs = self.tpool.submit(self.handle, conn)\n self._wrap_future(fs, conn)\n\n def accept(self, server, listener):\n try:\n sock, client = listener.accept()\n # initialize the connection object\n conn = TConn(self.cfg, sock, client, server)\n # set timeout to ensure it will not be in the loop too long\n conn.set_timeout()\n\n self.nr_conns += 1\n # wait until socket is readable\n with self._lock:\n self._keep.append(conn)\n self.poller.register(conn.sock, selectors.EVENT_READ,\n partial(self.on_client_socket_readable, conn))\n except EnvironmentError as e:\n if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,\n errno.EWOULDBLOCK):\n raise\n\n def on_client_socket_readable(self, conn, client):\n with self._lock:\n # unregister the client from the poller\n self.poller.unregister(client)\n\n if conn.initialized:\n # remove the connection from keepalive\n try:\n self._keep.remove(conn)\n except ValueError:\n # race condition\n return\n\n # submit the connection to a worker\n self.enqueue_req(conn)\n\n def murder_keepalived(self):\n now = time.time()\n while True:\n with self._lock:\n try:\n # remove the connection from the queue\n conn = self._keep.popleft()\n except IndexError:\n break\n\n delta = conn.timeout - now\n if delta > 0:\n # add the connection back to the queue\n with self._lock:\n self._keep.appendleft(conn)\n break\n else:\n self.nr_conns -= 1\n # 
remove the socket from the poller\n with self._lock:\n try:\n self.poller.unregister(conn.sock)\n except EnvironmentError as e:\n if e.errno != errno.EBADF:\n raise\n except KeyError:\n # already removed by the system, continue\n pass\n\n # close the socket\n conn.close()\n\n def is_parent_alive(self):\n # If our parent changed then we shut down.\n if self.ppid != os.getppid():\n self.log.info(\"Parent changed, shutting down: %s\", self)\n return False\n return True\n\n def run(self):\n # init listeners, add them to the event loop\n for sock in self.sockets:\n sock.setblocking(False)\n # a race condition during graceful shutdown may make the listener\n # name unavailable in the request handler so capture it once here\n server = sock.getsockname()\n acceptor = partial(self.accept, server)\n self.poller.register(sock, selectors.EVENT_READ, acceptor)\n\n while self.alive:\n # notify the arbiter we are alive\n self.notify()\n\n # can we accept more connections?\n if self.nr_conns < self.worker_connections:\n # wait for an event\n events = self.poller.select(1.0)\n for key, _ in events:\n callback = key.data\n callback(key.fileobj)\n\n # check (but do not wait) for finished requests\n result = futures.wait(self.futures, timeout=0,\n return_when=futures.FIRST_COMPLETED)\n else:\n # wait for a request to finish\n result = futures.wait(self.futures, timeout=1.0,\n return_when=futures.FIRST_COMPLETED)\n\n # clean up finished requests\n for fut in result.done:\n self.futures.remove(fut)\n\n if not self.is_parent_alive():\n break\n\n # handle keepalive timeouts\n self.murder_keepalived()\n\n self.tpool.shutdown(False)\n self.poller.close()\n\n for s in self.sockets:\n s.close()\n\n futures.wait(self.futures, timeout=self.cfg.graceful_timeout)\n\n def finish_request(self, fs):\n if fs.cancelled():\n self.nr_conns -= 1\n fs.conn.close()\n return\n\n try:\n (keepalive, conn) = fs.result()\n # if the connection should be kept alived add it\n # to the eventloop and record it\n if keepalive and self.alive:\n # flag the socket as non blocked\n conn.sock.setblocking(False)\n\n # register the connection\n conn.set_timeout()\n with self._lock:\n self._keep.append(conn)\n\n # add the socket to the event loop\n self.poller.register(conn.sock, selectors.EVENT_READ,\n partial(self.on_client_socket_readable, conn))\n else:\n self.nr_conns -= 1\n conn.close()\n except Exception:\n # an exception happened, make sure to close the\n # socket.\n self.nr_conns -= 1\n fs.conn.close()\n\n def handle(self, conn):\n keepalive = False\n req = None\n try:\n req = next(conn.parser)\n if not req:\n return (False, conn)\n\n # handle the request\n keepalive = self.handle_request(req, conn)\n if keepalive:\n return (keepalive, conn)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n\n except StopIteration as e:\n self.log.debug(\"Closing connection. 
%s\", e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n conn.sock.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, conn.sock, conn.client, e)\n\n except EnvironmentError as e:\n if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.errno == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n elif e.errno == errno.ENOTCONN:\n self.log.debug(\"Ignoring socket not connected\")\n else:\n self.log.debug(\"Ignoring connection epipe\")\n except Exception as e:\n self.handle_error(req, conn.sock, conn.client, e)\n\n return (False, conn)\n\n def handle_request(self, req, conn):\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n request_start = datetime.now()\n resp, environ = wsgi.create(req, conn.sock, conn.client,\n conn.server, self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.nr >= self.max_requests:\n if self.alive:\n self.log.info(\"Autorestarting worker after current request.\")\n self.alive = False\n resp.force_close()\n\n if not self.alive or not self.cfg.keepalive:\n resp.force_close()\n elif len(self._keep) >= self.max_keepalived:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n\n resp.close()\n finally:\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n if hasattr(respiter, \"close\"):\n respiter.close()\n\n if resp.should_close():\n self.log.debug(\"Closing connection.\")\n return False\n except EnvironmentError:\n # pass to next try-except level\n util.reraise(*sys.exc_info())\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n self.log.exception(\"Error handling request\")\n try:\n conn.sock.shutdown(socket.SHUT_RDWR)\n conn.sock.close()\n except EnvironmentError:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n\n return True\n", "path": "gunicorn/workers/gthread.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\n# design:\n# A threaded worker accepts connections in the main loop, accepted\n# connections are added to the thread pool as a connection job.\n# Keepalive connections are put back in the loop waiting for an event.\n# If no event happen after the keep alive timeout, the connection is\n# closed.\n# pylint: disable=no-else-break\n\nfrom concurrent import futures\nimport errno\nimport os\nimport selectors\nimport socket\nimport ssl\nimport sys\nimport time\nfrom collections import deque\nfrom datetime import datetime\nfrom functools import partial\nfrom threading import RLock\n\nfrom . import base\nfrom .. import http\nfrom .. import util\nfrom .. 
import sock\nfrom ..http import wsgi\n\n\nclass TConn(object):\n\n def __init__(self, cfg, sock, client, server):\n self.cfg = cfg\n self.sock = sock\n self.client = client\n self.server = server\n\n self.timeout = None\n self.parser = None\n self.initialized = False\n\n # set the socket to non blocking\n self.sock.setblocking(False)\n\n def init(self):\n self.initialized = True\n self.sock.setblocking(True)\n\n if self.parser is None:\n # wrap the socket if needed\n if self.cfg.is_ssl:\n self.sock = sock.ssl_wrap_socket(self.sock, self.cfg)\n\n # initialize the parser\n self.parser = http.RequestParser(self.cfg, self.sock, self.client)\n\n def set_timeout(self):\n # set the timeout\n self.timeout = time.time() + self.cfg.keepalive\n\n def close(self):\n util.close(self.sock)\n\n\nclass ThreadWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n self.max_keepalived = self.cfg.worker_connections - self.cfg.threads\n # initialise the pool\n self.tpool = None\n self.poller = None\n self._lock = None\n self.futures = deque()\n self._keep = deque()\n self.nr_conns = 0\n\n @classmethod\n def check_config(cls, cfg, log):\n max_keepalived = cfg.worker_connections - cfg.threads\n\n if max_keepalived <= 0 and cfg.keepalive:\n log.warning(\"No keepalived connections can be handled. \" +\n \"Check the number of worker connections and threads.\")\n\n def init_process(self):\n self.tpool = self.get_thread_pool()\n self.poller = selectors.DefaultSelector()\n self._lock = RLock()\n super().init_process()\n\n def get_thread_pool(self):\n \"\"\"Override this method to customize how the thread pool is created\"\"\"\n return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n self.tpool.shutdown(False)\n time.sleep(0.1)\n sys.exit(0)\n\n def _wrap_future(self, fs, conn):\n fs.conn = conn\n self.futures.append(fs)\n fs.add_done_callback(self.finish_request)\n\n def enqueue_req(self, conn):\n conn.init()\n # submit the connection to a worker\n fs = self.tpool.submit(self.handle, conn)\n self._wrap_future(fs, conn)\n\n def accept(self, server, listener):\n try:\n sock, client = listener.accept()\n # initialize the connection object\n conn = TConn(self.cfg, sock, client, server)\n # set timeout to ensure it will not be in the loop too long\n conn.set_timeout()\n\n self.nr_conns += 1\n # wait until socket is readable\n with self._lock:\n self._keep.append(conn)\n self.poller.register(conn.sock, selectors.EVENT_READ,\n partial(self.on_client_socket_readable, conn))\n except EnvironmentError as e:\n if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,\n errno.EWOULDBLOCK):\n raise\n\n def on_client_socket_readable(self, conn, client):\n with self._lock:\n # unregister the client from the poller\n self.poller.unregister(client)\n\n if conn.initialized:\n # remove the connection from keepalive\n try:\n self._keep.remove(conn)\n except ValueError:\n # race condition\n return\n\n # submit the connection to a worker\n self.enqueue_req(conn)\n\n def murder_keepalived(self):\n now = time.time()\n while True:\n with self._lock:\n try:\n # remove the connection from the queue\n conn = self._keep.popleft()\n except IndexError:\n break\n\n delta = conn.timeout - now\n if delta > 0:\n # add the connection back to the queue\n with self._lock:\n self._keep.appendleft(conn)\n break\n else:\n self.nr_conns -= 1\n # 
remove the socket from the poller\n with self._lock:\n try:\n self.poller.unregister(conn.sock)\n except EnvironmentError as e:\n if e.errno != errno.EBADF:\n raise\n except KeyError:\n # already removed by the system, continue\n pass\n except ValueError:\n # already removed by the system continue\n pass\n\n # close the socket\n conn.close()\n\n def is_parent_alive(self):\n # If our parent changed then we shut down.\n if self.ppid != os.getppid():\n self.log.info(\"Parent changed, shutting down: %s\", self)\n return False\n return True\n\n def run(self):\n # init listeners, add them to the event loop\n for sock in self.sockets:\n sock.setblocking(False)\n # a race condition during graceful shutdown may make the listener\n # name unavailable in the request handler so capture it once here\n server = sock.getsockname()\n acceptor = partial(self.accept, server)\n self.poller.register(sock, selectors.EVENT_READ, acceptor)\n\n while self.alive:\n # notify the arbiter we are alive\n self.notify()\n\n # can we accept more connections?\n if self.nr_conns < self.worker_connections:\n # wait for an event\n events = self.poller.select(1.0)\n for key, _ in events:\n callback = key.data\n callback(key.fileobj)\n\n # check (but do not wait) for finished requests\n result = futures.wait(self.futures, timeout=0,\n return_when=futures.FIRST_COMPLETED)\n else:\n # wait for a request to finish\n result = futures.wait(self.futures, timeout=1.0,\n return_when=futures.FIRST_COMPLETED)\n\n # clean up finished requests\n for fut in result.done:\n self.futures.remove(fut)\n\n if not self.is_parent_alive():\n break\n\n # handle keepalive timeouts\n self.murder_keepalived()\n\n self.tpool.shutdown(False)\n self.poller.close()\n\n for s in self.sockets:\n s.close()\n\n futures.wait(self.futures, timeout=self.cfg.graceful_timeout)\n\n def finish_request(self, fs):\n if fs.cancelled():\n self.nr_conns -= 1\n fs.conn.close()\n return\n\n try:\n (keepalive, conn) = fs.result()\n # if the connection should be kept alived add it\n # to the eventloop and record it\n if keepalive and self.alive:\n # flag the socket as non blocked\n conn.sock.setblocking(False)\n\n # register the connection\n conn.set_timeout()\n with self._lock:\n self._keep.append(conn)\n\n # add the socket to the event loop\n self.poller.register(conn.sock, selectors.EVENT_READ,\n partial(self.on_client_socket_readable, conn))\n else:\n self.nr_conns -= 1\n conn.close()\n except Exception:\n # an exception happened, make sure to close the\n # socket.\n self.nr_conns -= 1\n fs.conn.close()\n\n def handle(self, conn):\n keepalive = False\n req = None\n try:\n req = next(conn.parser)\n if not req:\n return (False, conn)\n\n # handle the request\n keepalive = self.handle_request(req, conn)\n if keepalive:\n return (keepalive, conn)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n\n except StopIteration as e:\n self.log.debug(\"Closing connection. 
%s\", e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n conn.sock.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, conn.sock, conn.client, e)\n\n except EnvironmentError as e:\n if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.errno == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n elif e.errno == errno.ENOTCONN:\n self.log.debug(\"Ignoring socket not connected\")\n else:\n self.log.debug(\"Ignoring connection epipe\")\n except Exception as e:\n self.handle_error(req, conn.sock, conn.client, e)\n\n return (False, conn)\n\n def handle_request(self, req, conn):\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n request_start = datetime.now()\n resp, environ = wsgi.create(req, conn.sock, conn.client,\n conn.server, self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.nr >= self.max_requests:\n if self.alive:\n self.log.info(\"Autorestarting worker after current request.\")\n self.alive = False\n resp.force_close()\n\n if not self.alive or not self.cfg.keepalive:\n resp.force_close()\n elif len(self._keep) >= self.max_keepalived:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n\n resp.close()\n finally:\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n if hasattr(respiter, \"close\"):\n respiter.close()\n\n if resp.should_close():\n self.log.debug(\"Closing connection.\")\n return False\n except EnvironmentError:\n # pass to next try-except level\n util.reraise(*sys.exc_info())\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n self.log.exception(\"Error handling request\")\n try:\n conn.sock.shutdown(socket.SHUT_RDWR)\n conn.sock.close()\n except EnvironmentError:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n\n return True\n", "path": "gunicorn/workers/gthread.py"}]} |
gh_patches_debug_1304 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-920 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Tenseal necessary, makes installation impossible on Apple Silicon.
As discussed in #130, tenseal remains unavailable for Apple Silicon. The current NVFlare version (2.2.0) has no optional features and tenseal is a hard requirement, making installation impossible on Apple Silicon.
--- END ISSUE ---
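For context on the packaging mechanism involved: setuptools lets a project declare optional dependency groups via `extras_require`, so a hard-to-build package can be skipped on platforms that cannot compile it. The sketch below is illustrative only; the package name, version, and the `HE` extra name are assumptions for the example rather than NVFlare's actual packaging.

```python
# Minimal setup.py sketch: tenseal is an opt-in extra rather than a hard
# requirement, so a plain install succeeds on platforms where it cannot build.
from setuptools import setup

setup(
    name="example-package",          # illustrative name
    version="0.0.1",
    install_requires=[
        "numpy",                     # unconditional dependencies stay here
    ],
    extras_require={
        # installed only with: pip install "example-package[HE]"
        "HE": ["tenseal==0.3.0"],
    },
)
```

With that layout, `pip install example-package` works on Apple Silicon, and users who need homomorphic encryption opt in with `pip install "example-package[HE]"` on platforms where tenseal is available.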
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import datetime
16 import os
17 import shutil
18
19 from setuptools import find_packages, setup
20
21 import versioneer
22
23 # read the contents of your README file
24 this_directory = os.path.abspath(os.path.dirname(__file__))
25 with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
26 long_description = f.read()
27
28 if os.path.exists(os.path.join(this_directory, "nvflare", "poc.zip")):
29 os.remove(os.path.join(this_directory, "nvflare", "poc.zip"))
30 shutil.make_archive(base_name="poc", format="zip", root_dir=os.path.join(this_directory, "nvflare"), base_dir="poc")
31 shutil.move("poc.zip", os.path.join(this_directory, "nvflare", "poc.zip"))
32
33 versions = versioneer.get_versions()
34 if versions["error"]:
35 today = datetime.date.today().timetuple()
36 year = today[0] % 1000
37 month = today[1]
38 day = today[2]
39 version = f"0.0.{year:02d}{month:02d}{day:02d}"
40 else:
41 version = versions["version"]
42
43 release = os.environ.get("NVFL_RELEASE")
44 if release == "1":
45 package_name = "nvflare"
46 else:
47 package_name = "nvflare-nightly"
48
49 setup(
50 name=package_name,
51 version=version,
52 cmdclass=versioneer.get_cmdclass(),
53 description="Federated Learning Application Runtime Environment",
54 url="https://github.com/NVIDIA/NVFlare",
55 package_dir={"nvflare": "nvflare"},
56 packages=find_packages(
57 where=".",
58 include=[
59 "*",
60 ],
61 exclude=["tests", "tests.*"],
62 ),
63 package_data={"": ["*.yml", "*.html", "poc.zip"]},
64 zip_safe=True,
65 license_files=("LICENSE",),
66 classifiers=[
67 "Programming Language :: Python :: 3.7",
68 "Programming Language :: Python :: 3.8",
69 "License :: OSI Approved :: Apache Software License",
70 "Operating System :: POSIX :: Linux",
71 ],
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 python_requires=">=3.7,<3.9",
75 install_requires=[
76 "cryptography>=36.0.0",
77 "Flask==2.1.2",
78 "Flask-JWT-Extended==4.4.3",
79 "Flask-SQLAlchemy==2.5.1",
80 "google-api-python-client==2.49.0",
81 "grpcio==1.46.3",
82 "gunicorn==20.1.0",
83 "numpy",
84 "protobuf==3.20.1",
85 "psutil==5.9.1",
86 "PyYAML==6.0",
87 "six>=1.15.0",
88 "tenseal==0.3.0",
89 "msgpack==1.0.3",
90 "docker>=6.0",
91 ],
92 entry_points={
93 "console_scripts": [
94 "provision=nvflare.lighter.provision:main",
95 "poc=nvflare.lighter.poc:main",
96 "nvflare=nvflare.cli:main",
97 "authz_preview=nvflare.fuel.hci.tools.authz_preview:main",
98 ],
99 },
100 )
101
102 os.remove(os.path.join(this_directory, "nvflare", "poc.zip"))
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,10 +85,10 @@
"psutil==5.9.1",
"PyYAML==6.0",
"six>=1.15.0",
- "tenseal==0.3.0",
"msgpack==1.0.3",
"docker>=6.0",
],
+ extras_require={"HE": ["tenseal==0.3.0"]},
entry_points={
"console_scripts": [
"provision=nvflare.lighter.provision:main",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,10 +85,10 @@\n \"psutil==5.9.1\",\n \"PyYAML==6.0\",\n \"six>=1.15.0\",\n- \"tenseal==0.3.0\",\n \"msgpack==1.0.3\",\n \"docker>=6.0\",\n ],\n+ extras_require={\"HE\": [\"tenseal==0.3.0\"]},\n entry_points={\n \"console_scripts\": [\n \"provision=nvflare.lighter.provision:main\",\n", "issue": "[BUG] Tenseal necessary, makes installation impossible on Apple Silicon.\n\r\nAs discussed in #130 tenseal remains unavailable for Apple Silicon. The current NVFlare version (2.2.0) has no optional features and tenseal is necessary, making installation impossible on Apple Silicon.\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport os\nimport shutil\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n# read the contents of your README file\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nif os.path.exists(os.path.join(this_directory, \"nvflare\", \"poc.zip\")):\n os.remove(os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\nshutil.make_archive(base_name=\"poc\", format=\"zip\", root_dir=os.path.join(this_directory, \"nvflare\"), base_dir=\"poc\")\nshutil.move(\"poc.zip\", os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\n\nversions = versioneer.get_versions()\nif versions[\"error\"]:\n today = datetime.date.today().timetuple()\n year = today[0] % 1000\n month = today[1]\n day = today[2]\n version = f\"0.0.{year:02d}{month:02d}{day:02d}\"\nelse:\n version = versions[\"version\"]\n\nrelease = os.environ.get(\"NVFL_RELEASE\")\nif release == \"1\":\n package_name = \"nvflare\"\nelse:\n package_name = \"nvflare-nightly\"\n\nsetup(\n name=package_name,\n version=version,\n cmdclass=versioneer.get_cmdclass(),\n description=\"Federated Learning Application Runtime Environment\",\n url=\"https://github.com/NVIDIA/NVFlare\",\n package_dir={\"nvflare\": \"nvflare\"},\n packages=find_packages(\n where=\".\",\n include=[\n \"*\",\n ],\n exclude=[\"tests\", \"tests.*\"],\n ),\n package_data={\"\": [\"*.yml\", \"*.html\", \"poc.zip\"]},\n zip_safe=True,\n license_files=(\"LICENSE\",),\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=\">=3.7,<3.9\",\n install_requires=[\n \"cryptography>=36.0.0\",\n \"Flask==2.1.2\",\n \"Flask-JWT-Extended==4.4.3\",\n \"Flask-SQLAlchemy==2.5.1\",\n \"google-api-python-client==2.49.0\",\n \"grpcio==1.46.3\",\n \"gunicorn==20.1.0\",\n \"numpy\",\n \"protobuf==3.20.1\",\n \"psutil==5.9.1\",\n \"PyYAML==6.0\",\n \"six>=1.15.0\",\n \"tenseal==0.3.0\",\n \"msgpack==1.0.3\",\n 
\"docker>=6.0\",\n ],\n entry_points={\n \"console_scripts\": [\n \"provision=nvflare.lighter.provision:main\",\n \"poc=nvflare.lighter.poc:main\",\n \"nvflare=nvflare.cli:main\",\n \"authz_preview=nvflare.fuel.hci.tools.authz_preview:main\",\n ],\n },\n)\n\nos.remove(os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport os\nimport shutil\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n# read the contents of your README file\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nif os.path.exists(os.path.join(this_directory, \"nvflare\", \"poc.zip\")):\n os.remove(os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\nshutil.make_archive(base_name=\"poc\", format=\"zip\", root_dir=os.path.join(this_directory, \"nvflare\"), base_dir=\"poc\")\nshutil.move(\"poc.zip\", os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\n\nversions = versioneer.get_versions()\nif versions[\"error\"]:\n today = datetime.date.today().timetuple()\n year = today[0] % 1000\n month = today[1]\n day = today[2]\n version = f\"0.0.{year:02d}{month:02d}{day:02d}\"\nelse:\n version = versions[\"version\"]\n\nrelease = os.environ.get(\"NVFL_RELEASE\")\nif release == \"1\":\n package_name = \"nvflare\"\nelse:\n package_name = \"nvflare-nightly\"\n\nsetup(\n name=package_name,\n version=version,\n cmdclass=versioneer.get_cmdclass(),\n description=\"Federated Learning Application Runtime Environment\",\n url=\"https://github.com/NVIDIA/NVFlare\",\n package_dir={\"nvflare\": \"nvflare\"},\n packages=find_packages(\n where=\".\",\n include=[\n \"*\",\n ],\n exclude=[\"tests\", \"tests.*\"],\n ),\n package_data={\"\": [\"*.yml\", \"*.html\", \"poc.zip\"]},\n zip_safe=True,\n license_files=(\"LICENSE\",),\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=\">=3.7,<3.9\",\n install_requires=[\n \"cryptography>=36.0.0\",\n \"Flask==2.1.2\",\n \"Flask-JWT-Extended==4.4.3\",\n \"Flask-SQLAlchemy==2.5.1\",\n \"google-api-python-client==2.49.0\",\n \"grpcio==1.46.3\",\n \"gunicorn==20.1.0\",\n \"numpy\",\n \"protobuf==3.20.1\",\n \"psutil==5.9.1\",\n \"PyYAML==6.0\",\n \"six>=1.15.0\",\n \"msgpack==1.0.3\",\n \"docker>=6.0\",\n ],\n extras_require={\"HE\": [\"tenseal==0.3.0\"]},\n entry_points={\n \"console_scripts\": [\n \"provision=nvflare.lighter.provision:main\",\n \"poc=nvflare.lighter.poc:main\",\n \"nvflare=nvflare.cli:main\",\n \"authz_preview=nvflare.fuel.hci.tools.authz_preview:main\",\n ],\n },\n)\n\nos.remove(os.path.join(this_directory, 
\"nvflare\", \"poc.zip\"))\n", "path": "setup.py"}]} |
gh_patches_debug_1305 | rasdani/github-patches | git_diff | kartoza__prj.app-342 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Order sponsors in their groups
From @andreasneumann:
```For the sponsors listing - is there a clear order within the same level at http://changelog.qgis.org/en/qgis/version/2.16.0/ ?
In my opinion, it should either be ordered alphabetically or by date. Neither seems to be the case. I would prefer alphabetic ordering within each sponsorship level.```
I think it is actually better to order them from most recently added sponsors first to oldest sponsors last. That way they get the most visibility when they are new, degrading over time to the bottom of the list. What do you think @andreasneumann?
--- END ISSUE ---
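One Django ORM detail worth keeping in mind when reading the queryset in the file below: each call to `.order_by()` replaces any ordering set by an earlier call instead of adding to it, so grouped ordering has to be expressed in a single call. A minimal sketch, assuming a configured Django project where the `SponsorshipPeriod` model is importable (the import path is inferred from the relative imports in the file and may differ):

```python
# Illustrative only: requires a configured Django project for this app.
from changes.models.sponsorship_period import SponsorshipPeriod  # assumed path

# Chained calls: only the LAST .order_by() survives, so the start_date
# ordering is silently discarded and records are sorted by level only.
qs = (SponsorshipPeriod.objects
      .order_by("start_date")
      .order_by("-sponsorship_level__value"))

# Single call with several fields: group by sponsorship level first, then
# order alphabetically by sponsor name within each level.
qs = SponsorshipPeriod.objects.order_by("-sponsorship_level__value", "sponsor__name")
```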
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/changes/models/version.py`
Content:
```
1 # coding=utf-8
2 from django.core.urlresolvers import reverse
3 # from django.utils.text import slugify
4 from common.utilities import version_slugify
5 import os
6 import logging
7 from core.settings.contrib import STOP_WORDS
8 from django.conf.global_settings import MEDIA_ROOT
9 from django.db import models
10 from .entry import Entry
11 from .sponsorship_period import SponsorshipPeriod
12 from django.contrib.auth.models import User
13 from django.utils.translation import ugettext_lazy as _
14
15 logger = logging.getLogger(__name__)
16
17
18 class ApprovedVersionManager(models.Manager):
19 """Custom version manager that shows only approved records."""
20
21 def get_queryset(self):
22 """Query set generator"""
23 return super(
24 ApprovedVersionManager, self).get_queryset().filter(
25 approved=True)
26
27
28 class UnapprovedVersionManager(models.Manager):
29 """Custom version manager that shows only unapproved records."""
30
31 def get_queryset(self):
32 """Query set generator"""
33 return super(
34 UnapprovedVersionManager, self).get_queryset().filter(
35 approved=False)
36
37
38 # noinspection PyUnresolvedReferences
39 class Version(models.Model):
40 """A version model that the changelog is associated with.."""
41
42 name = models.CharField(
43 help_text='Name of this release e.g. 1.0.1.',
44 max_length=255,
45 null=False,
46 blank=False,
47 unique=False)
48
49 padded_version = models.CharField(
50 help_text=(
51 'Numeric version for this release e.g. 001000001 for 1.0.1 '
52 'calculated by zero padding each component of maj/minor/bugfix '
53 'elements from name.'),
54 max_length=9,
55 null=False,
56 blank=True,
57 unique=False)
58
59 approved = models.BooleanField(
60 help_text=(
61 'Whether this version has been approved for use by the '
62 'project owner.'),
63 default=False)
64
65 image_file = models.ImageField(
66 help_text=(
67 'An optional image for this version e.g. a splashscreen. '
68 'Most browsers support dragging the image directly on to the '
69 '"Choose File" button above.'),
70 upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),
71 blank=True)
72
73 description = models.TextField(
74 null=True,
75 blank=True,
76 help_text='Describe the new version. Markdown is supported.')
77
78 release_date = models.DateField(
79 _('Release date (yyyy-mm-dd)'),
80 help_text='Date of official release',
81 null=True,
82 blank=True)
83
84 author = models.ForeignKey(User)
85 slug = models.SlugField()
86 project = models.ForeignKey('base.Project')
87 objects = models.Manager()
88 approved_objects = ApprovedVersionManager()
89 unapproved_objects = UnapprovedVersionManager()
90
91 # noinspection PyClassicStyleClass
92 class Meta:
93 """Meta options for the version class."""
94 unique_together = (
95 ('name', 'project'),
96 ('slug', 'project'),
97 )
98 app_label = 'changes'
99 # ordering = ['-datetime_created']
100
101 def save(self, *args, **kwargs):
102 if not self.pk:
103 words = self.name.split()
104 filtered_words = [t for t in words if t.lower() not in STOP_WORDS]
105 new_list = ' '.join(filtered_words)
106 self.slug = version_slugify(new_list)[:50]
107 self.padded_version = self.pad_name(self.name)
108 super(Version, self).save(*args, **kwargs)
109
110 def pad_name(self, version):
111 """Create a 0 padded version of the version name.
112
113 e.g. input: 2.10.1
114 e.g. output: 002010100
115
116 This will ensure we have sortable version names.
117
118 :param version: A text version in the form 0.0.0 - if the version is
119 not in this form, we return the version unaltered.
120 :type version: str
121
122 :returns: Zero padded representation of the version e.g. 001010100
123 :rtype: str
124
125 """
126 tokens = version.split('.')
127 if len(tokens) != 3:
128 return version
129 result = ''
130 for token in tokens:
131 result += token.zfill(3)
132 return result
133
134 def __unicode__(self):
135 return u'%s : %s' % (self.project.name, self.name)
136
137 def get_absolute_url(self):
138 return reverse('version-detail', kwargs={
139 'slug': self.slug,
140 'project_slug': self.project.slug
141 })
142
143 def entries(self):
144 """Get the entries for this version."""
145 qs = Entry.objects.filter(version=self).order_by('category__sort_number')
146 return qs
147
148 def _entries_for_category(self, category):
149 """All entries for this version and filtered by the given category.
150
151 :param category: Category to filter by.
152 :type category: Category
153
154 .. note:: only approved entries returned.
155 """
156 qs = Entry.objects.filter(
157 version=self,
158 category=category,
159 approved=True)
160 return qs
161
162 def categories(self):
163 """Get a list of categories where there are one or more entries.
164
165 Example use in template::
166 {% for row in version.categories %}
167 <h2 class="text-muted">{{ row.category.name }}</h2>
168 <ul>
169 {% for entry in row.entries %}
170 <li>{{ entry.name }}</li>
171 {% endfor %}
172 </ul>
173 {% endfor %}
174 """
175 qs = self.entries()
176 used = []
177 categories = []
178 for entry in qs:
179 category = entry.category
180 if category not in used:
181 row = {
182 'category': category,
183 'entries': self._entries_for_category(category)
184 }
185 categories.append(row)
186 used.append(category)
187 return categories
188
189 def sponsors(self):
190 """Return a list of sponsors current at time of this version release.
191
192 :returns: A list of SponsorPeriod objects for current project
193 whose release date coincides with the version release date.
194 Only approved sponsors are returned.
195 Returns None if the release date (which is optional) is not set.
196 :rtype: Queryset, None
197 """
198 if self.release_date is None:
199 return None
200 sponsors = SponsorshipPeriod.approved_objects.filter(
201 end_date__gte=self.release_date).filter(
202 start_date__lte=self.release_date).filter(
203 project=self.project).order_by(
204 'start_date').order_by(
205 '-sponsorship_level__value')
206 return sponsors
207
208 def formatted_release_date(self):
209 """"Return a long formatted released date e.g. 24 June 2016.
210
211 :returns: A string containing the long formatted date, or an empty
212 string if the date is not set.
213 :rtype: str
214 """
215 long_date = None
216 if self.release_date:
217 # %-d Day of the month as a decimal number. (Platform specific)
218 # %B Month as locale’s full name.
219 # %Y Year e.g. 2016
220 long_date = self.release_date.strftime('%-d %B, %Y')
221 return long_date
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_project/changes/models/version.py b/django_project/changes/models/version.py
--- a/django_project/changes/models/version.py
+++ b/django_project/changes/models/version.py
@@ -202,7 +202,7 @@
start_date__lte=self.release_date).filter(
project=self.project).order_by(
'start_date').order_by(
- '-sponsorship_level__value')
+ '-sponsorship_level__value', 'sponsor__name')
return sponsors
def formatted_release_date(self):
| {"golden_diff": "diff --git a/django_project/changes/models/version.py b/django_project/changes/models/version.py\n--- a/django_project/changes/models/version.py\n+++ b/django_project/changes/models/version.py\n@@ -202,7 +202,7 @@\n start_date__lte=self.release_date).filter(\n project=self.project).order_by(\n 'start_date').order_by(\n- '-sponsorship_level__value')\n+ '-sponsorship_level__value', 'sponsor__name')\n return sponsors\n \n def formatted_release_date(self):\n", "issue": "Order sponsors in their groups\nFrom @andreasneumann: \n\n```For the sponsors listing - is there a clear order within the same level at http://changelog.qgis.org/en/qgis/version/2.16.0/ ?\n\nIn my opinion, it should either be ordered alphabetically or by date. Neither seems to be the case. I would prefer alphabetic ordering with in each sponsorship level.```\n\nI think it is actually better to order them with most recently added sponsors first to oldest sponsors last. That we they get the most visibility when they are new, degrading over time to the bottom of the list. What do you think @andreasneumann ?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.core.urlresolvers import reverse\n# from django.utils.text import slugify\nfrom common.utilities import version_slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom .entry import Entry\nfrom .sponsorship_period import SponsorshipPeriod\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedVersionManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedVersionManager, self).get_queryset().filter(\n approved=False)\n\n\n# noinspection PyUnresolvedReferences\nclass Version(models.Model):\n \"\"\"A version model that the changelog is associated with..\"\"\"\n\n name = models.CharField(\n help_text='Name of this release e.g. 1.0.1.',\n max_length=255,\n null=False,\n blank=False,\n unique=False)\n\n padded_version = models.CharField(\n help_text=(\n 'Numeric version for this release e.g. 001000001 for 1.0.1 '\n 'calculated by zero padding each component of maj/minor/bugfix '\n 'elements from name.'),\n max_length=9,\n null=False,\n blank=True,\n unique=False)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this version has been approved for use by the '\n 'project owner.'),\n default=False)\n\n image_file = models.ImageField(\n help_text=(\n 'An optional image for this version e.g. a splashscreen. '\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True)\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new version. 
Markdown is supported.')\n\n release_date = models.DateField(\n _('Release date (yyyy-mm-dd)'),\n help_text='Date of official release',\n null=True,\n blank=True)\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedVersionManager()\n unapproved_objects = UnapprovedVersionManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('name', 'project'),\n ('slug', 'project'),\n )\n app_label = 'changes'\n # ordering = ['-datetime_created']\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = version_slugify(new_list)[:50]\n self.padded_version = self.pad_name(self.name)\n super(Version, self).save(*args, **kwargs)\n\n def pad_name(self, version):\n \"\"\"Create a 0 padded version of the version name.\n\n e.g. input: 2.10.1\n e.g. output: 002010100\n\n This will ensure we have sortable version names.\n\n :param version: A text version in the form 0.0.0 - if the version is\n not in this form, we return the version unaltered.\n :type version: str\n\n :returns: Zero padded representation of the version e.g. 001010100\n :rtype: str\n\n \"\"\"\n tokens = version.split('.')\n if len(tokens) != 3:\n return version\n result = ''\n for token in tokens:\n result += token.zfill(3)\n return result\n\n def __unicode__(self):\n return u'%s : %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n return reverse('version-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def entries(self):\n \"\"\"Get the entries for this version.\"\"\"\n qs = Entry.objects.filter(version=self).order_by('category__sort_number')\n return qs\n\n def _entries_for_category(self, category):\n \"\"\"All entries for this version and filtered by the given category.\n\n :param category: Category to filter by.\n :type category: Category\n\n .. 
note:: only approved entries returned.\n \"\"\"\n qs = Entry.objects.filter(\n version=self,\n category=category,\n approved=True)\n return qs\n\n def categories(self):\n \"\"\"Get a list of categories where there are one or more entries.\n\n Example use in template::\n {% for row in version.categories %}\n <h2 class=\"text-muted\">{{ row.category.name }}</h2>\n <ul>\n {% for entry in row.entries %}\n <li>{{ entry.name }}</li>\n {% endfor %}\n </ul>\n {% endfor %}\n \"\"\"\n qs = self.entries()\n used = []\n categories = []\n for entry in qs:\n category = entry.category\n if category not in used:\n row = {\n 'category': category,\n 'entries': self._entries_for_category(category)\n }\n categories.append(row)\n used.append(category)\n return categories\n\n def sponsors(self):\n \"\"\"Return a list of sponsors current at time of this version release.\n\n :returns: A list of SponsorPeriod objects for current project\n whose release date coincides with the version release date.\n Only approved sponsors are returned.\n Returns None if the release date (which is optional) is not set.\n :rtype: Queryset, None\n \"\"\"\n if self.release_date is None:\n return None\n sponsors = SponsorshipPeriod.approved_objects.filter(\n end_date__gte=self.release_date).filter(\n start_date__lte=self.release_date).filter(\n project=self.project).order_by(\n 'start_date').order_by(\n '-sponsorship_level__value')\n return sponsors\n\n def formatted_release_date(self):\n \"\"\"\"Return a long formatted released date e.g. 24 June 2016.\n\n :returns: A string containing the long formatted date, or an empty\n string if the date is not set.\n :rtype: str\n \"\"\"\n long_date = None\n if self.release_date:\n # %-d Day of the month as a decimal number. (Platform specific)\n # %B Month as locale\u2019s full name.\n # %Y Year e.g. 2016\n long_date = self.release_date.strftime('%-d %B, %Y')\n return long_date\n", "path": "django_project/changes/models/version.py"}], "after_files": [{"content": "# coding=utf-8\nfrom django.core.urlresolvers import reverse\n# from django.utils.text import slugify\nfrom common.utilities import version_slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom .entry import Entry\nfrom .sponsorship_period import SponsorshipPeriod\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedVersionManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedVersionManager, self).get_queryset().filter(\n approved=False)\n\n\n# noinspection PyUnresolvedReferences\nclass Version(models.Model):\n \"\"\"A version model that the changelog is associated with..\"\"\"\n\n name = models.CharField(\n help_text='Name of this release e.g. 1.0.1.',\n max_length=255,\n null=False,\n blank=False,\n unique=False)\n\n padded_version = models.CharField(\n help_text=(\n 'Numeric version for this release e.g. 
001000001 for 1.0.1 '\n 'calculated by zero padding each component of maj/minor/bugfix '\n 'elements from name.'),\n max_length=9,\n null=False,\n blank=True,\n unique=False)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this version has been approved for use by the '\n 'project owner.'),\n default=False)\n\n image_file = models.ImageField(\n help_text=(\n 'An optional image for this version e.g. a splashscreen. '\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True)\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new version. Markdown is supported.')\n\n release_date = models.DateField(\n _('Release date (yyyy-mm-dd)'),\n help_text='Date of official release',\n null=True,\n blank=True)\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedVersionManager()\n unapproved_objects = UnapprovedVersionManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('name', 'project'),\n ('slug', 'project'),\n )\n app_label = 'changes'\n # ordering = ['-datetime_created']\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = version_slugify(new_list)[:50]\n self.padded_version = self.pad_name(self.name)\n super(Version, self).save(*args, **kwargs)\n\n def pad_name(self, version):\n \"\"\"Create a 0 padded version of the version name.\n\n e.g. input: 2.10.1\n e.g. output: 002010100\n\n This will ensure we have sortable version names.\n\n :param version: A text version in the form 0.0.0 - if the version is\n not in this form, we return the version unaltered.\n :type version: str\n\n :returns: Zero padded representation of the version e.g. 001010100\n :rtype: str\n\n \"\"\"\n tokens = version.split('.')\n if len(tokens) != 3:\n return version\n result = ''\n for token in tokens:\n result += token.zfill(3)\n return result\n\n def __unicode__(self):\n return u'%s : %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n return reverse('version-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def entries(self):\n \"\"\"Get the entries for this version.\"\"\"\n qs = Entry.objects.filter(version=self).order_by('category__sort_number')\n return qs\n\n def _entries_for_category(self, category):\n \"\"\"All entries for this version and filtered by the given category.\n\n :param category: Category to filter by.\n :type category: Category\n\n .. 
note:: only approved entries returned.\n \"\"\"\n qs = Entry.objects.filter(\n version=self,\n category=category,\n approved=True)\n return qs\n\n def categories(self):\n \"\"\"Get a list of categories where there are one or more entries.\n\n Example use in template::\n {% for row in version.categories %}\n <h2 class=\"text-muted\">{{ row.category.name }}</h2>\n <ul>\n {% for entry in row.entries %}\n <li>{{ entry.name }}</li>\n {% endfor %}\n </ul>\n {% endfor %}\n \"\"\"\n qs = self.entries()\n used = []\n categories = []\n for entry in qs:\n category = entry.category\n if category not in used:\n row = {\n 'category': category,\n 'entries': self._entries_for_category(category)\n }\n categories.append(row)\n used.append(category)\n return categories\n\n def sponsors(self):\n \"\"\"Return a list of sponsors current at time of this version release.\n\n :returns: A list of SponsorPeriod objects for current project\n whose release date coincides with the version release date.\n Only approved sponsors are returned.\n Returns None if the release date (which is optional) is not set.\n :rtype: Queryset, None\n \"\"\"\n if self.release_date is None:\n return None\n sponsors = SponsorshipPeriod.approved_objects.filter(\n end_date__gte=self.release_date).filter(\n start_date__lte=self.release_date).filter(\n project=self.project).order_by(\n 'start_date').order_by(\n '-sponsorship_level__value', 'sponsor__name')\n return sponsors\n\n def formatted_release_date(self):\n \"\"\"\"Return a long formatted released date e.g. 24 June 2016.\n\n :returns: A string containing the long formatted date, or an empty\n string if the date is not set.\n :rtype: str\n \"\"\"\n long_date = None\n if self.release_date:\n # %-d Day of the month as a decimal number. (Platform specific)\n # %B Month as locale\u2019s full name.\n # %Y Year e.g. 2016\n long_date = self.release_date.strftime('%-d %B, %Y')\n return long_date\n", "path": "django_project/changes/models/version.py"}]} |
gh_patches_debug_1306 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1538 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong error in Metric.__iter__
## 🐛 Bug
In the class `Metric`, the `__iter__` method is defined as follows:
```python
def __iter__(self):
"""Iteration over metrics are not allowed. Use metric collections for nesting metrics."""
raise NotImplementedError("Metrics does not support iteration.")
```
In python's docs, the following note is written about `NotImplementedError` (https://docs.python.org/3/library/exceptions.html#NotImplementedError):
> It should not be used to indicate that an operator or method is not meant to be supported at all – in that case either leave the operator / method undefined or, if a subclass, set it to [None](https://docs.python.org/3/library/constants.html#None).
In fact, the use cases for `NotImplementedError` are:
> In user defined base classes, abstract methods should raise this exception when they require derived classes to override the method, or while the class is being developed to indicate that the real implementation still needs to be added.
In PyCharm (and maybe other python code checkers), this leads to a warning for every sub-class of Metric, saying that all abstract methods should be implemented (PyCharm understands a method that raises a `NotImplementedError` as abstract, even if there is no `@abstractmethod` decorator on this method).
Was there a good reason to define `__iter__` like that? Otherwise, could we remove it?
--- END ISSUE ---
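As a small, self-contained illustration of the convention the Python documentation describes: leaving a special method undefined, or setting it to `None`, makes the operation unavailable and causes `iter()` to raise `TypeError`, whereas raising `NotImplementedError` reads to static checkers as an abstract method awaiting an override. The class names below are hypothetical and unrelated to torchmetrics.

```python
class Plain:
    pass                 # no __iter__ at all: iter(Plain()) raises TypeError


class Blocked:
    __iter__ = None      # explicit "not iterable": iter(Blocked()) raises TypeError


class Noisy:
    def __iter__(self):
        # the pattern the issue objects to: checkers treat this as abstract
        raise NotImplementedError("iteration is not supported")


for cls in (Plain, Blocked):
    try:
        iter(cls())
    except TypeError as err:
        print(f"{cls.__name__}: {err}")
```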
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/metric.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import functools
15 import inspect
16 from abc import ABC, abstractmethod
17 from contextlib import contextmanager
18 from copy import deepcopy
19 from typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple, Union
20
21 import torch
22 from torch import Tensor
23 from torch.nn import Module
24
25 from torchmetrics.utilities import apply_to_collection, rank_zero_warn
26 from torchmetrics.utilities.data import (
27 _flatten,
28 _squeeze_if_scalar,
29 dim_zero_cat,
30 dim_zero_max,
31 dim_zero_mean,
32 dim_zero_min,
33 dim_zero_sum,
34 )
35 from torchmetrics.utilities.distributed import gather_all_tensors
36 from torchmetrics.utilities.exceptions import TorchMetricsUserError
37
38
39 def jit_distributed_available() -> bool:
40 """Determine if distributed mode is initialized."""
41 return torch.distributed.is_available() and torch.distributed.is_initialized()
42
43
44 class Metric(Module, ABC):
45 """Base class for all metrics present in the Metrics API.
46
47 Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to
48 handle distributed synchronization and per-step metric computation.
49
50 Override ``update()`` and ``compute()`` functions to implement your own metric. Use
51 ``add_state()`` to register metric state variables which keep track of state on each
52 call of ``update()`` and are synchronized across processes when ``compute()`` is called.
53
54 Note:
55 Metric state variables can either be :class:`~torch.Tensor` or an empty list which can we used
56 to store :class:`~torch.Tensor`.
57
58 Note:
59 Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``
60 is valid, but it won't return the metric value at the current step. A call to ``forward()``
61 automatically calls ``update()`` and also returns the metric value at the current step.
62
63 Args:
64 kwargs: additional keyword arguments, see :ref:`Metric kwargs` for more info.
65
66 - compute_on_cpu: If metric state should be stored on CPU during computations. Only works
67 for list states.
68 - dist_sync_on_step: If metric state should synchronize on ``forward()``. Default is ``False``
69 - process_group: The process group on which the synchronization is called. Default is the world.
70 - dist_sync_fn: function that performs the allgather option on the metric state. Default is an
71 custom implementation that calls ``torch.distributed.all_gather`` internally.
72 - distributed_available_fn: function that checks if the distributed backend is available.
73 Defaults to a check of ``torch.distributed.is_available()`` and ``torch.distributed.is_initialized()``.
74 - sync_on_compute: If metric state should synchronize when ``compute`` is called. Default is ``True``-
75 """
76
77 __jit_ignored_attributes__ = ["device"]
78 __jit_unused_properties__ = ["is_differentiable"]
79 is_differentiable: Optional[bool] = None
80 higher_is_better: Optional[bool] = None
81 full_state_update: Optional[bool] = None
82
83 def __init__(
84 self,
85 **kwargs: Any,
86 ) -> None:
87 super().__init__()
88
89 # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
90 # torch/nn/modules/module.py#L227)
91 torch._C._log_api_usage_once(f"torchmetrics.metric.{self.__class__.__name__}")
92
93 self._device = torch.device("cpu")
94
95 self.compute_on_cpu = kwargs.pop("compute_on_cpu", False)
96 if not isinstance(self.compute_on_cpu, bool):
97 raise ValueError(
98 f"Expected keyword argument `compute_on_cpu` to be an `bool` but got {self.compute_on_cpu}"
99 )
100
101 self.dist_sync_on_step = kwargs.pop("dist_sync_on_step", False)
102 if not isinstance(self.dist_sync_on_step, bool):
103 raise ValueError(
104 f"Expected keyword argument `dist_sync_on_step` to be an `bool` but got {self.dist_sync_on_step}"
105 )
106
107 self.process_group = kwargs.pop("process_group", None)
108
109 self.dist_sync_fn = kwargs.pop("dist_sync_fn", None)
110 if self.dist_sync_fn is not None and not callable(self.dist_sync_fn):
111 raise ValueError(
112 f"Expected keyword argument `dist_sync_fn` to be an callable function but got {self.dist_sync_fn}"
113 )
114
115 self.distributed_available_fn = kwargs.pop("distributed_available_fn", jit_distributed_available)
116
117 self.sync_on_compute = kwargs.pop("sync_on_compute", True)
118 if not isinstance(self.sync_on_compute, bool):
119 raise ValueError(
120 f"Expected keyword argument `sync_on_compute` to be a `bool` but got {self.sync_on_compute}"
121 )
122
123 if kwargs:
124 kwargs_ = [f"`{a}`" for a in sorted(kwargs)]
125 raise ValueError(f"Unexpected keyword arguments: {', '.join(kwargs_)}")
126
127 # initialize
128 self._update_signature = inspect.signature(self.update)
129 self.update: Callable = self._wrap_update(self.update)
130 self.compute: Callable = self._wrap_compute(self.compute)
131 self._computed = None
132 self._forward_cache = None
133 self._update_count = 0
134 self._to_sync = self.sync_on_compute
135 self._should_unsync = True
136 self._enable_grad = False
137
138 # initialize state
139 self._defaults: Dict[str, Union[List, Tensor]] = {}
140 self._persistent: Dict[str, bool] = {}
141 self._reductions: Dict[str, Union[str, Callable[..., Any], None]] = {}
142
143 # state management
144 self._is_synced = False
145 self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None
146
147 @property
148 def _update_called(self) -> bool:
149 # TODO: this is needed for internal lightning, remove after v0.12 and update on lightning side
150 return self._update_count > 0
151
152 @property
153 def update_called(self) -> bool:
154 """Returns `True` if `update` or `forward` has been called initialization or last `reset`."""
155 return self._update_count > 0
156
157 @property
158 def update_count(self) -> int:
159 """Get the number of times `update` and/or `forward` has been called since initialization or last
160 `reset`.
161 """
162 return self._update_count
163
164 def add_state(
165 self,
166 name: str,
167 default: Union[list, Tensor],
168 dist_reduce_fx: Optional[Union[str, Callable]] = None,
169 persistent: bool = False,
170 ) -> None:
171 """Add metric state variable. Only used by subclasses.
172
173 Args:
174 name: The name of the state variable. The variable will then be accessible at ``self.name``.
175 default: Default value of the state; can either be a :class:`~torch.Tensor` or an empty list.
176 The state will be reset to this value when ``self.reset()`` is called.
177 dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode.
178 If value is ``"sum"``, ``"mean"``, ``"cat"``, ``"min"`` or ``"max"`` we will use ``torch.sum``,
179 ``torch.mean``, ``torch.cat``, ``torch.min`` and ``torch.max``` respectively, each with argument
180 ``dim=0``. Note that the ``"cat"`` reduction only makes sense if the state is a list, and not
181 a tensor. The user can also pass a custom function in this parameter.
182 persistent (Optional): whether the state will be saved as part of the modules ``state_dict``.
183 Default is ``False``.
184
185 Note:
186 Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes.
187 However, there won't be any reduction function applied to the synchronized metric state.
188
189 The metric states would be synced as follows
190
191 - If the metric state is :class:`~torch.Tensor`, the synced value will be a stacked :class:`~torch.Tensor`
192 across the process dimension if the metric state was a :class:`~torch.Tensor`. The original
193 :class:`~torch.Tensor` metric state retains dimension and hence the synchronized output will be of shape
194 ``(num_process, ...)``.
195
196 - If the metric state is a ``list``, the synced value will be a ``list`` containing the
197 combined elements from all processes.
198
199 Note:
200 When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow
201 the format discussed in the above note.
202
203 Raises:
204 ValueError:
205 If ``default`` is not a ``tensor`` or an ``empty list``.
206 ValueError:
207 If ``dist_reduce_fx`` is not callable or one of ``"mean"``, ``"sum"``, ``"cat"``, ``None``.
208 """
209 if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default):
210 raise ValueError("state variable must be a tensor or any empty list (where you can append tensors)")
211
212 if dist_reduce_fx == "sum":
213 dist_reduce_fx = dim_zero_sum
214 elif dist_reduce_fx == "mean":
215 dist_reduce_fx = dim_zero_mean
216 elif dist_reduce_fx == "max":
217 dist_reduce_fx = dim_zero_max
218 elif dist_reduce_fx == "min":
219 dist_reduce_fx = dim_zero_min
220 elif dist_reduce_fx == "cat":
221 dist_reduce_fx = dim_zero_cat
222 elif dist_reduce_fx is not None and not callable(dist_reduce_fx):
223 raise ValueError("`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', 'min', 'max', None]")
224
225 if isinstance(default, Tensor):
226 default = default.contiguous()
227
228 setattr(self, name, default)
229
230 self._defaults[name] = deepcopy(default)
231 self._persistent[name] = persistent
232 self._reductions[name] = dist_reduce_fx
233
234 @torch.jit.unused
235 def forward(self, *args: Any, **kwargs: Any) -> Any:
236 """``forward`` serves the dual purpose of both computing the metric on the current batch of inputs but also
237         add the batch statistics to the overall accumulating metric state.
238
239 Input arguments are the exact same as corresponding ``update`` method. The returned output is the exact same as
240 the output of ``compute``.
241 """
242 # check if states are already synced
243 if self._is_synced:
244 raise TorchMetricsUserError(
245 "The Metric shouldn't be synced when performing ``forward``. "
246 "HINT: Did you forget to call ``unsync`` ?."
247 )
248
249 if self.full_state_update or self.full_state_update is None or self.dist_sync_on_step:
250 self._forward_cache = self._forward_full_state_update(*args, **kwargs)
251 else:
252 self._forward_cache = self._forward_reduce_state_update(*args, **kwargs)
253
254 return self._forward_cache
255
256 def _forward_full_state_update(self, *args: Any, **kwargs: Any) -> Any:
257 """forward computation using two calls to `update` to calculate the metric value on the current batch and
258 accumulate global state.
259
260 Doing this secures that metrics that need access to the full metric state during `update` works as expected.
261 """
262 # global accumulation
263 self.update(*args, **kwargs)
264 _update_count = self._update_count
265
266 self._to_sync = self.dist_sync_on_step
267 # skip restore cache operation from compute as cache is stored below.
268 self._should_unsync = False
269 # skip computing on cpu for the batch
270 _temp_compute_on_cpu = self.compute_on_cpu
271 self.compute_on_cpu = False
272
273 # save context before switch
274 cache = {attr: getattr(self, attr) for attr in self._defaults}
275
276 # call reset, update, compute, on single batch
277 self._enable_grad = True # allow grads for batch computation
278 self.reset()
279 self.update(*args, **kwargs)
280 batch_val = self.compute()
281
282 # restore context
283 for attr, val in cache.items():
284 setattr(self, attr, val)
285 self._update_count = _update_count
286
287 # restore context
288 self._is_synced = False
289 self._should_unsync = True
290 self._to_sync = self.sync_on_compute
291 self._computed = None
292 self._enable_grad = False
293 self.compute_on_cpu = _temp_compute_on_cpu
294 if self.compute_on_cpu:
295 self._move_list_states_to_cpu()
296
297 return batch_val
298
299 def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:
300 """forward computation using single call to `update` to calculate the metric value on the current batch and
301 accumulate global state.
302
303 This can be done when the global metric state is a sinple reduction of batch states.
304 """
305 # store global state and reset to default
306 global_state = {attr: getattr(self, attr) for attr in self._defaults}
307 _update_count = self._update_count
308 self.reset()
309
310 # local syncronization settings
311 self._to_sync = self.dist_sync_on_step
312 self._should_unsync = False
313 _temp_compute_on_cpu = self.compute_on_cpu
314 self.compute_on_cpu = False
315 self._enable_grad = True # allow grads for batch computation
316
317 # calculate batch state and compute batch value
318 self.update(*args, **kwargs)
319 batch_val = self.compute()
320
321 # reduce batch and global state
322 self._update_count = _update_count + 1
323 with torch.no_grad():
324 self._reduce_states(global_state)
325
326 # restore context
327 self._is_synced = False
328 self._should_unsync = True
329 self._to_sync = self.sync_on_compute
330 self._computed = None
331 self._enable_grad = False
332 self.compute_on_cpu = _temp_compute_on_cpu
333 if self.compute_on_cpu:
334 self._move_list_states_to_cpu()
335
336 return batch_val
337
338 def _reduce_states(self, incoming_state: Dict[str, Any]) -> None:
339 """Add an incoming metric state to the current state of the metric.
340
341 Args:
342 incoming_state: a dict containing a metric state similar metric itself
343 """
344 for attr in self._defaults:
345 local_state = getattr(self, attr)
346 global_state = incoming_state[attr]
347 reduce_fn = self._reductions[attr]
348 if reduce_fn == dim_zero_sum:
349 reduced = global_state + local_state
350 elif reduce_fn == dim_zero_mean:
351 reduced = ((self._update_count - 1) * global_state + local_state).float() / self._update_count
352 elif reduce_fn == dim_zero_max:
353 reduced = torch.max(global_state, local_state)
354 elif reduce_fn == dim_zero_min:
355 reduced = torch.min(global_state, local_state)
356 elif reduce_fn == dim_zero_cat:
357 reduced = global_state + local_state
358 elif reduce_fn is None and isinstance(global_state, Tensor):
359 reduced = torch.stack([global_state, local_state])
360 elif reduce_fn is None and isinstance(global_state, list):
361 reduced = _flatten([global_state, local_state])
362 else:
363 reduced = reduce_fn(torch.stack([global_state, local_state]))
364
365 setattr(self, attr, reduced)
366
367 def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None:
368 input_dict = {attr: getattr(self, attr) for attr in self._reductions}
369
370 for attr, reduction_fn in self._reductions.items():
371 # pre-concatenate metric states that are lists to reduce number of all_gather operations
372 if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1:
373 input_dict[attr] = [dim_zero_cat(input_dict[attr])]
374
375 output_dict = apply_to_collection(
376 input_dict,
377 Tensor,
378 dist_sync_fn,
379 group=process_group or self.process_group,
380 )
381
382 for attr, reduction_fn in self._reductions.items():
383 # pre-processing ops (stack or flatten for inputs)
384
385 if isinstance(output_dict[attr], list) and len(output_dict[attr]) == 0:
386 setattr(self, attr, [])
387 continue
388
389 if isinstance(output_dict[attr][0], Tensor):
390 output_dict[attr] = torch.stack(output_dict[attr])
391 elif isinstance(output_dict[attr][0], list):
392 output_dict[attr] = _flatten(output_dict[attr])
393
394 if not (callable(reduction_fn) or reduction_fn is None):
395 raise TypeError("reduction_fn must be callable or None")
396 reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]
397 setattr(self, attr, reduced)
398
399 def _wrap_update(self, update: Callable) -> Callable:
400 @functools.wraps(update)
401 def wrapped_func(*args: Any, **kwargs: Any) -> None:
402 self._computed = None
403 self._update_count += 1
404 with torch.set_grad_enabled(self._enable_grad):
405 try:
406 update(*args, **kwargs)
407 except RuntimeError as err:
408 if "Expected all tensors to be on" in str(err):
409 raise RuntimeError(
410 "Encountered different devices in metric calculation (see stacktrace for details)."
411 " This could be due to the metric class not being on the same device as input."
412 f" Instead of `metric={self.__class__.__name__}(...)` try to do"
413 f" `metric={self.__class__.__name__}(...).to(device)` where"
414 " device corresponds to the device of the input."
415 ) from err
416 raise err
417
418 if self.compute_on_cpu:
419 self._move_list_states_to_cpu()
420
421 return wrapped_func
422
423 def _move_list_states_to_cpu(self) -> None:
424 """Move list states to cpu to save GPU memory."""
425 for key in self._defaults:
426 current_val = getattr(self, key)
427 if isinstance(current_val, Sequence):
428 setattr(self, key, [cur_v.to("cpu") for cur_v in current_val])
429
430 def sync(
431 self,
432 dist_sync_fn: Optional[Callable] = None,
433 process_group: Optional[Any] = None,
434 should_sync: bool = True,
435 distributed_available: Optional[Callable] = None,
436 ) -> None:
437 """Sync function for manually controlling when metrics states should be synced across processes.
438
439 Args:
440 dist_sync_fn: Function to be used to perform states synchronization
441 process_group:
442 Specify the process group on which synchronization is called.
443 default: `None` (which selects the entire world)
444 should_sync: Whether to apply to state synchronization. This will have an impact
445 only when running in a distributed setting.
446 distributed_available: Function to determine if we are running inside a distributed setting
447 """
448 if self._is_synced and should_sync:
449 raise TorchMetricsUserError("The Metric has already been synced.")
450
451 if distributed_available is None and self.distributed_available_fn is not None:
452 distributed_available = self.distributed_available_fn
453
454 is_distributed = distributed_available() if callable(distributed_available) else None
455
456 if not should_sync or not is_distributed:
457 return
458
459 if dist_sync_fn is None:
460 dist_sync_fn = gather_all_tensors
461
462 # cache prior to syncing
463 self._cache = {attr: getattr(self, attr) for attr in self._defaults}
464
465 # sync
466 self._sync_dist(dist_sync_fn, process_group=process_group)
467 self._is_synced = True
468
469 def unsync(self, should_unsync: bool = True) -> None:
470 """Unsync function for manually controlling when metrics states should be reverted back to their local
471 states.
472
473 Args:
474 should_unsync: Whether to perform unsync
475 """
476 if not should_unsync:
477 return
478
479 if not self._is_synced:
480 raise TorchMetricsUserError("The Metric has already been un-synced.")
481
482 if self._cache is None:
483 raise TorchMetricsUserError("The internal cache should exist to unsync the Metric.")
484
485 # if we synced, restore to cache so that we can continue to accumulate un-synced state
486 for attr, val in self._cache.items():
487 setattr(self, attr, val)
488 self._is_synced = False
489 self._cache = None
490
491 @contextmanager
492 def sync_context(
493 self,
494 dist_sync_fn: Optional[Callable] = None,
495 process_group: Optional[Any] = None,
496 should_sync: bool = True,
497 should_unsync: bool = True,
498 distributed_available: Optional[Callable] = None,
499 ) -> Generator:
500 """Context manager to synchronize the states between processes when running in a distributed setting and
501 restore the local cache states after yielding.
502
503 Args:
504 dist_sync_fn: Function to be used to perform states synchronization
505 process_group:
506 Specify the process group on which synchronization is called.
507 default: `None` (which selects the entire world)
508 should_sync: Whether to apply to state synchronization. This will have an impact
509 only when running in a distributed setting.
510 should_unsync: Whether to restore the cache state so that the metrics can
511 continue to be accumulated.
512 distributed_available: Function to determine if we are running inside a distributed setting
513 """
514 self.sync(
515 dist_sync_fn=dist_sync_fn,
516 process_group=process_group,
517 should_sync=should_sync,
518 distributed_available=distributed_available,
519 )
520
521 yield
522
523 self.unsync(should_unsync=self._is_synced and should_unsync)
524
525 def _wrap_compute(self, compute: Callable) -> Callable:
526 @functools.wraps(compute)
527 def wrapped_func(*args: Any, **kwargs: Any) -> Any:
528 if self._update_count == 0:
529 rank_zero_warn(
530 f"The ``compute`` method of metric {self.__class__.__name__}"
531 " was called before the ``update`` method which may lead to errors,"
532 " as metric states have not yet been updated.",
533 UserWarning,
534 )
535
536 # return cached value
537 if self._computed is not None:
538 return self._computed
539
540 # compute relies on the sync context manager to gather the states across processes and apply reduction
541 # if synchronization happened, the current rank accumulated states will be restored to keep
542 # accumulation going if ``should_unsync=True``,
543 with self.sync_context(
544 dist_sync_fn=self.dist_sync_fn,
545 should_sync=self._to_sync,
546 should_unsync=self._should_unsync,
547 ):
548 value = compute(*args, **kwargs)
549 self._computed = _squeeze_if_scalar(value)
550
551 return self._computed
552
553 return wrapped_func
554
555 @abstractmethod
556 def update(self, *_: Any, **__: Any) -> None:
557 """Override this method to update the state variables of your metric class."""
558
559 @abstractmethod
560 def compute(self) -> Any:
561 """Override this method to compute the final metric value from state variables synchronized across the
562 distributed backend.
563 """
564
565 def plot(self, *_: Any, **__: Any) -> Any:
566 """Override this method plot the metric value."""
567 raise NotImplementedError
568
569 def reset(self) -> None:
570 """This method automatically resets the metric state variables to their default value."""
571 self._update_count = 0
572 self._forward_cache = None
573 self._computed = None
574
575 for attr, default in self._defaults.items():
576 current_val = getattr(self, attr)
577 if isinstance(default, Tensor):
578 setattr(self, attr, default.detach().clone().to(current_val.device))
579 else:
580 setattr(self, attr, [])
581
582 # reset internal states
583 self._cache = None
584 self._is_synced = False
585
586 def clone(self) -> "Metric":
587 """Make a copy of the metric."""
588 return deepcopy(self)
589
590 def __getstate__(self) -> Dict[str, Any]:
591 """Get the current state, including all metric states, for the metric. Used for loading and saving a metric."""
592 # ignore update and compute functions for pickling
593 return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute", "_update_signature"]}
594
595 def __setstate__(self, state: Dict[str, Any]) -> None:
596 """Set the state of the metric, based on a input state. Used for loading and saving a metric."""
597 # manually restore update and compute functions for pickling
598 self.__dict__.update(state)
599 self._update_signature = inspect.signature(self.update)
600 self.update: Callable = self._wrap_update(self.update)
601 self.compute: Callable = self._wrap_compute(self.compute)
602
603 def __setattr__(self, name: str, value: Any) -> None:
604 """Overwrite default method to prevent specific attributes from being set by user."""
605 if name in ("higher_is_better", "is_differentiable", "full_state_update"):
606 raise RuntimeError(f"Can't change const `{name}`.")
607 super().__setattr__(name, value)
608
609 @property
610 def device(self) -> "torch.device":
611 """Return the device of the metric."""
612 return self._device
613
614 def type(self, dst_type: Union[str, torch.dtype]) -> "Metric":
615 """Method override default and prevent dtype casting.
616
617 Please use `metric.set_dtype(dtype)` instead.
618 """
619 return self
620
621 def float(self) -> "Metric":
622 """Method override default and prevent dtype casting.
623
624 Please use `metric.set_dtype(dtype)` instead.
625 """
626 return self
627
628 def double(self) -> "Metric":
629 """Method override default and prevent dtype casting.
630
631 Please use `metric.set_dtype(dtype)` instead.
632 """
633 return self
634
635 def half(self) -> "Metric":
636 """Method override default and prevent dtype casting.
637
638 Please use `metric.set_dtype(dtype)` instead.
639 """
640 return self
641
642 def set_dtype(self, dst_type: Union[str, torch.dtype]) -> "Metric":
643 """Special version of `type` for transferring all metric states to specific dtype
644 Arguments:
645 dst_type (type or string): the desired type.
646 """
647 return super().type(dst_type)
648
649 def _apply(self, fn: Callable) -> Module:
650 """Overwrite _apply function such that we can also move metric states to the correct device when `.to`,
651 `.cuda`, etc methods are called.
652 """
653 this = super()._apply(fn)
654 # Also apply fn to metric states and defaults
655 for key, value in this._defaults.items():
656 if isinstance(value, Tensor):
657 this._defaults[key] = fn(value)
658 elif isinstance(value, Sequence):
659 this._defaults[key] = [fn(v) for v in value]
660
661 current_val = getattr(this, key)
662 if isinstance(current_val, Tensor):
663 setattr(this, key, fn(current_val))
664 elif isinstance(current_val, Sequence):
665 setattr(this, key, [fn(cur_v) for cur_v in current_val])
666 else:
667 raise TypeError(
668 "Expected metric state to be either a Tensor" f"or a list of Tensor, but encountered {current_val}"
669 )
670
671 # make sure to update the device attribute
672 # if the dummy tensor moves device by fn function we should also update the attribute
673 self._device = fn(torch.zeros(1, device=self.device)).device
674
675 # Additional apply to forward cache and computed attributes (may be nested)
676 if this._computed is not None:
677 this._computed = apply_to_collection(this._computed, Tensor, fn)
678 if this._forward_cache is not None:
679 this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn)
680
681 return this
682
683 def persistent(self, mode: bool = False) -> None:
684 """Method for post-init to change if metric states should be saved to its state_dict."""
685 for key in self._persistent:
686 self._persistent[key] = mode
687
688 def state_dict(
689 self,
690 destination: Dict[str, Any] = None,
691 prefix: str = "",
692 keep_vars: bool = False,
693 ) -> Optional[Dict[str, Any]]:
694 """Get the current state of metric as an dictionary.
695
696 Args:
697 destination: Optional dictionary, that if provided, the state of module will be updated into the dict and
698 the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned.
699 prefix: optional string, a prefix added to parameter and buffer names to compose the keys in state_dict.
700 keep_vars: by default the :class:`~torch.Tensor`s returned in the state dict are detached from autograd.
701 If set to ``True``, detaching will not be performed.
702 """
703 destination = super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
704 # Register metric states to be part of the state_dict
705 for key in self._defaults:
706 if not self._persistent[key]:
707 continue
708 current_val = getattr(self, key)
709 if not keep_vars:
710 if isinstance(current_val, Tensor):
711 current_val = current_val.detach()
712 elif isinstance(current_val, list):
713 current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val]
714 destination[prefix + key] = deepcopy(current_val)
715 return destination
716
717 def _load_from_state_dict(
718 self,
719 state_dict: dict,
720 prefix: str,
721 local_metadata: dict,
722 strict: bool,
723 missing_keys: List[str],
724 unexpected_keys: List[str],
725 error_msgs: List[str],
726 ) -> None:
727 """Loads metric states from state_dict."""
728 for key in self._defaults:
729 name = prefix + key
730 if name in state_dict:
731 setattr(self, key, state_dict.pop(name))
732 super()._load_from_state_dict(
733 state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
734 )
735
736 def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
737 """filter kwargs such that they match the update signature of the metric."""
738 # filter all parameters based on update signature except those of
739 # type VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs)
740 _params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
741 _sign_params = self._update_signature.parameters
742 filtered_kwargs = {
743 k: v for k, v in kwargs.items() if (k in _sign_params and _sign_params[k].kind not in _params)
744 }
745
746 exists_var_keyword = any(v.kind == inspect.Parameter.VAR_KEYWORD for v in _sign_params.values())
747 # if no kwargs filtered, return all kwargs as default
748 if not filtered_kwargs and not exists_var_keyword:
749 # no kwargs in update signature -> don't return any kwargs
750 filtered_kwargs = {}
751 elif exists_var_keyword:
752 # kwargs found in update signature -> return all kwargs to be sure to not omit any.
753 # filtering logic is likely implemented within the update call.
754 filtered_kwargs = kwargs
755 return filtered_kwargs
756
757 def __hash__(self) -> int:
758 """Returns an unique hash of the metric.
759
760 The hash depends on both the class itself but also the current metric state, which therefore enforces that two
761 instances of the same metrics never have the same hash even if they have been updated on the same data.
762 """
763 # we need to add the id here, since PyTorch requires a module hash to be unique.
764 # Internally, PyTorch nn.Module relies on that for children discovery
765 # (see https://github.com/pytorch/pytorch/blob/v1.9.0/torch/nn/modules/module.py#L1544)
766 # For metrics that include tensors it is not a problem,
767 # since their hash is unique based on the memory location but we cannot rely on that for every metric.
768 hash_vals = [self.__class__.__name__, id(self)]
769
770 for key in self._defaults:
771 val = getattr(self, key)
772 # Special case: allow list values, so long
773 # as their elements are hashable
774 if hasattr(val, "__iter__") and not isinstance(val, Tensor):
775 hash_vals.extend(val)
776 else:
777 hash_vals.append(val)
778
779 return hash(tuple(hash_vals))
780
781 def __add__(self, other: "Metric") -> "Metric":
782 """Construct conpositional metric using the addition operator."""
783 return CompositionalMetric(torch.add, self, other)
784
785 def __and__(self, other: "Metric") -> "Metric":
786 """Construct conpositional metric using the logical and operator."""
787 return CompositionalMetric(torch.bitwise_and, self, other)
788
789 def __eq__(self, other: "Metric") -> "Metric":
790 """Construct conpositional metric using the equal operator."""
791 return CompositionalMetric(torch.eq, self, other)
792
793 def __floordiv__(self, other: "Metric") -> "Metric":
794 """Construct conpositional metric using the floor division operator."""
795 return CompositionalMetric(torch.floor_divide, self, other)
796
797 def __ge__(self, other: "Metric") -> "Metric":
798 """Construct conpositional metric using the greater than or equal operator."""
799 return CompositionalMetric(torch.ge, self, other)
800
801 def __gt__(self, other: "Metric") -> "Metric":
802 """Construct conpositional metric using the greater than operator."""
803 return CompositionalMetric(torch.gt, self, other)
804
805 def __le__(self, other: "Metric") -> "Metric":
806 """Construct conpositional metric using the less than or equal operator."""
807 return CompositionalMetric(torch.le, self, other)
808
809 def __lt__(self, other: "Metric") -> "Metric":
810 """Construct conpositional metric using the less than operator."""
811 return CompositionalMetric(torch.lt, self, other)
812
813 def __matmul__(self, other: "Metric") -> "Metric":
814 """Construct conpositional metric using the matrix multiplication operator."""
815 return CompositionalMetric(torch.matmul, self, other)
816
817 def __mod__(self, other: "Metric") -> "Metric":
818 """Construct conpositional metric using the remainder operator."""
819 return CompositionalMetric(torch.fmod, self, other)
820
821 def __mul__(self, other: "Metric") -> "Metric":
822 """Construct conpositional metric using the multiplication operator."""
823 return CompositionalMetric(torch.mul, self, other)
824
825 # Fixme: this shall return bool instead of Metric
826 def __ne__(self, other: "Metric") -> "Metric":
827 """Construct conpositional metric using the not equal operator."""
828 return CompositionalMetric(torch.ne, self, other)
829
830 def __or__(self, other: "Metric") -> "Metric":
831 """Construct conpositional metric using the logical or operator."""
832 return CompositionalMetric(torch.bitwise_or, self, other)
833
834 def __pow__(self, other: "Metric") -> "Metric":
835 """Construct conpositional metric using the exponential/power operator."""
836 return CompositionalMetric(torch.pow, self, other)
837
838 def __radd__(self, other: "Metric") -> "Metric":
839 """Construct conpositional metric using the addition operator."""
840 return CompositionalMetric(torch.add, other, self)
841
842 def __rand__(self, other: "Metric") -> "Metric":
843 """Construct conpositional metric using the logical and operator."""
844 # swap them since bitwise_and only supports that way and it's commutative
845 return CompositionalMetric(torch.bitwise_and, self, other)
846
847 def __rfloordiv__(self, other: "Metric") -> "Metric":
848 """Construct conpositional metric using the floor division operator."""
849 return CompositionalMetric(torch.floor_divide, other, self)
850
851 def __rmatmul__(self, other: "Metric") -> "Metric":
852 """Construct conpositional metric using the matrix multiplication operator."""
853 return CompositionalMetric(torch.matmul, other, self)
854
855 def __rmod__(self, other: "Metric") -> "Metric":
856 """Construct conpositional metric using the remainder operator."""
857 return CompositionalMetric(torch.fmod, other, self)
858
859 def __rmul__(self, other: "Metric") -> "Metric":
860 """Construct conpositional metric using the multiplication operator."""
861 return CompositionalMetric(torch.mul, other, self)
862
863 def __ror__(self, other: "Metric") -> "Metric":
864 """Construct conpositional metric using the logical or operator."""
865 return CompositionalMetric(torch.bitwise_or, other, self)
866
867 def __rpow__(self, other: "Metric") -> "Metric":
868 """Construct conpositional metric using the exponential/power operator."""
869 return CompositionalMetric(torch.pow, other, self)
870
871 def __rsub__(self, other: "Metric") -> "Metric":
872 """Construct conpositional metric using the subtraction operator."""
873 return CompositionalMetric(torch.sub, other, self)
874
875 def __rtruediv__(self, other: "Metric") -> "Metric":
876 """Construct conpositional metric using the true divide operator."""
877 return CompositionalMetric(torch.true_divide, other, self)
878
879 def __rxor__(self, other: "Metric") -> "Metric":
880 """Construct conpositional metric using the logical xor operator."""
881 return CompositionalMetric(torch.bitwise_xor, other, self)
882
883 def __sub__(self, other: "Metric") -> "Metric":
884 """Construct conpositional metric using the subtraction operator."""
885 return CompositionalMetric(torch.sub, self, other)
886
887 def __truediv__(self, other: "Metric") -> "Metric":
888 """Construct conpositional metric using the true divide operator."""
889 return CompositionalMetric(torch.true_divide, self, other)
890
891 def __xor__(self, other: "Metric") -> "Metric":
892 """Construct conpositional metric using the logical xor operator."""
893 return CompositionalMetric(torch.bitwise_xor, self, other)
894
895 def __abs__(self) -> "Metric":
896 """Construct conpositional metric using the absolute operator."""
897 return CompositionalMetric(torch.abs, self, None)
898
899 def __inv__(self) -> "Metric":
900 """Construct conpositional metric using the not operator."""
901 return CompositionalMetric(torch.bitwise_not, self, None)
902
903 def __invert__(self) -> "Metric":
904 """Construct conpositional metric using the not operator."""
905 return self.__inv__()
906
907 def __neg__(self) -> "Metric":
908 """Construct conpositional metric using absolute negative operator."""
909 return CompositionalMetric(_neg, self, None)
910
911 def __pos__(self) -> "Metric":
912 """Construct conpositional metric using absolute operator."""
913 return CompositionalMetric(torch.abs, self, None)
914
915 def __getitem__(self, idx: int) -> "Metric":
916 """Construct conpositional metric using the get item operator."""
917 return CompositionalMetric(lambda x: x[idx], self, None)
918
919 def __getnewargs__(self) -> Tuple:
920 """Needede method for construction of new metrics __new__ method."""
921 return (Metric.__str__(self),)
922
923 def __iter__(self):
924 """Iteration over metrics are not allowed. Use metric collections for nesting metrics."""
925 raise NotImplementedError("Metrics does not support iteration.")
926
927
928 def _neg(x: Tensor) -> Tensor:
929 return -torch.abs(x)
930
931
932 class CompositionalMetric(Metric):
933 """Composition of two metrics with a specific operator which will be executed upon metrics compute."""
934
935 def __init__(
936 self,
937 operator: Callable,
938 metric_a: Union[Metric, int, float, Tensor],
939 metric_b: Union[Metric, int, float, Tensor, None],
940 ) -> None:
941 """Args:
942 operator: the operator taking in one (if metric_b is None)
943 or two arguments. Will be applied to outputs of metric_a.compute()
944 and (optionally if metric_b is not None) metric_b.compute()
945 metric_a: first metric whose compute() result is the first argument of operator
946 metric_b: second metric whose compute() result is the second argument of operator.
947 For operators taking in only one input, this should be None.
948 """
949 super().__init__()
950
951 self.op = operator
952
953 if isinstance(metric_a, Tensor):
954 self.register_buffer("metric_a", metric_a)
955 else:
956 self.metric_a = metric_a
957
958 if isinstance(metric_b, Tensor):
959 self.register_buffer("metric_b", metric_b)
960 else:
961 self.metric_b = metric_b
962
963 def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None:
964 """No syncing required here. syncing will be done in metric_a and metric_b."""
965 pass
966
967 def update(self, *args: Any, **kwargs: Any) -> None:
968 """Redirect the call to the input which the conposition was formed from."""
969 if isinstance(self.metric_a, Metric):
970 self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs))
971
972 if isinstance(self.metric_b, Metric):
973 self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs))
974
975 def compute(self) -> Any:
976 """Redirect the call to the input which the conposition was formed from."""
977 # also some parsing for kwargs?
978 val_a = self.metric_a.compute() if isinstance(self.metric_a, Metric) else self.metric_a
979 val_b = self.metric_b.compute() if isinstance(self.metric_b, Metric) else self.metric_b
980
981 if val_b is None:
982 return self.op(val_a)
983
984 return self.op(val_a, val_b)
985
986 @torch.jit.unused
987 def forward(self, *args: Any, **kwargs: Any) -> Any:
988 """Calculate metric on current batch and accumulate to global state."""
989 val_a = (
990 self.metric_a(*args, **self.metric_a._filter_kwargs(**kwargs))
991 if isinstance(self.metric_a, Metric)
992 else self.metric_a
993 )
994 val_b = (
995 self.metric_b(*args, **self.metric_b._filter_kwargs(**kwargs))
996 if isinstance(self.metric_b, Metric)
997 else self.metric_b
998 )
999
1000 if val_a is None:
1001 return None
1002
1003 if val_b is None:
1004 if isinstance(self.metric_b, Metric):
1005 return None
1006
1007 # Unary op
1008 return self.op(val_a)
1009
1010 # Binary op
1011 return self.op(val_a, val_b)
1012
1013 def reset(self) -> None:
1014 """Redirect the call to the input which the conposition was formed from."""
1015 if isinstance(self.metric_a, Metric):
1016 self.metric_a.reset()
1017
1018 if isinstance(self.metric_b, Metric):
1019 self.metric_b.reset()
1020
1021 def persistent(self, mode: bool = False) -> None:
1022 """Change if metric state is persistent (save as part of state_dict) or not.
1023
1024 Args:
1025 mode: bool indicating if all states should be persistent or not
1026
1027 """
1028 if isinstance(self.metric_a, Metric):
1029 self.metric_a.persistent(mode=mode)
1030 if isinstance(self.metric_b, Metric):
1031 self.metric_b.persistent(mode=mode)
1032
1033 def __repr__(self) -> str:
1034 """Returns a representation of the compositional metric, including the two inputs it was formed from."""
1035 _op_metrics = f"(\n {self.op.__name__}(\n {repr(self.metric_a)},\n {repr(self.metric_b)}\n )\n)"
1036 repr_str = self.__class__.__name__ + _op_metrics
1037
1038 return repr_str
1039
1040 def _wrap_compute(self, compute: Callable) -> Callable:
1041 """No wrapping nessesary for compositional metrics."""
1042 return compute
1043
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
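
The reference patch that follows replaces the `__iter__` override with `__iter__ = None`. This relies on documented Python data-model behavior: assigning `None` to a special method marks the corresponding protocol as unsupported, so calling `iter()` on an instance raises a plain `TypeError` instead of the `NotImplementedError` the old override raised. A minimal, self-contained sketch of that behavior (the class name below is only for illustration and is not part of the patch):

```python
class NotIterable:
    # Assigning None to a special method tells Python the protocol is
    # unavailable; iter() then raises TypeError without falling back
    # to __getitem__ (see the Python data model docs).
    __iter__ = None


try:
    iter(NotIterable())
except TypeError as err:
    print(err)  # e.g. "'NotIterable' object is not iterable"
```

This approach also avoids the static-analysis misreading described in the issue: a method body that only raises `NotImplementedError` is commonly interpreted as an abstract method that subclasses are expected to implement.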
| diff --git a/src/torchmetrics/metric.py b/src/torchmetrics/metric.py
--- a/src/torchmetrics/metric.py
+++ b/src/torchmetrics/metric.py
@@ -920,9 +920,7 @@
"""Needede method for construction of new metrics __new__ method."""
return (Metric.__str__(self),)
- def __iter__(self):
- """Iteration over metrics are not allowed. Use metric collections for nesting metrics."""
- raise NotImplementedError("Metrics does not support iteration.")
+ __iter__ = None
def _neg(x: Tensor) -> Tensor:
| {"golden_diff": "diff --git a/src/torchmetrics/metric.py b/src/torchmetrics/metric.py\n--- a/src/torchmetrics/metric.py\n+++ b/src/torchmetrics/metric.py\n@@ -920,9 +920,7 @@\n \"\"\"Needede method for construction of new metrics __new__ method.\"\"\"\n return (Metric.__str__(self),)\n \n- def __iter__(self):\n- \"\"\"Iteration over metrics are not allowed. Use metric collections for nesting metrics.\"\"\"\n- raise NotImplementedError(\"Metrics does not support iteration.\")\n+ __iter__ = None\n \n \n def _neg(x: Tensor) -> Tensor:\n", "issue": "Wrong error in Metric.__iter__\n## \ud83d\udc1b Bug\r\nIn the class `Metric`, the `__iter__` method is defined as follows:\r\n```python\r\ndef __iter__(self):\r\n \"\"\"Iteration over metrics are not allowed. Use metric collections for nesting metrics.\"\"\"\r\n raise NotImplementedError(\"Metrics does not support iteration.\")\r\n```\r\nIn python's docs, the following note is written about `NotImplementedError` (https://docs.python.org/3/library/exceptions.html#NotImplementedError): \r\n> It should not be used to indicate that an operator or method is not meant to be supported at all \u2013 in that case either leave the operator / method undefined or, if a subclass, set it to [None](https://docs.python.org/3/library/constants.html#None).\r\n\r\nIn fact, the use cases for `NotImplementedError` are:\r\n> In user defined base classes, abstract methods should raise this exception when they require derived classes to override the method, or while the class is being developed to indicate that the real implementation still needs to be added.\r\n\r\nIn PyCharm (and maybe other python code checkers), this leads to a warning for every sub-class of Metric, saying that all abstract methods should be implemented (PyCharm understands a method that raises a `NotImplementedError` as abstract, even if there is no `@abstractmethod` decorator on this method).\r\n\r\nWas there a good reason to define `__iter__` like that? Otherwise, could we remove it?\nWrong error in Metric.__iter__\n## \ud83d\udc1b Bug\r\nIn the class `Metric`, the `__iter__` method is defined as follows:\r\n```python\r\ndef __iter__(self):\r\n \"\"\"Iteration over metrics are not allowed. Use metric collections for nesting metrics.\"\"\"\r\n raise NotImplementedError(\"Metrics does not support iteration.\")\r\n```\r\nIn python's docs, the following note is written about `NotImplementedError` (https://docs.python.org/3/library/exceptions.html#NotImplementedError): \r\n> It should not be used to indicate that an operator or method is not meant to be supported at all \u2013 in that case either leave the operator / method undefined or, if a subclass, set it to [None](https://docs.python.org/3/library/constants.html#None).\r\n\r\nIn fact, the use cases for `NotImplementedError` are:\r\n> In user defined base classes, abstract methods should raise this exception when they require derived classes to override the method, or while the class is being developed to indicate that the real implementation still needs to be added.\r\n\r\nIn PyCharm (and maybe other python code checkers), this leads to a warning for every sub-class of Metric, saying that all abstract methods should be implemented (PyCharm understands a method that raises a `NotImplementedError` as abstract, even if there is no `@abstractmethod` decorator on this method).\r\n\r\nWas there a good reason to define `__iter__` like that? 
Otherwise, could we remove it?\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport functools\nimport inspect\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\n\nfrom torchmetrics.utilities import apply_to_collection, rank_zero_warn\nfrom torchmetrics.utilities.data import (\n _flatten,\n _squeeze_if_scalar,\n dim_zero_cat,\n dim_zero_max,\n dim_zero_mean,\n dim_zero_min,\n dim_zero_sum,\n)\nfrom torchmetrics.utilities.distributed import gather_all_tensors\nfrom torchmetrics.utilities.exceptions import TorchMetricsUserError\n\n\ndef jit_distributed_available() -> bool:\n \"\"\"Determine if distributed mode is initialized.\"\"\"\n return torch.distributed.is_available() and torch.distributed.is_initialized()\n\n\nclass Metric(Module, ABC):\n \"\"\"Base class for all metrics present in the Metrics API.\n\n Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to\n handle distributed synchronization and per-step metric computation.\n\n Override ``update()`` and ``compute()`` functions to implement your own metric. Use\n ``add_state()`` to register metric state variables which keep track of state on each\n call of ``update()`` and are synchronized across processes when ``compute()`` is called.\n\n Note:\n Metric state variables can either be :class:`~torch.Tensor` or an empty list which can we used\n to store :class:`~torch.Tensor`.\n\n Note:\n Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``\n is valid, but it won't return the metric value at the current step. A call to ``forward()``\n automatically calls ``update()`` and also returns the metric value at the current step.\n\n Args:\n kwargs: additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n - compute_on_cpu: If metric state should be stored on CPU during computations. Only works\n for list states.\n - dist_sync_on_step: If metric state should synchronize on ``forward()``. Default is ``False``\n - process_group: The process group on which the synchronization is called. Default is the world.\n - dist_sync_fn: function that performs the allgather option on the metric state. Default is an\n custom implementation that calls ``torch.distributed.all_gather`` internally.\n - distributed_available_fn: function that checks if the distributed backend is available.\n Defaults to a check of ``torch.distributed.is_available()`` and ``torch.distributed.is_initialized()``.\n - sync_on_compute: If metric state should synchronize when ``compute`` is called. 
Default is ``True``-\n \"\"\"\n\n __jit_ignored_attributes__ = [\"device\"]\n __jit_unused_properties__ = [\"is_differentiable\"]\n is_differentiable: Optional[bool] = None\n higher_is_better: Optional[bool] = None\n full_state_update: Optional[bool] = None\n\n def __init__(\n self,\n **kwargs: Any,\n ) -> None:\n super().__init__()\n\n # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/\n # torch/nn/modules/module.py#L227)\n torch._C._log_api_usage_once(f\"torchmetrics.metric.{self.__class__.__name__}\")\n\n self._device = torch.device(\"cpu\")\n\n self.compute_on_cpu = kwargs.pop(\"compute_on_cpu\", False)\n if not isinstance(self.compute_on_cpu, bool):\n raise ValueError(\n f\"Expected keyword argument `compute_on_cpu` to be an `bool` but got {self.compute_on_cpu}\"\n )\n\n self.dist_sync_on_step = kwargs.pop(\"dist_sync_on_step\", False)\n if not isinstance(self.dist_sync_on_step, bool):\n raise ValueError(\n f\"Expected keyword argument `dist_sync_on_step` to be an `bool` but got {self.dist_sync_on_step}\"\n )\n\n self.process_group = kwargs.pop(\"process_group\", None)\n\n self.dist_sync_fn = kwargs.pop(\"dist_sync_fn\", None)\n if self.dist_sync_fn is not None and not callable(self.dist_sync_fn):\n raise ValueError(\n f\"Expected keyword argument `dist_sync_fn` to be an callable function but got {self.dist_sync_fn}\"\n )\n\n self.distributed_available_fn = kwargs.pop(\"distributed_available_fn\", jit_distributed_available)\n\n self.sync_on_compute = kwargs.pop(\"sync_on_compute\", True)\n if not isinstance(self.sync_on_compute, bool):\n raise ValueError(\n f\"Expected keyword argument `sync_on_compute` to be a `bool` but got {self.sync_on_compute}\"\n )\n\n if kwargs:\n kwargs_ = [f\"`{a}`\" for a in sorted(kwargs)]\n raise ValueError(f\"Unexpected keyword arguments: {', '.join(kwargs_)}\")\n\n # initialize\n self._update_signature = inspect.signature(self.update)\n self.update: Callable = self._wrap_update(self.update)\n self.compute: Callable = self._wrap_compute(self.compute)\n self._computed = None\n self._forward_cache = None\n self._update_count = 0\n self._to_sync = self.sync_on_compute\n self._should_unsync = True\n self._enable_grad = False\n\n # initialize state\n self._defaults: Dict[str, Union[List, Tensor]] = {}\n self._persistent: Dict[str, bool] = {}\n self._reductions: Dict[str, Union[str, Callable[..., Any], None]] = {}\n\n # state management\n self._is_synced = False\n self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None\n\n @property\n def _update_called(self) -> bool:\n # TODO: this is needed for internal lightning, remove after v0.12 and update on lightning side\n return self._update_count > 0\n\n @property\n def update_called(self) -> bool:\n \"\"\"Returns `True` if `update` or `forward` has been called initialization or last `reset`.\"\"\"\n return self._update_count > 0\n\n @property\n def update_count(self) -> int:\n \"\"\"Get the number of times `update` and/or `forward` has been called since initialization or last\n `reset`.\n \"\"\"\n return self._update_count\n\n def add_state(\n self,\n name: str,\n default: Union[list, Tensor],\n dist_reduce_fx: Optional[Union[str, Callable]] = None,\n persistent: bool = False,\n ) -> None:\n \"\"\"Add metric state variable. Only used by subclasses.\n\n Args:\n name: The name of the state variable. 
The variable will then be accessible at ``self.name``.\n default: Default value of the state; can either be a :class:`~torch.Tensor` or an empty list.\n The state will be reset to this value when ``self.reset()`` is called.\n dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode.\n If value is ``\"sum\"``, ``\"mean\"``, ``\"cat\"``, ``\"min\"`` or ``\"max\"`` we will use ``torch.sum``,\n ``torch.mean``, ``torch.cat``, ``torch.min`` and ``torch.max``` respectively, each with argument\n ``dim=0``. Note that the ``\"cat\"`` reduction only makes sense if the state is a list, and not\n a tensor. The user can also pass a custom function in this parameter.\n persistent (Optional): whether the state will be saved as part of the modules ``state_dict``.\n Default is ``False``.\n\n Note:\n Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes.\n However, there won't be any reduction function applied to the synchronized metric state.\n\n The metric states would be synced as follows\n\n - If the metric state is :class:`~torch.Tensor`, the synced value will be a stacked :class:`~torch.Tensor`\n across the process dimension if the metric state was a :class:`~torch.Tensor`. The original\n :class:`~torch.Tensor` metric state retains dimension and hence the synchronized output will be of shape\n ``(num_process, ...)``.\n\n - If the metric state is a ``list``, the synced value will be a ``list`` containing the\n combined elements from all processes.\n\n Note:\n When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow\n the format discussed in the above note.\n\n Raises:\n ValueError:\n If ``default`` is not a ``tensor`` or an ``empty list``.\n ValueError:\n If ``dist_reduce_fx`` is not callable or one of ``\"mean\"``, ``\"sum\"``, ``\"cat\"``, ``None``.\n \"\"\"\n if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default):\n raise ValueError(\"state variable must be a tensor or any empty list (where you can append tensors)\")\n\n if dist_reduce_fx == \"sum\":\n dist_reduce_fx = dim_zero_sum\n elif dist_reduce_fx == \"mean\":\n dist_reduce_fx = dim_zero_mean\n elif dist_reduce_fx == \"max\":\n dist_reduce_fx = dim_zero_max\n elif dist_reduce_fx == \"min\":\n dist_reduce_fx = dim_zero_min\n elif dist_reduce_fx == \"cat\":\n dist_reduce_fx = dim_zero_cat\n elif dist_reduce_fx is not None and not callable(dist_reduce_fx):\n raise ValueError(\"`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', 'min', 'max', None]\")\n\n if isinstance(default, Tensor):\n default = default.contiguous()\n\n setattr(self, name, default)\n\n self._defaults[name] = deepcopy(default)\n self._persistent[name] = persistent\n self._reductions[name] = dist_reduce_fx\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"``forward`` serves the dual purpose of both computing the metric on the current batch of inputs but also\n add the batch statistics to the overall accumululating metric state.\n\n Input arguments are the exact same as corresponding ``update`` method. The returned output is the exact same as\n the output of ``compute``.\n \"\"\"\n # check if states are already synced\n if self._is_synced:\n raise TorchMetricsUserError(\n \"The Metric shouldn't be synced when performing ``forward``. 
\"\n \"HINT: Did you forget to call ``unsync`` ?.\"\n )\n\n if self.full_state_update or self.full_state_update is None or self.dist_sync_on_step:\n self._forward_cache = self._forward_full_state_update(*args, **kwargs)\n else:\n self._forward_cache = self._forward_reduce_state_update(*args, **kwargs)\n\n return self._forward_cache\n\n def _forward_full_state_update(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"forward computation using two calls to `update` to calculate the metric value on the current batch and\n accumulate global state.\n\n Doing this secures that metrics that need access to the full metric state during `update` works as expected.\n \"\"\"\n # global accumulation\n self.update(*args, **kwargs)\n _update_count = self._update_count\n\n self._to_sync = self.dist_sync_on_step\n # skip restore cache operation from compute as cache is stored below.\n self._should_unsync = False\n # skip computing on cpu for the batch\n _temp_compute_on_cpu = self.compute_on_cpu\n self.compute_on_cpu = False\n\n # save context before switch\n cache = {attr: getattr(self, attr) for attr in self._defaults}\n\n # call reset, update, compute, on single batch\n self._enable_grad = True # allow grads for batch computation\n self.reset()\n self.update(*args, **kwargs)\n batch_val = self.compute()\n\n # restore context\n for attr, val in cache.items():\n setattr(self, attr, val)\n self._update_count = _update_count\n\n # restore context\n self._is_synced = False\n self._should_unsync = True\n self._to_sync = self.sync_on_compute\n self._computed = None\n self._enable_grad = False\n self.compute_on_cpu = _temp_compute_on_cpu\n if self.compute_on_cpu:\n self._move_list_states_to_cpu()\n\n return batch_val\n\n def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"forward computation using single call to `update` to calculate the metric value on the current batch and\n accumulate global state.\n\n This can be done when the global metric state is a sinple reduction of batch states.\n \"\"\"\n # store global state and reset to default\n global_state = {attr: getattr(self, attr) for attr in self._defaults}\n _update_count = self._update_count\n self.reset()\n\n # local syncronization settings\n self._to_sync = self.dist_sync_on_step\n self._should_unsync = False\n _temp_compute_on_cpu = self.compute_on_cpu\n self.compute_on_cpu = False\n self._enable_grad = True # allow grads for batch computation\n\n # calculate batch state and compute batch value\n self.update(*args, **kwargs)\n batch_val = self.compute()\n\n # reduce batch and global state\n self._update_count = _update_count + 1\n with torch.no_grad():\n self._reduce_states(global_state)\n\n # restore context\n self._is_synced = False\n self._should_unsync = True\n self._to_sync = self.sync_on_compute\n self._computed = None\n self._enable_grad = False\n self.compute_on_cpu = _temp_compute_on_cpu\n if self.compute_on_cpu:\n self._move_list_states_to_cpu()\n\n return batch_val\n\n def _reduce_states(self, incoming_state: Dict[str, Any]) -> None:\n \"\"\"Add an incoming metric state to the current state of the metric.\n\n Args:\n incoming_state: a dict containing a metric state similar metric itself\n \"\"\"\n for attr in self._defaults:\n local_state = getattr(self, attr)\n global_state = incoming_state[attr]\n reduce_fn = self._reductions[attr]\n if reduce_fn == dim_zero_sum:\n reduced = global_state + local_state\n elif reduce_fn == dim_zero_mean:\n reduced = ((self._update_count - 1) * global_state + local_state).float() / 
self._update_count\n elif reduce_fn == dim_zero_max:\n reduced = torch.max(global_state, local_state)\n elif reduce_fn == dim_zero_min:\n reduced = torch.min(global_state, local_state)\n elif reduce_fn == dim_zero_cat:\n reduced = global_state + local_state\n elif reduce_fn is None and isinstance(global_state, Tensor):\n reduced = torch.stack([global_state, local_state])\n elif reduce_fn is None and isinstance(global_state, list):\n reduced = _flatten([global_state, local_state])\n else:\n reduced = reduce_fn(torch.stack([global_state, local_state]))\n\n setattr(self, attr, reduced)\n\n def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None:\n input_dict = {attr: getattr(self, attr) for attr in self._reductions}\n\n for attr, reduction_fn in self._reductions.items():\n # pre-concatenate metric states that are lists to reduce number of all_gather operations\n if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1:\n input_dict[attr] = [dim_zero_cat(input_dict[attr])]\n\n output_dict = apply_to_collection(\n input_dict,\n Tensor,\n dist_sync_fn,\n group=process_group or self.process_group,\n )\n\n for attr, reduction_fn in self._reductions.items():\n # pre-processing ops (stack or flatten for inputs)\n\n if isinstance(output_dict[attr], list) and len(output_dict[attr]) == 0:\n setattr(self, attr, [])\n continue\n\n if isinstance(output_dict[attr][0], Tensor):\n output_dict[attr] = torch.stack(output_dict[attr])\n elif isinstance(output_dict[attr][0], list):\n output_dict[attr] = _flatten(output_dict[attr])\n\n if not (callable(reduction_fn) or reduction_fn is None):\n raise TypeError(\"reduction_fn must be callable or None\")\n reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]\n setattr(self, attr, reduced)\n\n def _wrap_update(self, update: Callable) -> Callable:\n @functools.wraps(update)\n def wrapped_func(*args: Any, **kwargs: Any) -> None:\n self._computed = None\n self._update_count += 1\n with torch.set_grad_enabled(self._enable_grad):\n try:\n update(*args, **kwargs)\n except RuntimeError as err:\n if \"Expected all tensors to be on\" in str(err):\n raise RuntimeError(\n \"Encountered different devices in metric calculation (see stacktrace for details).\"\n \" This could be due to the metric class not being on the same device as input.\"\n f\" Instead of `metric={self.__class__.__name__}(...)` try to do\"\n f\" `metric={self.__class__.__name__}(...).to(device)` where\"\n \" device corresponds to the device of the input.\"\n ) from err\n raise err\n\n if self.compute_on_cpu:\n self._move_list_states_to_cpu()\n\n return wrapped_func\n\n def _move_list_states_to_cpu(self) -> None:\n \"\"\"Move list states to cpu to save GPU memory.\"\"\"\n for key in self._defaults:\n current_val = getattr(self, key)\n if isinstance(current_val, Sequence):\n setattr(self, key, [cur_v.to(\"cpu\") for cur_v in current_val])\n\n def sync(\n self,\n dist_sync_fn: Optional[Callable] = None,\n process_group: Optional[Any] = None,\n should_sync: bool = True,\n distributed_available: Optional[Callable] = None,\n ) -> None:\n \"\"\"Sync function for manually controlling when metrics states should be synced across processes.\n\n Args:\n dist_sync_fn: Function to be used to perform states synchronization\n process_group:\n Specify the process group on which synchronization is called.\n default: `None` (which selects the entire world)\n should_sync: Whether to apply to 
state synchronization. This will have an impact\n only when running in a distributed setting.\n distributed_available: Function to determine if we are running inside a distributed setting\n \"\"\"\n if self._is_synced and should_sync:\n raise TorchMetricsUserError(\"The Metric has already been synced.\")\n\n if distributed_available is None and self.distributed_available_fn is not None:\n distributed_available = self.distributed_available_fn\n\n is_distributed = distributed_available() if callable(distributed_available) else None\n\n if not should_sync or not is_distributed:\n return\n\n if dist_sync_fn is None:\n dist_sync_fn = gather_all_tensors\n\n # cache prior to syncing\n self._cache = {attr: getattr(self, attr) for attr in self._defaults}\n\n # sync\n self._sync_dist(dist_sync_fn, process_group=process_group)\n self._is_synced = True\n\n def unsync(self, should_unsync: bool = True) -> None:\n \"\"\"Unsync function for manually controlling when metrics states should be reverted back to their local\n states.\n\n Args:\n should_unsync: Whether to perform unsync\n \"\"\"\n if not should_unsync:\n return\n\n if not self._is_synced:\n raise TorchMetricsUserError(\"The Metric has already been un-synced.\")\n\n if self._cache is None:\n raise TorchMetricsUserError(\"The internal cache should exist to unsync the Metric.\")\n\n # if we synced, restore to cache so that we can continue to accumulate un-synced state\n for attr, val in self._cache.items():\n setattr(self, attr, val)\n self._is_synced = False\n self._cache = None\n\n @contextmanager\n def sync_context(\n self,\n dist_sync_fn: Optional[Callable] = None,\n process_group: Optional[Any] = None,\n should_sync: bool = True,\n should_unsync: bool = True,\n distributed_available: Optional[Callable] = None,\n ) -> Generator:\n \"\"\"Context manager to synchronize the states between processes when running in a distributed setting and\n restore the local cache states after yielding.\n\n Args:\n dist_sync_fn: Function to be used to perform states synchronization\n process_group:\n Specify the process group on which synchronization is called.\n default: `None` (which selects the entire world)\n should_sync: Whether to apply to state synchronization. 
This will have an impact\n only when running in a distributed setting.\n should_unsync: Whether to restore the cache state so that the metrics can\n continue to be accumulated.\n distributed_available: Function to determine if we are running inside a distributed setting\n \"\"\"\n self.sync(\n dist_sync_fn=dist_sync_fn,\n process_group=process_group,\n should_sync=should_sync,\n distributed_available=distributed_available,\n )\n\n yield\n\n self.unsync(should_unsync=self._is_synced and should_unsync)\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n @functools.wraps(compute)\n def wrapped_func(*args: Any, **kwargs: Any) -> Any:\n if self._update_count == 0:\n rank_zero_warn(\n f\"The ``compute`` method of metric {self.__class__.__name__}\"\n \" was called before the ``update`` method which may lead to errors,\"\n \" as metric states have not yet been updated.\",\n UserWarning,\n )\n\n # return cached value\n if self._computed is not None:\n return self._computed\n\n # compute relies on the sync context manager to gather the states across processes and apply reduction\n # if synchronization happened, the current rank accumulated states will be restored to keep\n # accumulation going if ``should_unsync=True``,\n with self.sync_context(\n dist_sync_fn=self.dist_sync_fn,\n should_sync=self._to_sync,\n should_unsync=self._should_unsync,\n ):\n value = compute(*args, **kwargs)\n self._computed = _squeeze_if_scalar(value)\n\n return self._computed\n\n return wrapped_func\n\n @abstractmethod\n def update(self, *_: Any, **__: Any) -> None:\n \"\"\"Override this method to update the state variables of your metric class.\"\"\"\n\n @abstractmethod\n def compute(self) -> Any:\n \"\"\"Override this method to compute the final metric value from state variables synchronized across the\n distributed backend.\n \"\"\"\n\n def plot(self, *_: Any, **__: Any) -> Any:\n \"\"\"Override this method plot the metric value.\"\"\"\n raise NotImplementedError\n\n def reset(self) -> None:\n \"\"\"This method automatically resets the metric state variables to their default value.\"\"\"\n self._update_count = 0\n self._forward_cache = None\n self._computed = None\n\n for attr, default in self._defaults.items():\n current_val = getattr(self, attr)\n if isinstance(default, Tensor):\n setattr(self, attr, default.detach().clone().to(current_val.device))\n else:\n setattr(self, attr, [])\n\n # reset internal states\n self._cache = None\n self._is_synced = False\n\n def clone(self) -> \"Metric\":\n \"\"\"Make a copy of the metric.\"\"\"\n return deepcopy(self)\n\n def __getstate__(self) -> Dict[str, Any]:\n \"\"\"Get the current state, including all metric states, for the metric. Used for loading and saving a metric.\"\"\"\n # ignore update and compute functions for pickling\n return {k: v for k, v in self.__dict__.items() if k not in [\"update\", \"compute\", \"_update_signature\"]}\n\n def __setstate__(self, state: Dict[str, Any]) -> None:\n \"\"\"Set the state of the metric, based on a input state. 
Used for loading and saving a metric.\"\"\"\n # manually restore update and compute functions for pickling\n self.__dict__.update(state)\n self._update_signature = inspect.signature(self.update)\n self.update: Callable = self._wrap_update(self.update)\n self.compute: Callable = self._wrap_compute(self.compute)\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Overwrite default method to prevent specific attributes from being set by user.\"\"\"\n if name in (\"higher_is_better\", \"is_differentiable\", \"full_state_update\"):\n raise RuntimeError(f\"Can't change const `{name}`.\")\n super().__setattr__(name, value)\n\n @property\n def device(self) -> \"torch.device\":\n \"\"\"Return the device of the metric.\"\"\"\n return self._device\n\n def type(self, dst_type: Union[str, torch.dtype]) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def float(self) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def double(self) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def half(self) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def set_dtype(self, dst_type: Union[str, torch.dtype]) -> \"Metric\":\n \"\"\"Special version of `type` for transferring all metric states to specific dtype\n Arguments:\n dst_type (type or string): the desired type.\n \"\"\"\n return super().type(dst_type)\n\n def _apply(self, fn: Callable) -> Module:\n \"\"\"Overwrite _apply function such that we can also move metric states to the correct device when `.to`,\n `.cuda`, etc methods are called.\n \"\"\"\n this = super()._apply(fn)\n # Also apply fn to metric states and defaults\n for key, value in this._defaults.items():\n if isinstance(value, Tensor):\n this._defaults[key] = fn(value)\n elif isinstance(value, Sequence):\n this._defaults[key] = [fn(v) for v in value]\n\n current_val = getattr(this, key)\n if isinstance(current_val, Tensor):\n setattr(this, key, fn(current_val))\n elif isinstance(current_val, Sequence):\n setattr(this, key, [fn(cur_v) for cur_v in current_val])\n else:\n raise TypeError(\n \"Expected metric state to be either a Tensor\" f\"or a list of Tensor, but encountered {current_val}\"\n )\n\n # make sure to update the device attribute\n # if the dummy tensor moves device by fn function we should also update the attribute\n self._device = fn(torch.zeros(1, device=self.device)).device\n\n # Additional apply to forward cache and computed attributes (may be nested)\n if this._computed is not None:\n this._computed = apply_to_collection(this._computed, Tensor, fn)\n if this._forward_cache is not None:\n this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn)\n\n return this\n\n def persistent(self, mode: bool = False) -> None:\n \"\"\"Method for post-init to change if metric states should be saved to its state_dict.\"\"\"\n for key in self._persistent:\n self._persistent[key] = mode\n\n def state_dict(\n self,\n destination: Dict[str, Any] = None,\n prefix: str = \"\",\n keep_vars: bool = False,\n ) -> Optional[Dict[str, Any]]:\n \"\"\"Get the current state of metric as an dictionary.\n\n Args:\n destination: Optional dictionary, that if provided, the state of 
module will be updated into the dict and\n the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned.\n prefix: optional string, a prefix added to parameter and buffer names to compose the keys in state_dict.\n keep_vars: by default the :class:`~torch.Tensor`s returned in the state dict are detached from autograd.\n If set to ``True``, detaching will not be performed.\n \"\"\"\n destination = super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)\n # Register metric states to be part of the state_dict\n for key in self._defaults:\n if not self._persistent[key]:\n continue\n current_val = getattr(self, key)\n if not keep_vars:\n if isinstance(current_val, Tensor):\n current_val = current_val.detach()\n elif isinstance(current_val, list):\n current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val]\n destination[prefix + key] = deepcopy(current_val)\n return destination\n\n def _load_from_state_dict(\n self,\n state_dict: dict,\n prefix: str,\n local_metadata: dict,\n strict: bool,\n missing_keys: List[str],\n unexpected_keys: List[str],\n error_msgs: List[str],\n ) -> None:\n \"\"\"Loads metric states from state_dict.\"\"\"\n for key in self._defaults:\n name = prefix + key\n if name in state_dict:\n setattr(self, key, state_dict.pop(name))\n super()._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n\n def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]:\n \"\"\"filter kwargs such that they match the update signature of the metric.\"\"\"\n # filter all parameters based on update signature except those of\n # type VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs)\n _params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)\n _sign_params = self._update_signature.parameters\n filtered_kwargs = {\n k: v for k, v in kwargs.items() if (k in _sign_params and _sign_params[k].kind not in _params)\n }\n\n exists_var_keyword = any(v.kind == inspect.Parameter.VAR_KEYWORD for v in _sign_params.values())\n # if no kwargs filtered, return all kwargs as default\n if not filtered_kwargs and not exists_var_keyword:\n # no kwargs in update signature -> don't return any kwargs\n filtered_kwargs = {}\n elif exists_var_keyword:\n # kwargs found in update signature -> return all kwargs to be sure to not omit any.\n # filtering logic is likely implemented within the update call.\n filtered_kwargs = kwargs\n return filtered_kwargs\n\n def __hash__(self) -> int:\n \"\"\"Returns an unique hash of the metric.\n\n The hash depends on both the class itself but also the current metric state, which therefore enforces that two\n instances of the same metrics never have the same hash even if they have been updated on the same data.\n \"\"\"\n # we need to add the id here, since PyTorch requires a module hash to be unique.\n # Internally, PyTorch nn.Module relies on that for children discovery\n # (see https://github.com/pytorch/pytorch/blob/v1.9.0/torch/nn/modules/module.py#L1544)\n # For metrics that include tensors it is not a problem,\n # since their hash is unique based on the memory location but we cannot rely on that for every metric.\n hash_vals = [self.__class__.__name__, id(self)]\n\n for key in self._defaults:\n val = getattr(self, key)\n # Special case: allow list values, so long\n # as their elements are hashable\n if hasattr(val, \"__iter__\") and not isinstance(val, Tensor):\n hash_vals.extend(val)\n else:\n hash_vals.append(val)\n\n 
return hash(tuple(hash_vals))\n\n def __add__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the addition operator.\"\"\"\n return CompositionalMetric(torch.add, self, other)\n\n def __and__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical and operator.\"\"\"\n return CompositionalMetric(torch.bitwise_and, self, other)\n\n def __eq__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the equal operator.\"\"\"\n return CompositionalMetric(torch.eq, self, other)\n\n def __floordiv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the floor division operator.\"\"\"\n return CompositionalMetric(torch.floor_divide, self, other)\n\n def __ge__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the greater than or equal operator.\"\"\"\n return CompositionalMetric(torch.ge, self, other)\n\n def __gt__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the greater than operator.\"\"\"\n return CompositionalMetric(torch.gt, self, other)\n\n def __le__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the less than or equal operator.\"\"\"\n return CompositionalMetric(torch.le, self, other)\n\n def __lt__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the less than operator.\"\"\"\n return CompositionalMetric(torch.lt, self, other)\n\n def __matmul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the matrix multiplication operator.\"\"\"\n return CompositionalMetric(torch.matmul, self, other)\n\n def __mod__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the remainder operator.\"\"\"\n return CompositionalMetric(torch.fmod, self, other)\n\n def __mul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the multiplication operator.\"\"\"\n return CompositionalMetric(torch.mul, self, other)\n\n # Fixme: this shall return bool instead of Metric\n def __ne__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the not equal operator.\"\"\"\n return CompositionalMetric(torch.ne, self, other)\n\n def __or__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical or operator.\"\"\"\n return CompositionalMetric(torch.bitwise_or, self, other)\n\n def __pow__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the exponential/power operator.\"\"\"\n return CompositionalMetric(torch.pow, self, other)\n\n def __radd__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the addition operator.\"\"\"\n return CompositionalMetric(torch.add, other, self)\n\n def __rand__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical and operator.\"\"\"\n # swap them since bitwise_and only supports that way and it's commutative\n return CompositionalMetric(torch.bitwise_and, self, other)\n\n def __rfloordiv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the floor division operator.\"\"\"\n return CompositionalMetric(torch.floor_divide, other, self)\n\n def __rmatmul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the matrix multiplication operator.\"\"\"\n return 
CompositionalMetric(torch.matmul, other, self)\n\n def __rmod__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the remainder operator.\"\"\"\n return CompositionalMetric(torch.fmod, other, self)\n\n def __rmul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the multiplication operator.\"\"\"\n return CompositionalMetric(torch.mul, other, self)\n\n def __ror__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical or operator.\"\"\"\n return CompositionalMetric(torch.bitwise_or, other, self)\n\n def __rpow__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the exponential/power operator.\"\"\"\n return CompositionalMetric(torch.pow, other, self)\n\n def __rsub__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the subtraction operator.\"\"\"\n return CompositionalMetric(torch.sub, other, self)\n\n def __rtruediv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the true divide operator.\"\"\"\n return CompositionalMetric(torch.true_divide, other, self)\n\n def __rxor__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical xor operator.\"\"\"\n return CompositionalMetric(torch.bitwise_xor, other, self)\n\n def __sub__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the subtraction operator.\"\"\"\n return CompositionalMetric(torch.sub, self, other)\n\n def __truediv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the true divide operator.\"\"\"\n return CompositionalMetric(torch.true_divide, self, other)\n\n def __xor__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical xor operator.\"\"\"\n return CompositionalMetric(torch.bitwise_xor, self, other)\n\n def __abs__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using the absolute operator.\"\"\"\n return CompositionalMetric(torch.abs, self, None)\n\n def __inv__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using the not operator.\"\"\"\n return CompositionalMetric(torch.bitwise_not, self, None)\n\n def __invert__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using the not operator.\"\"\"\n return self.__inv__()\n\n def __neg__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using absolute negative operator.\"\"\"\n return CompositionalMetric(_neg, self, None)\n\n def __pos__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using absolute operator.\"\"\"\n return CompositionalMetric(torch.abs, self, None)\n\n def __getitem__(self, idx: int) -> \"Metric\":\n \"\"\"Construct conpositional metric using the get item operator.\"\"\"\n return CompositionalMetric(lambda x: x[idx], self, None)\n\n def __getnewargs__(self) -> Tuple:\n \"\"\"Needede method for construction of new metrics __new__ method.\"\"\"\n return (Metric.__str__(self),)\n\n def __iter__(self):\n \"\"\"Iteration over metrics are not allowed. 
Use metric collections for nesting metrics.\"\"\"\n raise NotImplementedError(\"Metrics does not support iteration.\")\n\n\ndef _neg(x: Tensor) -> Tensor:\n return -torch.abs(x)\n\n\nclass CompositionalMetric(Metric):\n \"\"\"Composition of two metrics with a specific operator which will be executed upon metrics compute.\"\"\"\n\n def __init__(\n self,\n operator: Callable,\n metric_a: Union[Metric, int, float, Tensor],\n metric_b: Union[Metric, int, float, Tensor, None],\n ) -> None:\n \"\"\"Args:\n operator: the operator taking in one (if metric_b is None)\n or two arguments. Will be applied to outputs of metric_a.compute()\n and (optionally if metric_b is not None) metric_b.compute()\n metric_a: first metric whose compute() result is the first argument of operator\n metric_b: second metric whose compute() result is the second argument of operator.\n For operators taking in only one input, this should be None.\n \"\"\"\n super().__init__()\n\n self.op = operator\n\n if isinstance(metric_a, Tensor):\n self.register_buffer(\"metric_a\", metric_a)\n else:\n self.metric_a = metric_a\n\n if isinstance(metric_b, Tensor):\n self.register_buffer(\"metric_b\", metric_b)\n else:\n self.metric_b = metric_b\n\n def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None:\n \"\"\"No syncing required here. syncing will be done in metric_a and metric_b.\"\"\"\n pass\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Redirect the call to the input which the conposition was formed from.\"\"\"\n if isinstance(self.metric_a, Metric):\n self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs))\n\n if isinstance(self.metric_b, Metric):\n self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs))\n\n def compute(self) -> Any:\n \"\"\"Redirect the call to the input which the conposition was formed from.\"\"\"\n # also some parsing for kwargs?\n val_a = self.metric_a.compute() if isinstance(self.metric_a, Metric) else self.metric_a\n val_b = self.metric_b.compute() if isinstance(self.metric_b, Metric) else self.metric_b\n\n if val_b is None:\n return self.op(val_a)\n\n return self.op(val_a, val_b)\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Calculate metric on current batch and accumulate to global state.\"\"\"\n val_a = (\n self.metric_a(*args, **self.metric_a._filter_kwargs(**kwargs))\n if isinstance(self.metric_a, Metric)\n else self.metric_a\n )\n val_b = (\n self.metric_b(*args, **self.metric_b._filter_kwargs(**kwargs))\n if isinstance(self.metric_b, Metric)\n else self.metric_b\n )\n\n if val_a is None:\n return None\n\n if val_b is None:\n if isinstance(self.metric_b, Metric):\n return None\n\n # Unary op\n return self.op(val_a)\n\n # Binary op\n return self.op(val_a, val_b)\n\n def reset(self) -> None:\n \"\"\"Redirect the call to the input which the conposition was formed from.\"\"\"\n if isinstance(self.metric_a, Metric):\n self.metric_a.reset()\n\n if isinstance(self.metric_b, Metric):\n self.metric_b.reset()\n\n def persistent(self, mode: bool = False) -> None:\n \"\"\"Change if metric state is persistent (save as part of state_dict) or not.\n\n Args:\n mode: bool indicating if all states should be persistent or not\n\n \"\"\"\n if isinstance(self.metric_a, Metric):\n self.metric_a.persistent(mode=mode)\n if isinstance(self.metric_b, Metric):\n self.metric_b.persistent(mode=mode)\n\n def __repr__(self) -> str:\n \"\"\"Returns a representation of the compositional metric, 
including the two inputs it was formed from.\"\"\"\n _op_metrics = f\"(\\n {self.op.__name__}(\\n {repr(self.metric_a)},\\n {repr(self.metric_b)}\\n )\\n)\"\n repr_str = self.__class__.__name__ + _op_metrics\n\n return repr_str\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n \"\"\"No wrapping nessesary for compositional metrics.\"\"\"\n return compute\n", "path": "src/torchmetrics/metric.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport functools\nimport inspect\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\n\nfrom torchmetrics.utilities import apply_to_collection, rank_zero_warn\nfrom torchmetrics.utilities.data import (\n _flatten,\n _squeeze_if_scalar,\n dim_zero_cat,\n dim_zero_max,\n dim_zero_mean,\n dim_zero_min,\n dim_zero_sum,\n)\nfrom torchmetrics.utilities.distributed import gather_all_tensors\nfrom torchmetrics.utilities.exceptions import TorchMetricsUserError\n\n\ndef jit_distributed_available() -> bool:\n \"\"\"Determine if distributed mode is initialized.\"\"\"\n return torch.distributed.is_available() and torch.distributed.is_initialized()\n\n\nclass Metric(Module, ABC):\n \"\"\"Base class for all metrics present in the Metrics API.\n\n Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to\n handle distributed synchronization and per-step metric computation.\n\n Override ``update()`` and ``compute()`` functions to implement your own metric. Use\n ``add_state()`` to register metric state variables which keep track of state on each\n call of ``update()`` and are synchronized across processes when ``compute()`` is called.\n\n Note:\n Metric state variables can either be :class:`~torch.Tensor` or an empty list which can we used\n to store :class:`~torch.Tensor`.\n\n Note:\n Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``\n is valid, but it won't return the metric value at the current step. A call to ``forward()``\n automatically calls ``update()`` and also returns the metric value at the current step.\n\n Args:\n kwargs: additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n - compute_on_cpu: If metric state should be stored on CPU during computations. Only works\n for list states.\n - dist_sync_on_step: If metric state should synchronize on ``forward()``. Default is ``False``\n - process_group: The process group on which the synchronization is called. Default is the world.\n - dist_sync_fn: function that performs the allgather option on the metric state. 
Default is an\n custom implementation that calls ``torch.distributed.all_gather`` internally.\n - distributed_available_fn: function that checks if the distributed backend is available.\n Defaults to a check of ``torch.distributed.is_available()`` and ``torch.distributed.is_initialized()``.\n - sync_on_compute: If metric state should synchronize when ``compute`` is called. Default is ``True``-\n \"\"\"\n\n __jit_ignored_attributes__ = [\"device\"]\n __jit_unused_properties__ = [\"is_differentiable\"]\n is_differentiable: Optional[bool] = None\n higher_is_better: Optional[bool] = None\n full_state_update: Optional[bool] = None\n\n def __init__(\n self,\n **kwargs: Any,\n ) -> None:\n super().__init__()\n\n # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/\n # torch/nn/modules/module.py#L227)\n torch._C._log_api_usage_once(f\"torchmetrics.metric.{self.__class__.__name__}\")\n\n self._device = torch.device(\"cpu\")\n\n self.compute_on_cpu = kwargs.pop(\"compute_on_cpu\", False)\n if not isinstance(self.compute_on_cpu, bool):\n raise ValueError(\n f\"Expected keyword argument `compute_on_cpu` to be an `bool` but got {self.compute_on_cpu}\"\n )\n\n self.dist_sync_on_step = kwargs.pop(\"dist_sync_on_step\", False)\n if not isinstance(self.dist_sync_on_step, bool):\n raise ValueError(\n f\"Expected keyword argument `dist_sync_on_step` to be an `bool` but got {self.dist_sync_on_step}\"\n )\n\n self.process_group = kwargs.pop(\"process_group\", None)\n\n self.dist_sync_fn = kwargs.pop(\"dist_sync_fn\", None)\n if self.dist_sync_fn is not None and not callable(self.dist_sync_fn):\n raise ValueError(\n f\"Expected keyword argument `dist_sync_fn` to be an callable function but got {self.dist_sync_fn}\"\n )\n\n self.distributed_available_fn = kwargs.pop(\"distributed_available_fn\", jit_distributed_available)\n\n self.sync_on_compute = kwargs.pop(\"sync_on_compute\", True)\n if not isinstance(self.sync_on_compute, bool):\n raise ValueError(\n f\"Expected keyword argument `sync_on_compute` to be a `bool` but got {self.sync_on_compute}\"\n )\n\n if kwargs:\n kwargs_ = [f\"`{a}`\" for a in sorted(kwargs)]\n raise ValueError(f\"Unexpected keyword arguments: {', '.join(kwargs_)}\")\n\n # initialize\n self._update_signature = inspect.signature(self.update)\n self.update: Callable = self._wrap_update(self.update)\n self.compute: Callable = self._wrap_compute(self.compute)\n self._computed = None\n self._forward_cache = None\n self._update_count = 0\n self._to_sync = self.sync_on_compute\n self._should_unsync = True\n self._enable_grad = False\n\n # initialize state\n self._defaults: Dict[str, Union[List, Tensor]] = {}\n self._persistent: Dict[str, bool] = {}\n self._reductions: Dict[str, Union[str, Callable[..., Any], None]] = {}\n\n # state management\n self._is_synced = False\n self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None\n\n @property\n def _update_called(self) -> bool:\n # TODO: this is needed for internal lightning, remove after v0.12 and update on lightning side\n return self._update_count > 0\n\n @property\n def update_called(self) -> bool:\n \"\"\"Returns `True` if `update` or `forward` has been called initialization or last `reset`.\"\"\"\n return self._update_count > 0\n\n @property\n def update_count(self) -> int:\n \"\"\"Get the number of times `update` and/or `forward` has been called since initialization or last\n `reset`.\n \"\"\"\n return self._update_count\n\n def add_state(\n self,\n name: str,\n default: Union[list, Tensor],\n 
dist_reduce_fx: Optional[Union[str, Callable]] = None,\n persistent: bool = False,\n ) -> None:\n \"\"\"Add metric state variable. Only used by subclasses.\n\n Args:\n name: The name of the state variable. The variable will then be accessible at ``self.name``.\n default: Default value of the state; can either be a :class:`~torch.Tensor` or an empty list.\n The state will be reset to this value when ``self.reset()`` is called.\n dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode.\n If value is ``\"sum\"``, ``\"mean\"``, ``\"cat\"``, ``\"min\"`` or ``\"max\"`` we will use ``torch.sum``,\n ``torch.mean``, ``torch.cat``, ``torch.min`` and ``torch.max``` respectively, each with argument\n ``dim=0``. Note that the ``\"cat\"`` reduction only makes sense if the state is a list, and not\n a tensor. The user can also pass a custom function in this parameter.\n persistent (Optional): whether the state will be saved as part of the modules ``state_dict``.\n Default is ``False``.\n\n Note:\n Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes.\n However, there won't be any reduction function applied to the synchronized metric state.\n\n The metric states would be synced as follows\n\n - If the metric state is :class:`~torch.Tensor`, the synced value will be a stacked :class:`~torch.Tensor`\n across the process dimension if the metric state was a :class:`~torch.Tensor`. The original\n :class:`~torch.Tensor` metric state retains dimension and hence the synchronized output will be of shape\n ``(num_process, ...)``.\n\n - If the metric state is a ``list``, the synced value will be a ``list`` containing the\n combined elements from all processes.\n\n Note:\n When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow\n the format discussed in the above note.\n\n Raises:\n ValueError:\n If ``default`` is not a ``tensor`` or an ``empty list``.\n ValueError:\n If ``dist_reduce_fx`` is not callable or one of ``\"mean\"``, ``\"sum\"``, ``\"cat\"``, ``None``.\n \"\"\"\n if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default):\n raise ValueError(\"state variable must be a tensor or any empty list (where you can append tensors)\")\n\n if dist_reduce_fx == \"sum\":\n dist_reduce_fx = dim_zero_sum\n elif dist_reduce_fx == \"mean\":\n dist_reduce_fx = dim_zero_mean\n elif dist_reduce_fx == \"max\":\n dist_reduce_fx = dim_zero_max\n elif dist_reduce_fx == \"min\":\n dist_reduce_fx = dim_zero_min\n elif dist_reduce_fx == \"cat\":\n dist_reduce_fx = dim_zero_cat\n elif dist_reduce_fx is not None and not callable(dist_reduce_fx):\n raise ValueError(\"`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', 'min', 'max', None]\")\n\n if isinstance(default, Tensor):\n default = default.contiguous()\n\n setattr(self, name, default)\n\n self._defaults[name] = deepcopy(default)\n self._persistent[name] = persistent\n self._reductions[name] = dist_reduce_fx\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"``forward`` serves the dual purpose of both computing the metric on the current batch of inputs but also\n add the batch statistics to the overall accumululating metric state.\n\n Input arguments are the exact same as corresponding ``update`` method. 
The returned output is the exact same as\n the output of ``compute``.\n \"\"\"\n # check if states are already synced\n if self._is_synced:\n raise TorchMetricsUserError(\n \"The Metric shouldn't be synced when performing ``forward``. \"\n \"HINT: Did you forget to call ``unsync`` ?.\"\n )\n\n if self.full_state_update or self.full_state_update is None or self.dist_sync_on_step:\n self._forward_cache = self._forward_full_state_update(*args, **kwargs)\n else:\n self._forward_cache = self._forward_reduce_state_update(*args, **kwargs)\n\n return self._forward_cache\n\n def _forward_full_state_update(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"forward computation using two calls to `update` to calculate the metric value on the current batch and\n accumulate global state.\n\n Doing this secures that metrics that need access to the full metric state during `update` works as expected.\n \"\"\"\n # global accumulation\n self.update(*args, **kwargs)\n _update_count = self._update_count\n\n self._to_sync = self.dist_sync_on_step\n # skip restore cache operation from compute as cache is stored below.\n self._should_unsync = False\n # skip computing on cpu for the batch\n _temp_compute_on_cpu = self.compute_on_cpu\n self.compute_on_cpu = False\n\n # save context before switch\n cache = {attr: getattr(self, attr) for attr in self._defaults}\n\n # call reset, update, compute, on single batch\n self._enable_grad = True # allow grads for batch computation\n self.reset()\n self.update(*args, **kwargs)\n batch_val = self.compute()\n\n # restore context\n for attr, val in cache.items():\n setattr(self, attr, val)\n self._update_count = _update_count\n\n # restore context\n self._is_synced = False\n self._should_unsync = True\n self._to_sync = self.sync_on_compute\n self._computed = None\n self._enable_grad = False\n self.compute_on_cpu = _temp_compute_on_cpu\n if self.compute_on_cpu:\n self._move_list_states_to_cpu()\n\n return batch_val\n\n def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"forward computation using single call to `update` to calculate the metric value on the current batch and\n accumulate global state.\n\n This can be done when the global metric state is a sinple reduction of batch states.\n \"\"\"\n # store global state and reset to default\n global_state = {attr: getattr(self, attr) for attr in self._defaults}\n _update_count = self._update_count\n self.reset()\n\n # local syncronization settings\n self._to_sync = self.dist_sync_on_step\n self._should_unsync = False\n _temp_compute_on_cpu = self.compute_on_cpu\n self.compute_on_cpu = False\n self._enable_grad = True # allow grads for batch computation\n\n # calculate batch state and compute batch value\n self.update(*args, **kwargs)\n batch_val = self.compute()\n\n # reduce batch and global state\n self._update_count = _update_count + 1\n with torch.no_grad():\n self._reduce_states(global_state)\n\n # restore context\n self._is_synced = False\n self._should_unsync = True\n self._to_sync = self.sync_on_compute\n self._computed = None\n self._enable_grad = False\n self.compute_on_cpu = _temp_compute_on_cpu\n if self.compute_on_cpu:\n self._move_list_states_to_cpu()\n\n return batch_val\n\n def _reduce_states(self, incoming_state: Dict[str, Any]) -> None:\n \"\"\"Add an incoming metric state to the current state of the metric.\n\n Args:\n incoming_state: a dict containing a metric state similar metric itself\n \"\"\"\n for attr in self._defaults:\n local_state = getattr(self, attr)\n global_state = 
incoming_state[attr]\n reduce_fn = self._reductions[attr]\n if reduce_fn == dim_zero_sum:\n reduced = global_state + local_state\n elif reduce_fn == dim_zero_mean:\n reduced = ((self._update_count - 1) * global_state + local_state).float() / self._update_count\n elif reduce_fn == dim_zero_max:\n reduced = torch.max(global_state, local_state)\n elif reduce_fn == dim_zero_min:\n reduced = torch.min(global_state, local_state)\n elif reduce_fn == dim_zero_cat:\n reduced = global_state + local_state\n elif reduce_fn is None and isinstance(global_state, Tensor):\n reduced = torch.stack([global_state, local_state])\n elif reduce_fn is None and isinstance(global_state, list):\n reduced = _flatten([global_state, local_state])\n else:\n reduced = reduce_fn(torch.stack([global_state, local_state]))\n\n setattr(self, attr, reduced)\n\n def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None:\n input_dict = {attr: getattr(self, attr) for attr in self._reductions}\n\n for attr, reduction_fn in self._reductions.items():\n # pre-concatenate metric states that are lists to reduce number of all_gather operations\n if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1:\n input_dict[attr] = [dim_zero_cat(input_dict[attr])]\n\n output_dict = apply_to_collection(\n input_dict,\n Tensor,\n dist_sync_fn,\n group=process_group or self.process_group,\n )\n\n for attr, reduction_fn in self._reductions.items():\n # pre-processing ops (stack or flatten for inputs)\n\n if isinstance(output_dict[attr], list) and len(output_dict[attr]) == 0:\n setattr(self, attr, [])\n continue\n\n if isinstance(output_dict[attr][0], Tensor):\n output_dict[attr] = torch.stack(output_dict[attr])\n elif isinstance(output_dict[attr][0], list):\n output_dict[attr] = _flatten(output_dict[attr])\n\n if not (callable(reduction_fn) or reduction_fn is None):\n raise TypeError(\"reduction_fn must be callable or None\")\n reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]\n setattr(self, attr, reduced)\n\n def _wrap_update(self, update: Callable) -> Callable:\n @functools.wraps(update)\n def wrapped_func(*args: Any, **kwargs: Any) -> None:\n self._computed = None\n self._update_count += 1\n with torch.set_grad_enabled(self._enable_grad):\n try:\n update(*args, **kwargs)\n except RuntimeError as err:\n if \"Expected all tensors to be on\" in str(err):\n raise RuntimeError(\n \"Encountered different devices in metric calculation (see stacktrace for details).\"\n \" This could be due to the metric class not being on the same device as input.\"\n f\" Instead of `metric={self.__class__.__name__}(...)` try to do\"\n f\" `metric={self.__class__.__name__}(...).to(device)` where\"\n \" device corresponds to the device of the input.\"\n ) from err\n raise err\n\n if self.compute_on_cpu:\n self._move_list_states_to_cpu()\n\n return wrapped_func\n\n def _move_list_states_to_cpu(self) -> None:\n \"\"\"Move list states to cpu to save GPU memory.\"\"\"\n for key in self._defaults:\n current_val = getattr(self, key)\n if isinstance(current_val, Sequence):\n setattr(self, key, [cur_v.to(\"cpu\") for cur_v in current_val])\n\n def sync(\n self,\n dist_sync_fn: Optional[Callable] = None,\n process_group: Optional[Any] = None,\n should_sync: bool = True,\n distributed_available: Optional[Callable] = None,\n ) -> None:\n \"\"\"Sync function for manually controlling when metrics states should be synced across processes.\n\n 
Args:\n dist_sync_fn: Function to be used to perform states synchronization\n process_group:\n Specify the process group on which synchronization is called.\n default: `None` (which selects the entire world)\n should_sync: Whether to apply to state synchronization. This will have an impact\n only when running in a distributed setting.\n distributed_available: Function to determine if we are running inside a distributed setting\n \"\"\"\n if self._is_synced and should_sync:\n raise TorchMetricsUserError(\"The Metric has already been synced.\")\n\n if distributed_available is None and self.distributed_available_fn is not None:\n distributed_available = self.distributed_available_fn\n\n is_distributed = distributed_available() if callable(distributed_available) else None\n\n if not should_sync or not is_distributed:\n return\n\n if dist_sync_fn is None:\n dist_sync_fn = gather_all_tensors\n\n # cache prior to syncing\n self._cache = {attr: getattr(self, attr) for attr in self._defaults}\n\n # sync\n self._sync_dist(dist_sync_fn, process_group=process_group)\n self._is_synced = True\n\n def unsync(self, should_unsync: bool = True) -> None:\n \"\"\"Unsync function for manually controlling when metrics states should be reverted back to their local\n states.\n\n Args:\n should_unsync: Whether to perform unsync\n \"\"\"\n if not should_unsync:\n return\n\n if not self._is_synced:\n raise TorchMetricsUserError(\"The Metric has already been un-synced.\")\n\n if self._cache is None:\n raise TorchMetricsUserError(\"The internal cache should exist to unsync the Metric.\")\n\n # if we synced, restore to cache so that we can continue to accumulate un-synced state\n for attr, val in self._cache.items():\n setattr(self, attr, val)\n self._is_synced = False\n self._cache = None\n\n @contextmanager\n def sync_context(\n self,\n dist_sync_fn: Optional[Callable] = None,\n process_group: Optional[Any] = None,\n should_sync: bool = True,\n should_unsync: bool = True,\n distributed_available: Optional[Callable] = None,\n ) -> Generator:\n \"\"\"Context manager to synchronize the states between processes when running in a distributed setting and\n restore the local cache states after yielding.\n\n Args:\n dist_sync_fn: Function to be used to perform states synchronization\n process_group:\n Specify the process group on which synchronization is called.\n default: `None` (which selects the entire world)\n should_sync: Whether to apply to state synchronization. 
This will have an impact\n only when running in a distributed setting.\n should_unsync: Whether to restore the cache state so that the metrics can\n continue to be accumulated.\n distributed_available: Function to determine if we are running inside a distributed setting\n \"\"\"\n self.sync(\n dist_sync_fn=dist_sync_fn,\n process_group=process_group,\n should_sync=should_sync,\n distributed_available=distributed_available,\n )\n\n yield\n\n self.unsync(should_unsync=self._is_synced and should_unsync)\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n @functools.wraps(compute)\n def wrapped_func(*args: Any, **kwargs: Any) -> Any:\n if self._update_count == 0:\n rank_zero_warn(\n f\"The ``compute`` method of metric {self.__class__.__name__}\"\n \" was called before the ``update`` method which may lead to errors,\"\n \" as metric states have not yet been updated.\",\n UserWarning,\n )\n\n # return cached value\n if self._computed is not None:\n return self._computed\n\n # compute relies on the sync context manager to gather the states across processes and apply reduction\n # if synchronization happened, the current rank accumulated states will be restored to keep\n # accumulation going if ``should_unsync=True``,\n with self.sync_context(\n dist_sync_fn=self.dist_sync_fn,\n should_sync=self._to_sync,\n should_unsync=self._should_unsync,\n ):\n value = compute(*args, **kwargs)\n self._computed = _squeeze_if_scalar(value)\n\n return self._computed\n\n return wrapped_func\n\n @abstractmethod\n def update(self, *_: Any, **__: Any) -> None:\n \"\"\"Override this method to update the state variables of your metric class.\"\"\"\n\n @abstractmethod\n def compute(self) -> Any:\n \"\"\"Override this method to compute the final metric value from state variables synchronized across the\n distributed backend.\n \"\"\"\n\n def plot(self, *_: Any, **__: Any) -> Any:\n \"\"\"Override this method plot the metric value.\"\"\"\n raise NotImplementedError\n\n def reset(self) -> None:\n \"\"\"This method automatically resets the metric state variables to their default value.\"\"\"\n self._update_count = 0\n self._forward_cache = None\n self._computed = None\n\n for attr, default in self._defaults.items():\n current_val = getattr(self, attr)\n if isinstance(default, Tensor):\n setattr(self, attr, default.detach().clone().to(current_val.device))\n else:\n setattr(self, attr, [])\n\n # reset internal states\n self._cache = None\n self._is_synced = False\n\n def clone(self) -> \"Metric\":\n \"\"\"Make a copy of the metric.\"\"\"\n return deepcopy(self)\n\n def __getstate__(self) -> Dict[str, Any]:\n \"\"\"Get the current state, including all metric states, for the metric. Used for loading and saving a metric.\"\"\"\n # ignore update and compute functions for pickling\n return {k: v for k, v in self.__dict__.items() if k not in [\"update\", \"compute\", \"_update_signature\"]}\n\n def __setstate__(self, state: Dict[str, Any]) -> None:\n \"\"\"Set the state of the metric, based on a input state. 
Used for loading and saving a metric.\"\"\"\n # manually restore update and compute functions for pickling\n self.__dict__.update(state)\n self._update_signature = inspect.signature(self.update)\n self.update: Callable = self._wrap_update(self.update)\n self.compute: Callable = self._wrap_compute(self.compute)\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Overwrite default method to prevent specific attributes from being set by user.\"\"\"\n if name in (\"higher_is_better\", \"is_differentiable\", \"full_state_update\"):\n raise RuntimeError(f\"Can't change const `{name}`.\")\n super().__setattr__(name, value)\n\n @property\n def device(self) -> \"torch.device\":\n \"\"\"Return the device of the metric.\"\"\"\n return self._device\n\n def type(self, dst_type: Union[str, torch.dtype]) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def float(self) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def double(self) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def half(self) -> \"Metric\":\n \"\"\"Method override default and prevent dtype casting.\n\n Please use `metric.set_dtype(dtype)` instead.\n \"\"\"\n return self\n\n def set_dtype(self, dst_type: Union[str, torch.dtype]) -> \"Metric\":\n \"\"\"Special version of `type` for transferring all metric states to specific dtype\n Arguments:\n dst_type (type or string): the desired type.\n \"\"\"\n return super().type(dst_type)\n\n def _apply(self, fn: Callable) -> Module:\n \"\"\"Overwrite _apply function such that we can also move metric states to the correct device when `.to`,\n `.cuda`, etc methods are called.\n \"\"\"\n this = super()._apply(fn)\n # Also apply fn to metric states and defaults\n for key, value in this._defaults.items():\n if isinstance(value, Tensor):\n this._defaults[key] = fn(value)\n elif isinstance(value, Sequence):\n this._defaults[key] = [fn(v) for v in value]\n\n current_val = getattr(this, key)\n if isinstance(current_val, Tensor):\n setattr(this, key, fn(current_val))\n elif isinstance(current_val, Sequence):\n setattr(this, key, [fn(cur_v) for cur_v in current_val])\n else:\n raise TypeError(\n \"Expected metric state to be either a Tensor\" f\"or a list of Tensor, but encountered {current_val}\"\n )\n\n # make sure to update the device attribute\n # if the dummy tensor moves device by fn function we should also update the attribute\n self._device = fn(torch.zeros(1, device=self.device)).device\n\n # Additional apply to forward cache and computed attributes (may be nested)\n if this._computed is not None:\n this._computed = apply_to_collection(this._computed, Tensor, fn)\n if this._forward_cache is not None:\n this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn)\n\n return this\n\n def persistent(self, mode: bool = False) -> None:\n \"\"\"Method for post-init to change if metric states should be saved to its state_dict.\"\"\"\n for key in self._persistent:\n self._persistent[key] = mode\n\n def state_dict(\n self,\n destination: Dict[str, Any] = None,\n prefix: str = \"\",\n keep_vars: bool = False,\n ) -> Optional[Dict[str, Any]]:\n \"\"\"Get the current state of metric as an dictionary.\n\n Args:\n destination: Optional dictionary, that if provided, the state of 
module will be updated into the dict and\n the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned.\n prefix: optional string, a prefix added to parameter and buffer names to compose the keys in state_dict.\n keep_vars: by default the :class:`~torch.Tensor`s returned in the state dict are detached from autograd.\n If set to ``True``, detaching will not be performed.\n \"\"\"\n destination = super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)\n # Register metric states to be part of the state_dict\n for key in self._defaults:\n if not self._persistent[key]:\n continue\n current_val = getattr(self, key)\n if not keep_vars:\n if isinstance(current_val, Tensor):\n current_val = current_val.detach()\n elif isinstance(current_val, list):\n current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val]\n destination[prefix + key] = deepcopy(current_val)\n return destination\n\n def _load_from_state_dict(\n self,\n state_dict: dict,\n prefix: str,\n local_metadata: dict,\n strict: bool,\n missing_keys: List[str],\n unexpected_keys: List[str],\n error_msgs: List[str],\n ) -> None:\n \"\"\"Loads metric states from state_dict.\"\"\"\n for key in self._defaults:\n name = prefix + key\n if name in state_dict:\n setattr(self, key, state_dict.pop(name))\n super()._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n\n def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]:\n \"\"\"filter kwargs such that they match the update signature of the metric.\"\"\"\n # filter all parameters based on update signature except those of\n # type VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs)\n _params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)\n _sign_params = self._update_signature.parameters\n filtered_kwargs = {\n k: v for k, v in kwargs.items() if (k in _sign_params and _sign_params[k].kind not in _params)\n }\n\n exists_var_keyword = any(v.kind == inspect.Parameter.VAR_KEYWORD for v in _sign_params.values())\n # if no kwargs filtered, return all kwargs as default\n if not filtered_kwargs and not exists_var_keyword:\n # no kwargs in update signature -> don't return any kwargs\n filtered_kwargs = {}\n elif exists_var_keyword:\n # kwargs found in update signature -> return all kwargs to be sure to not omit any.\n # filtering logic is likely implemented within the update call.\n filtered_kwargs = kwargs\n return filtered_kwargs\n\n def __hash__(self) -> int:\n \"\"\"Returns an unique hash of the metric.\n\n The hash depends on both the class itself but also the current metric state, which therefore enforces that two\n instances of the same metrics never have the same hash even if they have been updated on the same data.\n \"\"\"\n # we need to add the id here, since PyTorch requires a module hash to be unique.\n # Internally, PyTorch nn.Module relies on that for children discovery\n # (see https://github.com/pytorch/pytorch/blob/v1.9.0/torch/nn/modules/module.py#L1544)\n # For metrics that include tensors it is not a problem,\n # since their hash is unique based on the memory location but we cannot rely on that for every metric.\n hash_vals = [self.__class__.__name__, id(self)]\n\n for key in self._defaults:\n val = getattr(self, key)\n # Special case: allow list values, so long\n # as their elements are hashable\n if hasattr(val, \"__iter__\") and not isinstance(val, Tensor):\n hash_vals.extend(val)\n else:\n hash_vals.append(val)\n\n 
return hash(tuple(hash_vals))\n\n def __add__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the addition operator.\"\"\"\n return CompositionalMetric(torch.add, self, other)\n\n def __and__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical and operator.\"\"\"\n return CompositionalMetric(torch.bitwise_and, self, other)\n\n def __eq__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the equal operator.\"\"\"\n return CompositionalMetric(torch.eq, self, other)\n\n def __floordiv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the floor division operator.\"\"\"\n return CompositionalMetric(torch.floor_divide, self, other)\n\n def __ge__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the greater than or equal operator.\"\"\"\n return CompositionalMetric(torch.ge, self, other)\n\n def __gt__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the greater than operator.\"\"\"\n return CompositionalMetric(torch.gt, self, other)\n\n def __le__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the less than or equal operator.\"\"\"\n return CompositionalMetric(torch.le, self, other)\n\n def __lt__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the less than operator.\"\"\"\n return CompositionalMetric(torch.lt, self, other)\n\n def __matmul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the matrix multiplication operator.\"\"\"\n return CompositionalMetric(torch.matmul, self, other)\n\n def __mod__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the remainder operator.\"\"\"\n return CompositionalMetric(torch.fmod, self, other)\n\n def __mul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the multiplication operator.\"\"\"\n return CompositionalMetric(torch.mul, self, other)\n\n # Fixme: this shall return bool instead of Metric\n def __ne__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the not equal operator.\"\"\"\n return CompositionalMetric(torch.ne, self, other)\n\n def __or__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical or operator.\"\"\"\n return CompositionalMetric(torch.bitwise_or, self, other)\n\n def __pow__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the exponential/power operator.\"\"\"\n return CompositionalMetric(torch.pow, self, other)\n\n def __radd__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the addition operator.\"\"\"\n return CompositionalMetric(torch.add, other, self)\n\n def __rand__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical and operator.\"\"\"\n # swap them since bitwise_and only supports that way and it's commutative\n return CompositionalMetric(torch.bitwise_and, self, other)\n\n def __rfloordiv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the floor division operator.\"\"\"\n return CompositionalMetric(torch.floor_divide, other, self)\n\n def __rmatmul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the matrix multiplication operator.\"\"\"\n return 
CompositionalMetric(torch.matmul, other, self)\n\n def __rmod__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the remainder operator.\"\"\"\n return CompositionalMetric(torch.fmod, other, self)\n\n def __rmul__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the multiplication operator.\"\"\"\n return CompositionalMetric(torch.mul, other, self)\n\n def __ror__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical or operator.\"\"\"\n return CompositionalMetric(torch.bitwise_or, other, self)\n\n def __rpow__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the exponential/power operator.\"\"\"\n return CompositionalMetric(torch.pow, other, self)\n\n def __rsub__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the subtraction operator.\"\"\"\n return CompositionalMetric(torch.sub, other, self)\n\n def __rtruediv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the true divide operator.\"\"\"\n return CompositionalMetric(torch.true_divide, other, self)\n\n def __rxor__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical xor operator.\"\"\"\n return CompositionalMetric(torch.bitwise_xor, other, self)\n\n def __sub__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the subtraction operator.\"\"\"\n return CompositionalMetric(torch.sub, self, other)\n\n def __truediv__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the true divide operator.\"\"\"\n return CompositionalMetric(torch.true_divide, self, other)\n\n def __xor__(self, other: \"Metric\") -> \"Metric\":\n \"\"\"Construct conpositional metric using the logical xor operator.\"\"\"\n return CompositionalMetric(torch.bitwise_xor, self, other)\n\n def __abs__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using the absolute operator.\"\"\"\n return CompositionalMetric(torch.abs, self, None)\n\n def __inv__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using the not operator.\"\"\"\n return CompositionalMetric(torch.bitwise_not, self, None)\n\n def __invert__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using the not operator.\"\"\"\n return self.__inv__()\n\n def __neg__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using absolute negative operator.\"\"\"\n return CompositionalMetric(_neg, self, None)\n\n def __pos__(self) -> \"Metric\":\n \"\"\"Construct conpositional metric using absolute operator.\"\"\"\n return CompositionalMetric(torch.abs, self, None)\n\n def __getitem__(self, idx: int) -> \"Metric\":\n \"\"\"Construct conpositional metric using the get item operator.\"\"\"\n return CompositionalMetric(lambda x: x[idx], self, None)\n\n def __getnewargs__(self) -> Tuple:\n \"\"\"Needede method for construction of new metrics __new__ method.\"\"\"\n return (Metric.__str__(self),)\n\n __iter__ = None\n\n\ndef _neg(x: Tensor) -> Tensor:\n return -torch.abs(x)\n\n\nclass CompositionalMetric(Metric):\n \"\"\"Composition of two metrics with a specific operator which will be executed upon metrics compute.\"\"\"\n\n def __init__(\n self,\n operator: Callable,\n metric_a: Union[Metric, int, float, Tensor],\n metric_b: Union[Metric, int, float, Tensor, None],\n ) -> None:\n \"\"\"Args:\n operator: the operator taking in one (if metric_b is None)\n or 
two arguments. Will be applied to outputs of metric_a.compute()\n and (optionally if metric_b is not None) metric_b.compute()\n metric_a: first metric whose compute() result is the first argument of operator\n metric_b: second metric whose compute() result is the second argument of operator.\n For operators taking in only one input, this should be None.\n \"\"\"\n super().__init__()\n\n self.op = operator\n\n if isinstance(metric_a, Tensor):\n self.register_buffer(\"metric_a\", metric_a)\n else:\n self.metric_a = metric_a\n\n if isinstance(metric_b, Tensor):\n self.register_buffer(\"metric_b\", metric_b)\n else:\n self.metric_b = metric_b\n\n def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None:\n \"\"\"No syncing required here. syncing will be done in metric_a and metric_b.\"\"\"\n pass\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Redirect the call to the input which the conposition was formed from.\"\"\"\n if isinstance(self.metric_a, Metric):\n self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs))\n\n if isinstance(self.metric_b, Metric):\n self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs))\n\n def compute(self) -> Any:\n \"\"\"Redirect the call to the input which the conposition was formed from.\"\"\"\n # also some parsing for kwargs?\n val_a = self.metric_a.compute() if isinstance(self.metric_a, Metric) else self.metric_a\n val_b = self.metric_b.compute() if isinstance(self.metric_b, Metric) else self.metric_b\n\n if val_b is None:\n return self.op(val_a)\n\n return self.op(val_a, val_b)\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Calculate metric on current batch and accumulate to global state.\"\"\"\n val_a = (\n self.metric_a(*args, **self.metric_a._filter_kwargs(**kwargs))\n if isinstance(self.metric_a, Metric)\n else self.metric_a\n )\n val_b = (\n self.metric_b(*args, **self.metric_b._filter_kwargs(**kwargs))\n if isinstance(self.metric_b, Metric)\n else self.metric_b\n )\n\n if val_a is None:\n return None\n\n if val_b is None:\n if isinstance(self.metric_b, Metric):\n return None\n\n # Unary op\n return self.op(val_a)\n\n # Binary op\n return self.op(val_a, val_b)\n\n def reset(self) -> None:\n \"\"\"Redirect the call to the input which the conposition was formed from.\"\"\"\n if isinstance(self.metric_a, Metric):\n self.metric_a.reset()\n\n if isinstance(self.metric_b, Metric):\n self.metric_b.reset()\n\n def persistent(self, mode: bool = False) -> None:\n \"\"\"Change if metric state is persistent (save as part of state_dict) or not.\n\n Args:\n mode: bool indicating if all states should be persistent or not\n\n \"\"\"\n if isinstance(self.metric_a, Metric):\n self.metric_a.persistent(mode=mode)\n if isinstance(self.metric_b, Metric):\n self.metric_b.persistent(mode=mode)\n\n def __repr__(self) -> str:\n \"\"\"Returns a representation of the compositional metric, including the two inputs it was formed from.\"\"\"\n _op_metrics = f\"(\\n {self.op.__name__}(\\n {repr(self.metric_a)},\\n {repr(self.metric_b)}\\n )\\n)\"\n repr_str = self.__class__.__name__ + _op_metrics\n\n return repr_str\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n \"\"\"No wrapping nessesary for compositional metrics.\"\"\"\n return compute\n", "path": "src/torchmetrics/metric.py"}]} |
gh_patches_debug_1307 | rasdani/github-patches | git_diff | nltk__nltk-2572 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Method2 of smoothing function in nltk.translate.bleu_score needs to ignore unigram precision score
According to [Lin & Och, 2004](http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf), the second smoothing method should add 1 to both the numerator and the denominator of the precision score for all n-grams of order n >= 2. However, as we can see from the code, it adds 1 to the precision score of every n-gram, including unigrams.
```python
def method2(self, p_n, *args, **kwargs):
"""
Smoothing method 2: Add 1 to both numerator and denominator from
Chin-Yew Lin and Franz Josef Och (2004) Automatic evaluation of
machine translation quality using longest common subsequence and
skip-bigram statistics. In ACL04.
"""
return [
Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)
for p_i in p_n
]
```
--- END ISSUE ---
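For orientation, a minimal sketch of the behaviour the issue asks for is shown below. It is not NLTK's implementation: precisions are represented as plain `(numerator, denominator)` pairs rather than the unnormalized `Fraction` objects NLTK passes around, and the example counts are invented purely for illustration. The point is that only n-grams of order 2 and above receive the add-1 smoothing, while the unigram precision is passed through unchanged.
```python
from fractions import Fraction

def add_one_smoothing(p_n):
    """Sketch of Lin & Och (2004) add-1 smoothing: the unigram precision
    (index 0) is left untouched; every higher-order precision gets 1 added
    to both its numerator and its denominator."""
    return [
        counts if i == 0                      # p_1: no smoothing
        else (counts[0] + 1, counts[1] + 1)   # p_n for n >= 2: add-1
        for i, counts in enumerate(p_n)
    ]

# Hypothetical 1- to 4-gram match counts: 3/4, 1/3, 0/2, 0/1
raw = [(3, 4), (1, 3), (0, 2), (0, 1)]
smoothed = add_one_smoothing(raw)
print(smoothed)  # [(3, 4), (2, 4), (1, 3), (1, 2)]
print([float(Fraction(n, d)) for n, d in smoothed])  # ~ [0.75, 0.5, 0.33, 0.5]
```
The patch shown later in this record applies the same guard inside `method2`, leaving `p_n[0]` untouched and smoothing only the higher-order precisions.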
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nltk/translate/bleu_score.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Natural Language Toolkit: BLEU Score
3 #
4 # Copyright (C) 2001-2020 NLTK Project
5 # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
6 # Contributors: Björn Mattsson, Dmitrijs Milajevs, Liling Tan
7 # URL: <http://nltk.org/>
8 # For license information, see LICENSE.TXT
9
10 """BLEU score implementation."""
11
12 import math
13 import sys
14 from fractions import Fraction
15 import warnings
16 from collections import Counter
17
18 from nltk.util import ngrams
19
20
21 def sentence_bleu(
22 references,
23 hypothesis,
24 weights=(0.25, 0.25, 0.25, 0.25),
25 smoothing_function=None,
26 auto_reweigh=False,
27 ):
28 """
29 Calculate BLEU score (Bilingual Evaluation Understudy) from
30 Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
31 "BLEU: a method for automatic evaluation of machine translation."
32 In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf
33
34 >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
35 ... 'ensures', 'that', 'the', 'military', 'always',
36 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
37
38 >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
39 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
40 ... 'that', 'party', 'direct']
41
42 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
43 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
44 ... 'heed', 'Party', 'commands']
45
46 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
47 ... 'guarantees', 'the', 'military', 'forces', 'always',
48 ... 'being', 'under', 'the', 'command', 'of', 'the',
49 ... 'Party']
50
51 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
52 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
53 ... 'of', 'the', 'party']
54
55 >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
56 0.5045...
57
58 If there is no ngrams overlap for any order of n-grams, BLEU returns the
59 value 0. This is because the precision for the order of n-grams without
60 overlap is 0, and the geometric mean in the final BLEU score computation
61 multiplies the 0 with the precision of other n-grams. This results in 0
62 (independently of the precision of the othe n-gram orders). The following
63 example has zero 3-gram and 4-gram overlaps:
64
65 >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
66 0.0
67
68 To avoid this harsh behaviour when no ngram overlaps are found a smoothing
69 function can be used.
70
71 >>> chencherry = SmoothingFunction()
72 >>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
73 ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
74 0.0370...
75
76 The default BLEU calculates a score for up to 4-grams using uniform
77 weights (this is called BLEU-4). To evaluate your translations with
78 higher/lower order ngrams, use customized weights. E.g. when accounting
79 for up to 5-grams with uniform weights (this is called BLEU-5) use:
80
81 >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
82 >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
83 0.3920...
84
85 :param references: reference sentences
86 :type references: list(list(str))
87 :param hypothesis: a hypothesis sentence
88 :type hypothesis: list(str)
89 :param weights: weights for unigrams, bigrams, trigrams and so on
90 :type weights: list(float)
91 :param smoothing_function:
92 :type smoothing_function: SmoothingFunction
93 :param auto_reweigh: Option to re-normalize the weights uniformly.
94 :type auto_reweigh: bool
95 :return: The sentence-level BLEU score.
96 :rtype: float
97 """
98 return corpus_bleu(
99 [references], [hypothesis], weights, smoothing_function, auto_reweigh
100 )
101
102
103 def corpus_bleu(
104 list_of_references,
105 hypotheses,
106 weights=(0.25, 0.25, 0.25, 0.25),
107 smoothing_function=None,
108 auto_reweigh=False,
109 ):
110 """
111 Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
112 the hypotheses and their respective references.
113
114 Instead of averaging the sentence level BLEU scores (i.e. marco-average
115 precision), the original BLEU metric (Papineni et al. 2002) accounts for
116 the micro-average precision (i.e. summing the numerators and denominators
117 for each hypothesis-reference(s) pairs before the division).
118
119 >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
120 ... 'ensures', 'that', 'the', 'military', 'always',
121 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
122 >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
123 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
124 ... 'heed', 'Party', 'commands']
125 >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
126 ... 'guarantees', 'the', 'military', 'forces', 'always',
127 ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
128 >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
129 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
130 ... 'of', 'the', 'party']
131
132 >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
133 ... 'interested', 'in', 'world', 'history']
134 >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
135 ... 'because', 'he', 'read', 'the', 'book']
136
137 >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
138 >>> hypotheses = [hyp1, hyp2]
139 >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
140 0.5920...
141
142 The example below show that corpus_bleu() is different from averaging
143 sentence_bleu() for hypotheses
144
145 >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
146 >>> score2 = sentence_bleu([ref2a], hyp2)
147 >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
148 0.6223...
149
150 :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
151 :type list_of_references: list(list(list(str)))
152 :param hypotheses: a list of hypothesis sentences
153 :type hypotheses: list(list(str))
154 :param weights: weights for unigrams, bigrams, trigrams and so on
155 :type weights: list(float)
156 :param smoothing_function:
157 :type smoothing_function: SmoothingFunction
158 :param auto_reweigh: Option to re-normalize the weights uniformly.
159 :type auto_reweigh: bool
160 :return: The corpus-level BLEU score.
161 :rtype: float
162 """
163 # Before proceeding to compute BLEU, perform sanity checks.
164
165 p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
166 p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
167 hyp_lengths, ref_lengths = 0, 0
168
169 assert len(list_of_references) == len(hypotheses), (
170 "The number of hypotheses and their reference(s) should be the " "same "
171 )
172
173 # Iterate through each hypothesis and their corresponding references.
174 for references, hypothesis in zip(list_of_references, hypotheses):
175 # For each order of ngram, calculate the numerator and
176 # denominator for the corpus-level modified precision.
177 for i, _ in enumerate(weights, start=1):
178 p_i = modified_precision(references, hypothesis, i)
179 p_numerators[i] += p_i.numerator
180 p_denominators[i] += p_i.denominator
181
182 # Calculate the hypothesis length and the closest reference length.
183 # Adds them to the corpus-level hypothesis and reference counts.
184 hyp_len = len(hypothesis)
185 hyp_lengths += hyp_len
186 ref_lengths += closest_ref_length(references, hyp_len)
187
188 # Calculate corpus-level brevity penalty.
189 bp = brevity_penalty(ref_lengths, hyp_lengths)
190
191 # Uniformly re-weighting based on maximum hypothesis lengths if largest
192 # order of n-grams < 4 and weights is set at default.
193 if auto_reweigh:
194 if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
195 weights = (1 / hyp_lengths,) * hyp_lengths
196
197 # Collects the various precision values for the different ngram orders.
198 p_n = [
199 Fraction(p_numerators[i], p_denominators[i], _normalize=False)
200 for i, _ in enumerate(weights, start=1)
201 ]
202
203 # Returns 0 if there's no matching n-grams
204 # We only need to check for p_numerators[1] == 0, since if there's
205 # no unigrams, there won't be any higher order ngrams.
206 if p_numerators[1] == 0:
207 return 0
208
209 # If there's no smoothing, set use method0 from SmoothinFunction class.
210 if not smoothing_function:
211 smoothing_function = SmoothingFunction().method0
212 # Smoothen the modified precision.
213 # Note: smoothing_function() may convert values into floats;
214 # it tries to retain the Fraction object as much as the
215 # smoothing method allows.
216 p_n = smoothing_function(
217 p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
218 )
219 s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
220 s = bp * math.exp(math.fsum(s))
221 return s
222
223
224 def modified_precision(references, hypothesis, n):
225 """
226 Calculate modified ngram precision.
227
228 The normal precision method may lead to some wrong translations with
229 high-precision, e.g., the translation, in which a word of reference
230 repeats several times, has very high precision.
231
232 This function only returns the Fraction object that contains the numerator
233 and denominator necessary to calculate the corpus-level precision.
234 To calculate the modified precision for a single pair of hypothesis and
235 references, cast the Fraction object into a float.
236
237 The famous "the the the ... " example shows that you can get BLEU precision
238 by duplicating high frequency words.
239
240 >>> reference1 = 'the cat is on the mat'.split()
241 >>> reference2 = 'there is a cat on the mat'.split()
242 >>> hypothesis1 = 'the the the the the the the'.split()
243 >>> references = [reference1, reference2]
244 >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
245 0.2857...
246
247 In the modified n-gram precision, a reference word will be considered
248 exhausted after a matching hypothesis word is identified, e.g.
249
250 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
251 ... 'ensures', 'that', 'the', 'military', 'will',
252 ... 'forever', 'heed', 'Party', 'commands']
253 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
254 ... 'guarantees', 'the', 'military', 'forces', 'always',
255 ... 'being', 'under', 'the', 'command', 'of', 'the',
256 ... 'Party']
257 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
258 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
259 ... 'of', 'the', 'party']
260 >>> hypothesis = 'of the'.split()
261 >>> references = [reference1, reference2, reference3]
262 >>> float(modified_precision(references, hypothesis, n=1))
263 1.0
264 >>> float(modified_precision(references, hypothesis, n=2))
265 1.0
266
267 An example of a normal machine translation hypothesis:
268
269 >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
270 ... 'ensures', 'that', 'the', 'military', 'always',
271 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
272
273 >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
274 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
275 ... 'that', 'party', 'direct']
276
277 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
278 ... 'ensures', 'that', 'the', 'military', 'will',
279 ... 'forever', 'heed', 'Party', 'commands']
280
281 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
282 ... 'guarantees', 'the', 'military', 'forces', 'always',
283 ... 'being', 'under', 'the', 'command', 'of', 'the',
284 ... 'Party']
285
286 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
287 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
288 ... 'of', 'the', 'party']
289 >>> references = [reference1, reference2, reference3]
290 >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
291 0.9444...
292 >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
293 0.5714...
294 >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
295 0.5882352941176471
296 >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
297 0.07692...
298
299
300 :param references: A list of reference translations.
301 :type references: list(list(str))
302 :param hypothesis: A hypothesis translation.
303 :type hypothesis: list(str)
304 :param n: The ngram order.
305 :type n: int
306 :return: BLEU's modified precision for the nth order ngram.
307 :rtype: Fraction
308 """
309 # Extracts all ngrams in hypothesis
310 # Set an empty Counter if hypothesis is empty.
311 counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
312 # Extract a union of references' counts.
313 # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
314 max_counts = {}
315 for reference in references:
316 reference_counts = (
317 Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
318 )
319 for ngram in counts:
320 max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
321
322 # Assigns the intersection between hypothesis and references' counts.
323 clipped_counts = {
324 ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()
325 }
326
327 numerator = sum(clipped_counts.values())
328 # Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
329 # Usually this happens when the ngram order is > len(reference).
330 denominator = max(1, sum(counts.values()))
331
332 return Fraction(numerator, denominator, _normalize=False)
333
334
335 def closest_ref_length(references, hyp_len):
336 """
337 This function finds the reference that is the closest length to the
338 hypothesis. The closest reference length is referred to as *r* variable
339 from the brevity penalty formula in Papineni et. al. (2002)
340
341 :param references: A list of reference translations.
342 :type references: list(list(str))
343 :param hyp_len: The length of the hypothesis.
344 :type hyp_len: int
345 :return: The length of the reference that's closest to the hypothesis.
346 :rtype: int
347 """
348 ref_lens = (len(reference) for reference in references)
349 closest_ref_len = min(
350 ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
351 )
352 return closest_ref_len
353
354
355 def brevity_penalty(closest_ref_len, hyp_len):
356 """
357 Calculate brevity penalty.
358
359 As the modified n-gram precision still has the problem from the short
360 length sentence, brevity penalty is used to modify the overall BLEU
361 score according to length.
362
363 An example from the paper. There are three references with length 12, 15
364 and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.
365
366 >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
367 >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
368 >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
369 >>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
370 >>> references = [reference1, reference2, reference3]
371 >>> hyp_len = len(hypothesis)
372 >>> closest_ref_len = closest_ref_length(references, hyp_len)
373 >>> brevity_penalty(closest_ref_len, hyp_len)
374 1.0
375
376 In case a hypothesis translation is shorter than the references, penalty is
377 applied.
378
379 >>> references = [['a'] * 28, ['a'] * 28]
380 >>> hypothesis = ['a'] * 12
381 >>> hyp_len = len(hypothesis)
382 >>> closest_ref_len = closest_ref_length(references, hyp_len)
383 >>> brevity_penalty(closest_ref_len, hyp_len)
384 0.2635971381157267
385
386 The length of the closest reference is used to compute the penalty. If the
387 length of a hypothesis is 12, and the reference lengths are 13 and 2, the
388 penalty is applied because the hypothesis length (12) is less then the
389 closest reference length (13).
390
391 >>> references = [['a'] * 13, ['a'] * 2]
392 >>> hypothesis = ['a'] * 12
393 >>> hyp_len = len(hypothesis)
394 >>> closest_ref_len = closest_ref_length(references, hyp_len)
395 >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
396 0.9200...
397
398 The brevity penalty doesn't depend on reference order. More importantly,
399 when two reference sentences are at the same distance, the shortest
400 reference sentence length is used.
401
402 >>> references = [['a'] * 13, ['a'] * 11]
403 >>> hypothesis = ['a'] * 12
404 >>> hyp_len = len(hypothesis)
405 >>> closest_ref_len = closest_ref_length(references, hyp_len)
406 >>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
407 >>> hyp_len = len(hypothesis)
408 >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
409 >>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
410 >>> bp1 == bp2 == 1
411 True
412
413 A test example from mteval-v13a.pl (starting from the line 705):
414
415 >>> references = [['a'] * 11, ['a'] * 8]
416 >>> hypothesis = ['a'] * 7
417 >>> hyp_len = len(hypothesis)
418 >>> closest_ref_len = closest_ref_length(references, hyp_len)
419 >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
420 0.8668...
421
422 >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
423 >>> hypothesis = ['a'] * 7
424 >>> hyp_len = len(hypothesis)
425 >>> closest_ref_len = closest_ref_length(references, hyp_len)
426 >>> brevity_penalty(closest_ref_len, hyp_len)
427 1.0
428
429 :param hyp_len: The length of the hypothesis for a single sentence OR the
430 sum of all the hypotheses' lengths for a corpus
431 :type hyp_len: int
432 :param closest_ref_len: The length of the closest reference for a single
433 hypothesis OR the sum of all the closest references for every hypotheses.
434 :type closest_ref_len: int
435 :return: BLEU's brevity penalty.
436 :rtype: float
437 """
438 if hyp_len > closest_ref_len:
439 return 1
440 # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
441 elif hyp_len == 0:
442 return 0
443 else:
444 return math.exp(1 - closest_ref_len / hyp_len)
445
446
447 class SmoothingFunction:
448 """
449 This is an implementation of the smoothing techniques
450 for segment-level BLEU scores that was presented in
451 Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
452 Smoothing Techniques for Sentence-Level BLEU. In WMT14.
453 http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
454 """
455
456 def __init__(self, epsilon=0.1, alpha=5, k=5):
457 """
458 This will initialize the parameters required for the various smoothing
459 techniques, the default values are set to the numbers used in the
460 experiments from Chen and Cherry (2014).
461
462 >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures',
463 ... 'that', 'the', 'military', 'always', 'obeys', 'the',
464 ... 'commands', 'of', 'the', 'party']
465 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures',
466 ... 'that', 'the', 'military', 'will', 'forever', 'heed',
467 ... 'Party', 'commands']
468
469 >>> chencherry = SmoothingFunction()
470 >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS
471 0.4118...
472 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS
473 0.4118...
474 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS
475 0.4118...
476 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS
477 0.4489...
478 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS
479 0.4118...
480 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS
481 0.4118...
482 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS
483 0.4905...
484 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS
485 0.4135...
486 >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS
487 0.4905...
488
489 :param epsilon: the epsilon value use in method 1
490 :type epsilon: float
491 :param alpha: the alpha value use in method 6
492 :type alpha: int
493 :param k: the k value use in method 4
494 :type k: int
495 """
496 self.epsilon = epsilon
497 self.alpha = alpha
498 self.k = k
499
500 def method0(self, p_n, *args, **kwargs):
501 """
502 No smoothing.
503 """
504 p_n_new = []
505 for i, p_i in enumerate(p_n):
506 if p_i.numerator != 0:
507 p_n_new.append(p_i)
508 else:
509 _msg = str(
510 "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n"
511 "Therefore the BLEU score evaluates to 0, independently of\n"
512 "how many N-gram overlaps of lower order it contains.\n"
513 "Consider using lower n-gram order or use "
514 "SmoothingFunction()"
515 ).format(i + 1)
516 warnings.warn(_msg)
517 # When numerator==0 where denonminator==0 or !=0, the result
518 # for the precision score should be equal to 0 or undefined.
519 # Due to BLEU geometric mean computation in logarithm space,
520 # we we need to take the return sys.float_info.min such that
521 # math.log(sys.float_info.min) returns a 0 precision score.
522 p_n_new.append(sys.float_info.min)
523 return p_n_new
524
525 def method1(self, p_n, *args, **kwargs):
526 """
527 Smoothing method 1: Add *epsilon* counts to precision with 0 counts.
528 """
529 return [
530 (p_i.numerator + self.epsilon) / p_i.denominator
531 if p_i.numerator == 0
532 else p_i
533 for p_i in p_n
534 ]
535
536 def method2(self, p_n, *args, **kwargs):
537 """
538 Smoothing method 2: Add 1 to both numerator and denominator from
539 Chin-Yew Lin and Franz Josef Och (2004) Automatic evaluation of
540 machine translation quality using longest common subsequence and
541 skip-bigram statistics. In ACL04.
542 """
543 return [
544 Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)
545 for p_i in p_n
546 ]
547
548 def method3(self, p_n, *args, **kwargs):
549 """
550 Smoothing method 3: NIST geometric sequence smoothing
551 The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each
552 precision score whose matching n-gram count is null.
553 k is 1 for the first 'n' value for which the n-gram match count is null/
554 For example, if the text contains:
555 - one 2-gram match
556 - and (consequently) two 1-gram matches
557 the n-gram count for each individual precision score would be:
558 - n=1 => prec_count = 2 (two unigrams)
559 - n=2 => prec_count = 1 (one bigram)
560 - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)
561 - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)
562 """
563 incvnt = 1 # From the mteval-v13a.pl, it's referred to as k.
564 for i, p_i in enumerate(p_n):
565 if p_i.numerator == 0:
566 p_n[i] = 1 / (2 ** incvnt * p_i.denominator)
567 incvnt += 1
568 return p_n
569
570 def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
571 """
572 Smoothing method 4:
573 Shorter translations may have inflated precision values due to having
574 smaller denominators; therefore, we give them proportionally
575 smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry
576 suggests dividing by 1/ln(len(T)), where T is the length of the translation.
577 """
578 hyp_len = hyp_len if hyp_len else len(hypothesis)
579 for i, p_i in enumerate(p_n):
580 if p_i.numerator == 0 and hyp_len != 0:
581 incvnt = i + 1 * self.k / math.log(
582 hyp_len
583 ) # Note that this K is different from the K from NIST.
584 p_n[i] = incvnt / p_i.denominator
585 return p_n
586
587 def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
588 """
589 Smoothing method 5:
590 The matched counts for similar values of n should be similar. To a
591 calculate the n-gram matched count, it averages the n−1, n and n+1 gram
592 matched counts.
593 """
594 hyp_len = hyp_len if hyp_len else len(hypothesis)
595 m = {}
596 # Requires an precision value for an addition ngram order.
597 p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]
598 m[-1] = p_n[0] + 1
599 for i, p_i in enumerate(p_n):
600 p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3
601 m[i] = p_n[i]
602 return p_n
603
604 def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
605 """
606 Smoothing method 6:
607 Interpolates the maximum likelihood estimate of the precision *p_n* with
608 a prior estimate *pi0*. The prior is estimated by assuming that the ratio
609 between pn and pn−1 will be the same as that between pn−1 and pn−2; from
610 Gao and He (2013) Training MRF-Based Phrase Translation Models using
611 Gradient Ascent. In NAACL.
612 """
613 hyp_len = hyp_len if hyp_len else len(hypothesis)
614 # This smoothing only works when p_1 and p_2 is non-zero.
615 # Raise an error with an appropriate message when the input is too short
616 # to use this smoothing technique.
617 assert p_n[2], "This smoothing method requires non-zero precision for bigrams."
618 for i, p_i in enumerate(p_n):
619 if i in [0, 1]: # Skips the first 2 orders of ngrams.
620 continue
621 else:
622 pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2]
623 # No. of ngrams in translation that matches the reference.
624 m = p_i.numerator
625 # No. of ngrams in translation.
626 l = sum(1 for _ in ngrams(hypothesis, i + 1))
627 # Calculates the interpolated precision.
628 p_n[i] = (m + self.alpha * pi0) / (l + self.alpha)
629 return p_n
630
631 def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
632 """
633 Smoothing method 7:
634 Interpolates methods 4 and 5.
635 """
636 hyp_len = hyp_len if hyp_len else len(hypothesis)
637 p_n = self.method4(p_n, references, hypothesis, hyp_len)
638 p_n = self.method5(p_n, references, hypothesis, hyp_len)
639 return p_n
640
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nltk/translate/bleu_score.py b/nltk/translate/bleu_score.py
--- a/nltk/translate/bleu_score.py
+++ b/nltk/translate/bleu_score.py
@@ -541,8 +541,9 @@
skip-bigram statistics. In ACL04.
"""
return [
- Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)
- for p_i in p_n
+ Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False)
+ if i != 0 else p_n[0]
+ for i in range(len(p_n))
]
def method3(self, p_n, *args, **kwargs):
| {"golden_diff": "diff --git a/nltk/translate/bleu_score.py b/nltk/translate/bleu_score.py\n--- a/nltk/translate/bleu_score.py\n+++ b/nltk/translate/bleu_score.py\n@@ -541,8 +541,9 @@\n skip-bigram statistics. In ACL04.\n \"\"\"\n return [\n- Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)\n- for p_i in p_n\n+ Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False)\n+ if i != 0 else p_n[0]\n+ for i in range(len(p_n))\n ]\n \n def method3(self, p_n, *args, **kwargs):\n", "issue": "Method2 of smoothing function in nltk.translate.bleu_score needs to ignore unigram precision score\nAccording to [Lin & Och, 2004](http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf), second smoothing function should add 1 to both numerator & denominator of precision score for all n-grams where n >= 2. However, we can see from the code that it adds 1 to precision score corresponding to every n-gram including unigrams.\r\n\r\n```python\r\ndef method2(self, p_n, *args, **kwargs):\r\n \"\"\"\r\n Smoothing method 2: Add 1 to both numerator and denominator from\r\n Chin-Yew Lin and Franz Josef Och (2004) Automatic evaluation of\r\n machine translation quality using longest common subsequence and\r\n skip-bigram statistics. In ACL04.\r\n \"\"\"\r\n return [\r\n Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)\r\n for p_i in p_n\r\n ]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Natural Language Toolkit: BLEU Score\n#\n# Copyright (C) 2001-2020 NLTK Project\n# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n# Contributors: Bj\u00f6rn Mattsson, Dmitrijs Milajevs, Liling Tan\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"BLEU score implementation.\"\"\"\n\nimport math\nimport sys\nfrom fractions import Fraction\nimport warnings\nfrom collections import Counter\n\nfrom nltk.util import ngrams\n\n\ndef sentence_bleu(\n references,\n hypothesis,\n weights=(0.25, 0.25, 0.25, 0.25),\n smoothing_function=None,\n auto_reweigh=False,\n):\n \"\"\"\n Calculate BLEU score (Bilingual Evaluation Understudy) from\n Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.\n \"BLEU: a method for automatic evaluation of machine translation.\"\n In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf\n\n >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS\n 0.5045...\n\n If there is no ngrams overlap for any order of n-grams, BLEU returns the\n value 0. 
This is because the precision for the order of n-grams without\n overlap is 0, and the geometric mean in the final BLEU score computation\n multiplies the 0 with the precision of other n-grams. This results in 0\n (independently of the precision of the othe n-gram orders). The following\n example has zero 3-gram and 4-gram overlaps:\n\n >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS\n 0.0\n\n To avoid this harsh behaviour when no ngram overlaps are found a smoothing\n function can be used.\n\n >>> chencherry = SmoothingFunction()\n >>> sentence_bleu([reference1, reference2, reference3], hypothesis2,\n ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS\n 0.0370...\n\n The default BLEU calculates a score for up to 4-grams using uniform\n weights (this is called BLEU-4). To evaluate your translations with\n higher/lower order ngrams, use customized weights. E.g. when accounting\n for up to 5-grams with uniform weights (this is called BLEU-5) use:\n\n >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)\n >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS\n 0.3920...\n\n :param references: reference sentences\n :type references: list(list(str))\n :param hypothesis: a hypothesis sentence\n :type hypothesis: list(str)\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n :param smoothing_function:\n :type smoothing_function: SmoothingFunction\n :param auto_reweigh: Option to re-normalize the weights uniformly.\n :type auto_reweigh: bool\n :return: The sentence-level BLEU score.\n :rtype: float\n \"\"\"\n return corpus_bleu(\n [references], [hypothesis], weights, smoothing_function, auto_reweigh\n )\n\n\ndef corpus_bleu(\n list_of_references,\n hypotheses,\n weights=(0.25, 0.25, 0.25, 0.25),\n smoothing_function=None,\n auto_reweigh=False,\n):\n \"\"\"\n Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all\n the hypotheses and their respective references.\n\n Instead of averaging the sentence level BLEU scores (i.e. marco-average\n precision), the original BLEU metric (Papineni et al. 2002) accounts for\n the micro-average precision (i.e. summing the numerators and denominators\n for each hypothesis-reference(s) pairs before the division).\n\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS\n 0.5920...\n\n The example below show that corpus_bleu() is different from averaging\n sentence_bleu() for hypotheses\n\n >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)\n >>> score2 = sentence_bleu([ref2a], hyp2)\n >>> (score1 + score2) / 2 # doctest: +ELLIPSIS\n 0.6223...\n\n :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses\n :type list_of_references: list(list(list(str)))\n :param hypotheses: a list of hypothesis sentences\n :type hypotheses: list(list(str))\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n :param smoothing_function:\n :type smoothing_function: SmoothingFunction\n :param auto_reweigh: Option to re-normalize the weights uniformly.\n :type auto_reweigh: bool\n :return: The corpus-level BLEU score.\n :rtype: float\n \"\"\"\n # Before proceeding to compute BLEU, perform sanity checks.\n\n p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.\n p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.\n hyp_lengths, ref_lengths = 0, 0\n\n assert len(list_of_references) == len(hypotheses), (\n \"The number of hypotheses and their reference(s) should be the \" \"same \"\n )\n\n # Iterate through each hypothesis and their corresponding references.\n for references, hypothesis in zip(list_of_references, hypotheses):\n # For each order of ngram, calculate the numerator and\n # denominator for the corpus-level modified precision.\n for i, _ in enumerate(weights, start=1):\n p_i = modified_precision(references, hypothesis, i)\n p_numerators[i] += p_i.numerator\n p_denominators[i] += p_i.denominator\n\n # Calculate the hypothesis length and the closest reference length.\n # Adds them to the corpus-level hypothesis and reference counts.\n hyp_len = len(hypothesis)\n hyp_lengths += hyp_len\n ref_lengths += closest_ref_length(references, hyp_len)\n\n # Calculate corpus-level brevity penalty.\n bp = brevity_penalty(ref_lengths, hyp_lengths)\n\n # Uniformly re-weighting based on maximum hypothesis lengths if largest\n # order of n-grams < 4 and weights is set at default.\n if auto_reweigh:\n if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):\n weights = (1 / hyp_lengths,) * hyp_lengths\n\n # Collects the various precision values for the different ngram orders.\n p_n = [\n Fraction(p_numerators[i], p_denominators[i], _normalize=False)\n for i, _ in enumerate(weights, start=1)\n ]\n\n # Returns 0 if there's no matching n-grams\n # We only need to check for p_numerators[1] == 0, since if there's\n # no unigrams, there won't be any higher order ngrams.\n if p_numerators[1] == 0:\n return 0\n\n # If there's no smoothing, set use method0 from SmoothinFunction class.\n if not smoothing_function:\n smoothing_function = SmoothingFunction().method0\n # Smoothen the modified precision.\n # Note: smoothing_function() may convert values into floats;\n # it tries to retain the Fraction object as much as the\n # smoothing method allows.\n p_n = smoothing_function(\n p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths\n )\n s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))\n s = bp * math.exp(math.fsum(s))\n return s\n\n\ndef modified_precision(references, hypothesis, n):\n \"\"\"\n Calculate modified ngram precision.\n\n 
The normal precision method may lead to some wrong translations with\n high-precision, e.g., the translation, in which a word of reference\n repeats several times, has very high precision.\n\n This function only returns the Fraction object that contains the numerator\n and denominator necessary to calculate the corpus-level precision.\n To calculate the modified precision for a single pair of hypothesis and\n references, cast the Fraction object into a float.\n\n The famous \"the the the ... \" example shows that you can get BLEU precision\n by duplicating high frequency words.\n\n >>> reference1 = 'the cat is on the mat'.split()\n >>> reference2 = 'there is a cat on the mat'.split()\n >>> hypothesis1 = 'the the the the the the the'.split()\n >>> references = [reference1, reference2]\n >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS\n 0.2857...\n\n In the modified n-gram precision, a reference word will be considered\n exhausted after a matching hypothesis word is identified, e.g.\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will',\n ... 'forever', 'heed', 'Party', 'commands']\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n >>> hypothesis = 'of the'.split()\n >>> references = [reference1, reference2, reference3]\n >>> float(modified_precision(references, hypothesis, n=1))\n 1.0\n >>> float(modified_precision(references, hypothesis, n=2))\n 1.0\n\n An example of a normal machine translation hypothesis:\n\n >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will',\n ... 'forever', 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 
'of', 'the', 'party']\n >>> references = [reference1, reference2, reference3]\n >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS\n 0.9444...\n >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS\n 0.5714...\n >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS\n 0.5882352941176471\n >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS\n 0.07692...\n\n\n :param references: A list of reference translations.\n :type references: list(list(str))\n :param hypothesis: A hypothesis translation.\n :type hypothesis: list(str)\n :param n: The ngram order.\n :type n: int\n :return: BLEU's modified precision for the nth order ngram.\n :rtype: Fraction\n \"\"\"\n # Extracts all ngrams in hypothesis\n # Set an empty Counter if hypothesis is empty.\n counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()\n # Extract a union of references' counts.\n # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])\n max_counts = {}\n for reference in references:\n reference_counts = (\n Counter(ngrams(reference, n)) if len(reference) >= n else Counter()\n )\n for ngram in counts:\n max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n\n # Assigns the intersection between hypothesis and references' counts.\n clipped_counts = {\n ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()\n }\n\n numerator = sum(clipped_counts.values())\n # Ensures that denominator is minimum 1 to avoid ZeroDivisionError.\n # Usually this happens when the ngram order is > len(reference).\n denominator = max(1, sum(counts.values()))\n\n return Fraction(numerator, denominator, _normalize=False)\n\n\ndef closest_ref_length(references, hyp_len):\n \"\"\"\n This function finds the reference that is the closest length to the\n hypothesis. The closest reference length is referred to as *r* variable\n from the brevity penalty formula in Papineni et. al. (2002)\n\n :param references: A list of reference translations.\n :type references: list(list(str))\n :param hyp_len: The length of the hypothesis.\n :type hyp_len: int\n :return: The length of the reference that's closest to the hypothesis.\n :rtype: int\n \"\"\"\n ref_lens = (len(reference) for reference in references)\n closest_ref_len = min(\n ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)\n )\n return closest_ref_len\n\n\ndef brevity_penalty(closest_ref_len, hyp_len):\n \"\"\"\n Calculate brevity penalty.\n\n As the modified n-gram precision still has the problem from the short\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n\n An example from the paper. There are three references with length 12, 15\n and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.\n\n >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12\n >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15\n >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17\n >>> hypothesis = list('aaaaaaaaaaaa') # i.e. 
['a'] * 12\n >>> references = [reference1, reference2, reference3]\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len)\n 1.0\n\n In case a hypothesis translation is shorter than the references, penalty is\n applied.\n\n >>> references = [['a'] * 28, ['a'] * 28]\n >>> hypothesis = ['a'] * 12\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len)\n 0.2635971381157267\n\n The length of the closest reference is used to compute the penalty. If the\n length of a hypothesis is 12, and the reference lengths are 13 and 2, the\n penalty is applied because the hypothesis length (12) is less then the\n closest reference length (13).\n\n >>> references = [['a'] * 13, ['a'] * 2]\n >>> hypothesis = ['a'] * 12\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS\n 0.9200...\n\n The brevity penalty doesn't depend on reference order. More importantly,\n when two reference sentences are at the same distance, the shortest\n reference sentence length is used.\n\n >>> references = [['a'] * 13, ['a'] * 11]\n >>> hypothesis = ['a'] * 12\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> bp1 = brevity_penalty(closest_ref_len, hyp_len)\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)\n >>> bp2 = brevity_penalty(closest_ref_len, hyp_len)\n >>> bp1 == bp2 == 1\n True\n\n A test example from mteval-v13a.pl (starting from the line 705):\n\n >>> references = [['a'] * 11, ['a'] * 8]\n >>> hypothesis = ['a'] * 7\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS\n 0.8668...\n\n >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]\n >>> hypothesis = ['a'] * 7\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len)\n 1.0\n\n :param hyp_len: The length of the hypothesis for a single sentence OR the\n sum of all the hypotheses' lengths for a corpus\n :type hyp_len: int\n :param closest_ref_len: The length of the closest reference for a single\n hypothesis OR the sum of all the closest references for every hypotheses.\n :type closest_ref_len: int\n :return: BLEU's brevity penalty.\n :rtype: float\n \"\"\"\n if hyp_len > closest_ref_len:\n return 1\n # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0\n elif hyp_len == 0:\n return 0\n else:\n return math.exp(1 - closest_ref_len / hyp_len)\n\n\nclass SmoothingFunction:\n \"\"\"\n This is an implementation of the smoothing techniques\n for segment-level BLEU scores that was presented in\n Boxing Chen and Collin Cherry (2014) A Systematic Comparison of\n Smoothing Techniques for Sentence-Level BLEU. In WMT14.\n http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf\n \"\"\"\n\n def __init__(self, epsilon=0.1, alpha=5, k=5):\n \"\"\"\n This will initialize the parameters required for the various smoothing\n techniques, the default values are set to the numbers used in the\n experiments from Chen and Cherry (2014).\n\n >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures',\n ... 'that', 'the', 'military', 'always', 'obeys', 'the',\n ... 
'commands', 'of', 'the', 'party']\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures',\n ... 'that', 'the', 'military', 'will', 'forever', 'heed',\n ... 'Party', 'commands']\n\n >>> chencherry = SmoothingFunction()\n >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS\n 0.4489...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS\n 0.4905...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS\n 0.4135...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS\n 0.4905...\n\n :param epsilon: the epsilon value use in method 1\n :type epsilon: float\n :param alpha: the alpha value use in method 6\n :type alpha: int\n :param k: the k value use in method 4\n :type k: int\n \"\"\"\n self.epsilon = epsilon\n self.alpha = alpha\n self.k = k\n\n def method0(self, p_n, *args, **kwargs):\n \"\"\"\n No smoothing.\n \"\"\"\n p_n_new = []\n for i, p_i in enumerate(p_n):\n if p_i.numerator != 0:\n p_n_new.append(p_i)\n else:\n _msg = str(\n \"\\nThe hypothesis contains 0 counts of {}-gram overlaps.\\n\"\n \"Therefore the BLEU score evaluates to 0, independently of\\n\"\n \"how many N-gram overlaps of lower order it contains.\\n\"\n \"Consider using lower n-gram order or use \"\n \"SmoothingFunction()\"\n ).format(i + 1)\n warnings.warn(_msg)\n # When numerator==0 where denonminator==0 or !=0, the result\n # for the precision score should be equal to 0 or undefined.\n # Due to BLEU geometric mean computation in logarithm space,\n # we we need to take the return sys.float_info.min such that\n # math.log(sys.float_info.min) returns a 0 precision score.\n p_n_new.append(sys.float_info.min)\n return p_n_new\n\n def method1(self, p_n, *args, **kwargs):\n \"\"\"\n Smoothing method 1: Add *epsilon* counts to precision with 0 counts.\n \"\"\"\n return [\n (p_i.numerator + self.epsilon) / p_i.denominator\n if p_i.numerator == 0\n else p_i\n for p_i in p_n\n ]\n\n def method2(self, p_n, *args, **kwargs):\n \"\"\"\n Smoothing method 2: Add 1 to both numerator and denominator from\n Chin-Yew Lin and Franz Josef Och (2004) Automatic evaluation of\n machine translation quality using longest common subsequence and\n skip-bigram statistics. 
In ACL04.\n \"\"\"\n return [\n Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)\n for p_i in p_n\n ]\n\n def method3(self, p_n, *args, **kwargs):\n \"\"\"\n Smoothing method 3: NIST geometric sequence smoothing\n The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each\n precision score whose matching n-gram count is null.\n k is 1 for the first 'n' value for which the n-gram match count is null/\n For example, if the text contains:\n - one 2-gram match\n - and (consequently) two 1-gram matches\n the n-gram count for each individual precision score would be:\n - n=1 => prec_count = 2 (two unigrams)\n - n=2 => prec_count = 1 (one bigram)\n - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)\n - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)\n \"\"\"\n incvnt = 1 # From the mteval-v13a.pl, it's referred to as k.\n for i, p_i in enumerate(p_n):\n if p_i.numerator == 0:\n p_n[i] = 1 / (2 ** incvnt * p_i.denominator)\n incvnt += 1\n return p_n\n\n def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 4:\n Shorter translations may have inflated precision values due to having\n smaller denominators; therefore, we give them proportionally\n smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry\n suggests dividing by 1/ln(len(T)), where T is the length of the translation.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n for i, p_i in enumerate(p_n):\n if p_i.numerator == 0 and hyp_len != 0:\n incvnt = i + 1 * self.k / math.log(\n hyp_len\n ) # Note that this K is different from the K from NIST.\n p_n[i] = incvnt / p_i.denominator\n return p_n\n\n def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 5:\n The matched counts for similar values of n should be similar. To a\n calculate the n-gram matched count, it averages the n\u22121, n and n+1 gram\n matched counts.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n m = {}\n # Requires an precision value for an addition ngram order.\n p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]\n m[-1] = p_n[0] + 1\n for i, p_i in enumerate(p_n):\n p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3\n m[i] = p_n[i]\n return p_n\n\n def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 6:\n Interpolates the maximum likelihood estimate of the precision *p_n* with\n a prior estimate *pi0*. The prior is estimated by assuming that the ratio\n between pn and pn\u22121 will be the same as that between pn\u22121 and pn\u22122; from\n Gao and He (2013) Training MRF-Based Phrase Translation Models using\n Gradient Ascent. In NAACL.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n # This smoothing only works when p_1 and p_2 is non-zero.\n # Raise an error with an appropriate message when the input is too short\n # to use this smoothing technique.\n assert p_n[2], \"This smoothing method requires non-zero precision for bigrams.\"\n for i, p_i in enumerate(p_n):\n if i in [0, 1]: # Skips the first 2 orders of ngrams.\n continue\n else:\n pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2]\n # No. of ngrams in translation that matches the reference.\n m = p_i.numerator\n # No. 
of ngrams in translation.\n l = sum(1 for _ in ngrams(hypothesis, i + 1))\n # Calculates the interpolated precision.\n p_n[i] = (m + self.alpha * pi0) / (l + self.alpha)\n return p_n\n\n def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 7:\n Interpolates methods 4 and 5.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n p_n = self.method4(p_n, references, hypothesis, hyp_len)\n p_n = self.method5(p_n, references, hypothesis, hyp_len)\n return p_n\n", "path": "nltk/translate/bleu_score.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Natural Language Toolkit: BLEU Score\n#\n# Copyright (C) 2001-2020 NLTK Project\n# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n# Contributors: Bj\u00f6rn Mattsson, Dmitrijs Milajevs, Liling Tan\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"BLEU score implementation.\"\"\"\n\nimport math\nimport sys\nfrom fractions import Fraction\nimport warnings\nfrom collections import Counter\n\nfrom nltk.util import ngrams\n\n\ndef sentence_bleu(\n references,\n hypothesis,\n weights=(0.25, 0.25, 0.25, 0.25),\n smoothing_function=None,\n auto_reweigh=False,\n):\n \"\"\"\n Calculate BLEU score (Bilingual Evaluation Understudy) from\n Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.\n \"BLEU: a method for automatic evaluation of machine translation.\"\n In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf\n\n >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS\n 0.5045...\n\n If there is no ngrams overlap for any order of n-grams, BLEU returns the\n value 0. This is because the precision for the order of n-grams without\n overlap is 0, and the geometric mean in the final BLEU score computation\n multiplies the 0 with the precision of other n-grams. This results in 0\n (independently of the precision of the othe n-gram orders). The following\n example has zero 3-gram and 4-gram overlaps:\n\n >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS\n 0.0\n\n To avoid this harsh behaviour when no ngram overlaps are found a smoothing\n function can be used.\n\n >>> chencherry = SmoothingFunction()\n >>> sentence_bleu([reference1, reference2, reference3], hypothesis2,\n ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS\n 0.0370...\n\n The default BLEU calculates a score for up to 4-grams using uniform\n weights (this is called BLEU-4). To evaluate your translations with\n higher/lower order ngrams, use customized weights. E.g. 
when accounting\n for up to 5-grams with uniform weights (this is called BLEU-5) use:\n\n >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)\n >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS\n 0.3920...\n\n :param references: reference sentences\n :type references: list(list(str))\n :param hypothesis: a hypothesis sentence\n :type hypothesis: list(str)\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n :param smoothing_function:\n :type smoothing_function: SmoothingFunction\n :param auto_reweigh: Option to re-normalize the weights uniformly.\n :type auto_reweigh: bool\n :return: The sentence-level BLEU score.\n :rtype: float\n \"\"\"\n return corpus_bleu(\n [references], [hypothesis], weights, smoothing_function, auto_reweigh\n )\n\n\ndef corpus_bleu(\n list_of_references,\n hypotheses,\n weights=(0.25, 0.25, 0.25, 0.25),\n smoothing_function=None,\n auto_reweigh=False,\n):\n \"\"\"\n Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all\n the hypotheses and their respective references.\n\n Instead of averaging the sentence level BLEU scores (i.e. marco-average\n precision), the original BLEU metric (Papineni et al. 2002) accounts for\n the micro-average precision (i.e. summing the numerators and denominators\n for each hypothesis-reference(s) pairs before the division).\n\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS\n 0.5920...\n\n The example below show that corpus_bleu() is different from averaging\n sentence_bleu() for hypotheses\n\n >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)\n >>> score2 = sentence_bleu([ref2a], hyp2)\n >>> (score1 + score2) / 2 # doctest: +ELLIPSIS\n 0.6223...\n\n :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses\n :type list_of_references: list(list(list(str)))\n :param hypotheses: a list of hypothesis sentences\n :type hypotheses: list(list(str))\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n :param smoothing_function:\n :type smoothing_function: SmoothingFunction\n :param auto_reweigh: Option to re-normalize the weights uniformly.\n :type auto_reweigh: bool\n :return: The corpus-level BLEU score.\n :rtype: float\n \"\"\"\n # Before proceeding to compute BLEU, perform sanity checks.\n\n p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.\n p_denominators = Counter() # Key = ngram order, and value = no. 
of ngram in ref.\n hyp_lengths, ref_lengths = 0, 0\n\n assert len(list_of_references) == len(hypotheses), (\n \"The number of hypotheses and their reference(s) should be the \" \"same \"\n )\n\n # Iterate through each hypothesis and their corresponding references.\n for references, hypothesis in zip(list_of_references, hypotheses):\n # For each order of ngram, calculate the numerator and\n # denominator for the corpus-level modified precision.\n for i, _ in enumerate(weights, start=1):\n p_i = modified_precision(references, hypothesis, i)\n p_numerators[i] += p_i.numerator\n p_denominators[i] += p_i.denominator\n\n # Calculate the hypothesis length and the closest reference length.\n # Adds them to the corpus-level hypothesis and reference counts.\n hyp_len = len(hypothesis)\n hyp_lengths += hyp_len\n ref_lengths += closest_ref_length(references, hyp_len)\n\n # Calculate corpus-level brevity penalty.\n bp = brevity_penalty(ref_lengths, hyp_lengths)\n\n # Uniformly re-weighting based on maximum hypothesis lengths if largest\n # order of n-grams < 4 and weights is set at default.\n if auto_reweigh:\n if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):\n weights = (1 / hyp_lengths,) * hyp_lengths\n\n # Collects the various precision values for the different ngram orders.\n p_n = [\n Fraction(p_numerators[i], p_denominators[i], _normalize=False)\n for i, _ in enumerate(weights, start=1)\n ]\n\n # Returns 0 if there's no matching n-grams\n # We only need to check for p_numerators[1] == 0, since if there's\n # no unigrams, there won't be any higher order ngrams.\n if p_numerators[1] == 0:\n return 0\n\n # If there's no smoothing, set use method0 from SmoothinFunction class.\n if not smoothing_function:\n smoothing_function = SmoothingFunction().method0\n # Smoothen the modified precision.\n # Note: smoothing_function() may convert values into floats;\n # it tries to retain the Fraction object as much as the\n # smoothing method allows.\n p_n = smoothing_function(\n p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths\n )\n s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))\n s = bp * math.exp(math.fsum(s))\n return s\n\n\ndef modified_precision(references, hypothesis, n):\n \"\"\"\n Calculate modified ngram precision.\n\n The normal precision method may lead to some wrong translations with\n high-precision, e.g., the translation, in which a word of reference\n repeats several times, has very high precision.\n\n This function only returns the Fraction object that contains the numerator\n and denominator necessary to calculate the corpus-level precision.\n To calculate the modified precision for a single pair of hypothesis and\n references, cast the Fraction object into a float.\n\n The famous \"the the the ... \" example shows that you can get BLEU precision\n by duplicating high frequency words.\n\n >>> reference1 = 'the cat is on the mat'.split()\n >>> reference2 = 'there is a cat on the mat'.split()\n >>> hypothesis1 = 'the the the the the the the'.split()\n >>> references = [reference1, reference2]\n >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS\n 0.2857...\n\n In the modified n-gram precision, a reference word will be considered\n exhausted after a matching hypothesis word is identified, e.g.\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will',\n ... 
'forever', 'heed', 'Party', 'commands']\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n >>> hypothesis = 'of the'.split()\n >>> references = [reference1, reference2, reference3]\n >>> float(modified_precision(references, hypothesis, n=1))\n 1.0\n >>> float(modified_precision(references, hypothesis, n=2))\n 1.0\n\n An example of a normal machine translation hypothesis:\n\n >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will',\n ... 'forever', 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n >>> references = [reference1, reference2, reference3]\n >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS\n 0.9444...\n >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS\n 0.5714...\n >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS\n 0.5882352941176471\n >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS\n 0.07692...\n\n\n :param references: A list of reference translations.\n :type references: list(list(str))\n :param hypothesis: A hypothesis translation.\n :type hypothesis: list(str)\n :param n: The ngram order.\n :type n: int\n :return: BLEU's modified precision for the nth order ngram.\n :rtype: Fraction\n \"\"\"\n # Extracts all ngrams in hypothesis\n # Set an empty Counter if hypothesis is empty.\n counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()\n # Extract a union of references' counts.\n # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])\n max_counts = {}\n for reference in references:\n reference_counts = (\n Counter(ngrams(reference, n)) if len(reference) >= n else Counter()\n )\n for ngram in counts:\n max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n\n # Assigns the intersection between hypothesis and references' counts.\n clipped_counts = {\n ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()\n }\n\n numerator = sum(clipped_counts.values())\n # Ensures that denominator is minimum 1 to avoid ZeroDivisionError.\n # Usually this happens when the ngram order is > len(reference).\n denominator = max(1, sum(counts.values()))\n\n return Fraction(numerator, denominator, _normalize=False)\n\n\ndef closest_ref_length(references, hyp_len):\n \"\"\"\n This function finds the reference that is the closest length to the\n hypothesis. 
The closest reference length is referred to as *r* variable\n from the brevity penalty formula in Papineni et. al. (2002)\n\n :param references: A list of reference translations.\n :type references: list(list(str))\n :param hyp_len: The length of the hypothesis.\n :type hyp_len: int\n :return: The length of the reference that's closest to the hypothesis.\n :rtype: int\n \"\"\"\n ref_lens = (len(reference) for reference in references)\n closest_ref_len = min(\n ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)\n )\n return closest_ref_len\n\n\ndef brevity_penalty(closest_ref_len, hyp_len):\n \"\"\"\n Calculate brevity penalty.\n\n As the modified n-gram precision still has the problem from the short\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n\n An example from the paper. There are three references with length 12, 15\n and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.\n\n >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12\n >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15\n >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17\n >>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12\n >>> references = [reference1, reference2, reference3]\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len)\n 1.0\n\n In case a hypothesis translation is shorter than the references, penalty is\n applied.\n\n >>> references = [['a'] * 28, ['a'] * 28]\n >>> hypothesis = ['a'] * 12\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len)\n 0.2635971381157267\n\n The length of the closest reference is used to compute the penalty. If the\n length of a hypothesis is 12, and the reference lengths are 13 and 2, the\n penalty is applied because the hypothesis length (12) is less then the\n closest reference length (13).\n\n >>> references = [['a'] * 13, ['a'] * 2]\n >>> hypothesis = ['a'] * 12\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS\n 0.9200...\n\n The brevity penalty doesn't depend on reference order. 
More importantly,\n when two reference sentences are at the same distance, the shortest\n reference sentence length is used.\n\n >>> references = [['a'] * 13, ['a'] * 11]\n >>> hypothesis = ['a'] * 12\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> bp1 = brevity_penalty(closest_ref_len, hyp_len)\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)\n >>> bp2 = brevity_penalty(closest_ref_len, hyp_len)\n >>> bp1 == bp2 == 1\n True\n\n A test example from mteval-v13a.pl (starting from the line 705):\n\n >>> references = [['a'] * 11, ['a'] * 8]\n >>> hypothesis = ['a'] * 7\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS\n 0.8668...\n\n >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]\n >>> hypothesis = ['a'] * 7\n >>> hyp_len = len(hypothesis)\n >>> closest_ref_len = closest_ref_length(references, hyp_len)\n >>> brevity_penalty(closest_ref_len, hyp_len)\n 1.0\n\n :param hyp_len: The length of the hypothesis for a single sentence OR the\n sum of all the hypotheses' lengths for a corpus\n :type hyp_len: int\n :param closest_ref_len: The length of the closest reference for a single\n hypothesis OR the sum of all the closest references for every hypotheses.\n :type closest_ref_len: int\n :return: BLEU's brevity penalty.\n :rtype: float\n \"\"\"\n if hyp_len > closest_ref_len:\n return 1\n # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0\n elif hyp_len == 0:\n return 0\n else:\n return math.exp(1 - closest_ref_len / hyp_len)\n\n\nclass SmoothingFunction:\n \"\"\"\n This is an implementation of the smoothing techniques\n for segment-level BLEU scores that was presented in\n Boxing Chen and Collin Cherry (2014) A Systematic Comparison of\n Smoothing Techniques for Sentence-Level BLEU. In WMT14.\n http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf\n \"\"\"\n\n def __init__(self, epsilon=0.1, alpha=5, k=5):\n \"\"\"\n This will initialize the parameters required for the various smoothing\n techniques, the default values are set to the numbers used in the\n experiments from Chen and Cherry (2014).\n\n >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures',\n ... 'that', 'the', 'military', 'always', 'obeys', 'the',\n ... 'commands', 'of', 'the', 'party']\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures',\n ... 'that', 'the', 'military', 'will', 'forever', 'heed',\n ... 
'Party', 'commands']\n\n >>> chencherry = SmoothingFunction()\n >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS\n 0.4489...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS\n 0.4118...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS\n 0.4905...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS\n 0.4135...\n >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS\n 0.4905...\n\n :param epsilon: the epsilon value use in method 1\n :type epsilon: float\n :param alpha: the alpha value use in method 6\n :type alpha: int\n :param k: the k value use in method 4\n :type k: int\n \"\"\"\n self.epsilon = epsilon\n self.alpha = alpha\n self.k = k\n\n def method0(self, p_n, *args, **kwargs):\n \"\"\"\n No smoothing.\n \"\"\"\n p_n_new = []\n for i, p_i in enumerate(p_n):\n if p_i.numerator != 0:\n p_n_new.append(p_i)\n else:\n _msg = str(\n \"\\nThe hypothesis contains 0 counts of {}-gram overlaps.\\n\"\n \"Therefore the BLEU score evaluates to 0, independently of\\n\"\n \"how many N-gram overlaps of lower order it contains.\\n\"\n \"Consider using lower n-gram order or use \"\n \"SmoothingFunction()\"\n ).format(i + 1)\n warnings.warn(_msg)\n # When numerator==0 where denonminator==0 or !=0, the result\n # for the precision score should be equal to 0 or undefined.\n # Due to BLEU geometric mean computation in logarithm space,\n # we we need to take the return sys.float_info.min such that\n # math.log(sys.float_info.min) returns a 0 precision score.\n p_n_new.append(sys.float_info.min)\n return p_n_new\n\n def method1(self, p_n, *args, **kwargs):\n \"\"\"\n Smoothing method 1: Add *epsilon* counts to precision with 0 counts.\n \"\"\"\n return [\n (p_i.numerator + self.epsilon) / p_i.denominator\n if p_i.numerator == 0\n else p_i\n for p_i in p_n\n ]\n\n def method2(self, p_n, *args, **kwargs):\n \"\"\"\n Smoothing method 2: Add 1 to both numerator and denominator from\n Chin-Yew Lin and Franz Josef Och (2004) Automatic evaluation of\n machine translation quality using longest common subsequence and\n skip-bigram statistics. 
In ACL04.\n \"\"\"\n return [\n Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False)\n if i != 0 else p_n[0]\n for i in range(len(p_n))\n ]\n\n def method3(self, p_n, *args, **kwargs):\n \"\"\"\n Smoothing method 3: NIST geometric sequence smoothing\n The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each\n precision score whose matching n-gram count is null.\n k is 1 for the first 'n' value for which the n-gram match count is null/\n For example, if the text contains:\n - one 2-gram match\n - and (consequently) two 1-gram matches\n the n-gram count for each individual precision score would be:\n - n=1 => prec_count = 2 (two unigrams)\n - n=2 => prec_count = 1 (one bigram)\n - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)\n - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)\n \"\"\"\n incvnt = 1 # From the mteval-v13a.pl, it's referred to as k.\n for i, p_i in enumerate(p_n):\n if p_i.numerator == 0:\n p_n[i] = 1 / (2 ** incvnt * p_i.denominator)\n incvnt += 1\n return p_n\n\n def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 4:\n Shorter translations may have inflated precision values due to having\n smaller denominators; therefore, we give them proportionally\n smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry\n suggests dividing by 1/ln(len(T)), where T is the length of the translation.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n for i, p_i in enumerate(p_n):\n if p_i.numerator == 0 and hyp_len != 0:\n incvnt = i + 1 * self.k / math.log(\n hyp_len\n ) # Note that this K is different from the K from NIST.\n p_n[i] = incvnt / p_i.denominator\n return p_n\n\n def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 5:\n The matched counts for similar values of n should be similar. To a\n calculate the n-gram matched count, it averages the n\u22121, n and n+1 gram\n matched counts.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n m = {}\n # Requires an precision value for an addition ngram order.\n p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]\n m[-1] = p_n[0] + 1\n for i, p_i in enumerate(p_n):\n p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3\n m[i] = p_n[i]\n return p_n\n\n def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 6:\n Interpolates the maximum likelihood estimate of the precision *p_n* with\n a prior estimate *pi0*. The prior is estimated by assuming that the ratio\n between pn and pn\u22121 will be the same as that between pn\u22121 and pn\u22122; from\n Gao and He (2013) Training MRF-Based Phrase Translation Models using\n Gradient Ascent. In NAACL.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n # This smoothing only works when p_1 and p_2 is non-zero.\n # Raise an error with an appropriate message when the input is too short\n # to use this smoothing technique.\n assert p_n[2], \"This smoothing method requires non-zero precision for bigrams.\"\n for i, p_i in enumerate(p_n):\n if i in [0, 1]: # Skips the first 2 orders of ngrams.\n continue\n else:\n pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2]\n # No. of ngrams in translation that matches the reference.\n m = p_i.numerator\n # No. 
of ngrams in translation.\n l = sum(1 for _ in ngrams(hypothesis, i + 1))\n # Calculates the interpolated precision.\n p_n[i] = (m + self.alpha * pi0) / (l + self.alpha)\n return p_n\n\n def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):\n \"\"\"\n Smoothing method 7:\n Interpolates methods 4 and 5.\n \"\"\"\n hyp_len = hyp_len if hyp_len else len(hypothesis)\n p_n = self.method4(p_n, references, hypothesis, hyp_len)\n p_n = self.method5(p_n, references, hypothesis, hyp_len)\n return p_n\n", "path": "nltk/translate/bleu_score.py"}]} |
gh_patches_debug_1308 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1806 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OTLP gRPC exporter silently fails if scheme is not specified in endpoint
Issue arising from implementing https://github.com/open-telemetry/opentelemetry-python/pull/1771
**Steps to reproduce**
Supplying a remote collector hostname without a scheme causes the OTLP exporter to silently not export spans.
https://github.com/open-telemetry/opentelemetry-python/blob/b3455cd1164f9c5f336cc26a52fb351cb422b0b2/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py#L210
`parsed_url.netloc` is an empty str if the scheme is not specified, e.g. `localhost:55680`; this causes spans not to be exported to the remote collector because `endpoint` ends up empty.
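A minimal illustration of the `urlparse` behaviour involved (output shown in comments):

```python
from urllib.parse import urlparse

# netloc is only populated when the authority is introduced by "//",
# e.g. "http://host:port"; a bare "host:port" parses with an empty netloc.
print(urlparse("localhost:55680").netloc)         # ''
print(urlparse("http://localhost:55680").netloc)  # 'localhost:55680'
```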
**What is the expected behavior?**
Spans are correctly exported to the remote collector via OTLP.
**What is the actual behavior?**
Spans are not exported to the remote collector via OTLP.
**Additional context**
Per the [opentelemetry specs](https://github.com/open-telemetry/opentelemetry-specification/blob/f62744a679814937214fd17394ab3fa8a9099424/specification/protocol/exporter.md#configuration-options), the scheme must be specified in the endpoint; this library should either enforce that a scheme is supplied (and fail hard if not) or assume a sane default (http?) so that the exporter remains usable.
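For illustration, a minimal guard that avoids the silently-empty endpoint (whether to default the scheme to http or to fail hard is a separate decision; this is only a sketch):

```python
from urllib.parse import urlparse

endpoint = "localhost:55680"  # no scheme supplied
parsed_url = urlparse(endpoint)

# Keep the user-supplied value when urlparse could not extract an authority,
# instead of silently replacing it with the empty string.
endpoint = parsed_url.netloc or endpoint
print(endpoint)  # 'localhost:55680'
```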
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """OTLP Exporter"""
16
17 import logging
18 from abc import ABC, abstractmethod
19 from collections.abc import Mapping, Sequence
20 from os import environ
21 from time import sleep
22 from typing import Any, Callable, Dict, Generic, List, Optional
23 from typing import Sequence as TypingSequence
24 from typing import Text, TypeVar
25 from urllib import parse
26 from urllib.parse import urlparse
27
28 from backoff import expo
29 from google.rpc.error_details_pb2 import RetryInfo
30 from grpc import (
31 ChannelCredentials,
32 Compression,
33 RpcError,
34 StatusCode,
35 insecure_channel,
36 secure_channel,
37 ssl_channel_credentials,
38 )
39
40 from opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue
41 from opentelemetry.proto.resource.v1.resource_pb2 import Resource
42 from opentelemetry.sdk.environment_variables import (
43 OTEL_EXPORTER_OTLP_CERTIFICATE,
44 OTEL_EXPORTER_OTLP_COMPRESSION,
45 OTEL_EXPORTER_OTLP_ENDPOINT,
46 OTEL_EXPORTER_OTLP_HEADERS,
47 OTEL_EXPORTER_OTLP_TIMEOUT,
48 )
49 from opentelemetry.sdk.resources import Resource as SDKResource
50
51 logger = logging.getLogger(__name__)
52 SDKDataT = TypeVar("SDKDataT")
53 ResourceDataT = TypeVar("ResourceDataT")
54 TypingResourceT = TypeVar("TypingResourceT")
55 ExportServiceRequestT = TypeVar("ExportServiceRequestT")
56 ExportResultT = TypeVar("ExportResultT")
57
58 _ENVIRON_TO_COMPRESSION = {
59 None: None,
60 "gzip": Compression.Gzip,
61 }
62
63
64 class InvalidCompressionValueException(Exception):
65 def __init__(self, environ_key: str, environ_value: str):
66 super().__init__(
67 'Invalid value "{}" for compression envvar {}'.format(
68 environ_value, environ_key
69 )
70 )
71
72
73 def environ_to_compression(environ_key: str) -> Optional[Compression]:
74 environ_value = (
75 environ[environ_key].lower().strip()
76 if environ_key in environ
77 else None
78 )
79 if environ_value not in _ENVIRON_TO_COMPRESSION:
80 raise InvalidCompressionValueException(environ_key, environ_value)
81 return _ENVIRON_TO_COMPRESSION[environ_value]
82
83
84 def _translate_key_values(key: Text, value: Any) -> KeyValue:
85
86 if isinstance(value, bool):
87 any_value = AnyValue(bool_value=value)
88
89 elif isinstance(value, str):
90 any_value = AnyValue(string_value=value)
91
92 elif isinstance(value, int):
93 any_value = AnyValue(int_value=value)
94
95 elif isinstance(value, float):
96 any_value = AnyValue(double_value=value)
97
98 elif isinstance(value, Sequence):
99 any_value = AnyValue(array_value=value)
100
101 elif isinstance(value, Mapping):
102 any_value = AnyValue(kvlist_value=value)
103
104 else:
105 raise Exception(
106 "Invalid type {} of value {}".format(type(value), value)
107 )
108
109 return KeyValue(key=key, value=any_value)
110
111
112 def get_resource_data(
113 sdk_resource_instrumentation_library_data: Dict[
114 SDKResource, ResourceDataT
115 ],
116 resource_class: Callable[..., TypingResourceT],
117 name: str,
118 ) -> List[TypingResourceT]:
119
120 resource_data = []
121
122 for (
123 sdk_resource,
124 instrumentation_library_data,
125 ) in sdk_resource_instrumentation_library_data.items():
126
127 collector_resource = Resource()
128
129 for key, value in sdk_resource.attributes.items():
130
131 try:
132 # pylint: disable=no-member
133 collector_resource.attributes.append(
134 _translate_key_values(key, value)
135 )
136 except Exception as error: # pylint: disable=broad-except
137 logger.exception(error)
138
139 resource_data.append(
140 resource_class(
141 **{
142 "resource": collector_resource,
143 "instrumentation_library_{}".format(name): [
144 instrumentation_library_data
145 ],
146 }
147 )
148 )
149
150 return resource_data
151
152
153 def _load_credential_from_file(filepath) -> ChannelCredentials:
154 try:
155 with open(filepath, "rb") as creds_file:
156 credential = creds_file.read()
157 return ssl_channel_credentials(credential)
158 except FileNotFoundError:
159 logger.exception("Failed to read credential file")
160 return None
161
162
163 def _get_credentials(creds, environ_key):
164 if creds is not None:
165 return creds
166 creds_env = environ.get(environ_key)
167 if creds_env:
168 return _load_credential_from_file(creds_env)
169 return ssl_channel_credentials()
170
171
172 # pylint: disable=no-member
173 class OTLPExporterMixin(
174 ABC, Generic[SDKDataT, ExportServiceRequestT, ExportResultT]
175 ):
176 """OTLP span exporter
177
178 Args:
179 endpoint: OpenTelemetry Collector receiver endpoint
180 insecure: Connection type
181 credentials: ChannelCredentials object for server authentication
182 headers: Headers to send when exporting
183 timeout: Backend request timeout in seconds
184 compression: gRPC compression method to use
185 """
186
187 def __init__(
188 self,
189 endpoint: Optional[str] = None,
190 insecure: Optional[bool] = None,
191 credentials: Optional[ChannelCredentials] = None,
192 headers: Optional[Sequence] = None,
193 timeout: Optional[int] = None,
194 compression: Optional[Compression] = None,
195 ):
196 super().__init__()
197
198 endpoint = endpoint or environ.get(
199 OTEL_EXPORTER_OTLP_ENDPOINT, "http://localhost:4317"
200 )
201
202 parsed_url = urlparse(endpoint)
203
204 if insecure is None:
205 if parsed_url.scheme == "https":
206 insecure = False
207 else:
208 insecure = True
209
210 endpoint = parsed_url.netloc
211
212 self._headers = headers or environ.get(OTEL_EXPORTER_OTLP_HEADERS)
213 if isinstance(self._headers, str):
214 self._headers = tuple(
215 tuple(item.split("=")) for item in self._headers.split(",")
216 )
217 self._timeout = timeout or int(
218 environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, 10)
219 )
220 self._collector_span_kwargs = None
221
222 compression = (
223 environ_to_compression(OTEL_EXPORTER_OTLP_COMPRESSION)
224 if compression is None
225 else compression
226 ) or Compression.NoCompression
227
228 if insecure:
229 self._client = self._stub(
230 insecure_channel(endpoint, compression=compression)
231 )
232 else:
233 credentials = _get_credentials(
234 credentials, OTEL_EXPORTER_OTLP_CERTIFICATE
235 )
236 self._client = self._stub(
237 secure_channel(endpoint, credentials, compression=compression)
238 )
239
240 @abstractmethod
241 def _translate_data(
242 self, data: TypingSequence[SDKDataT]
243 ) -> ExportServiceRequestT:
244 pass
245
246 def _export(self, data: TypingSequence[SDKDataT]) -> ExportResultT:
247 # expo returns a generator that yields delay values which grow
248 # exponentially. Once delay is greater than max_value, the yielded
249 # value will remain constant.
250 # max_value is set to 900 (900 seconds is 15 minutes) to use the same
251 # value as used in the Go implementation.
252
253 max_value = 900
254
255 for delay in expo(max_value=max_value):
256
257 if delay == max_value:
258 return self._result.FAILURE
259
260 try:
261 self._client.Export(
262 request=self._translate_data(data),
263 metadata=self._headers,
264 timeout=self._timeout,
265 )
266
267 return self._result.SUCCESS
268
269 except RpcError as error:
270
271 if error.code() in [
272 StatusCode.CANCELLED,
273 StatusCode.DEADLINE_EXCEEDED,
274 StatusCode.PERMISSION_DENIED,
275 StatusCode.UNAUTHENTICATED,
276 StatusCode.RESOURCE_EXHAUSTED,
277 StatusCode.ABORTED,
278 StatusCode.OUT_OF_RANGE,
279 StatusCode.UNAVAILABLE,
280 StatusCode.DATA_LOSS,
281 ]:
282
283 retry_info_bin = dict(error.trailing_metadata()).get(
284 "google.rpc.retryinfo-bin"
285 )
286 if retry_info_bin is not None:
287 retry_info = RetryInfo()
288 retry_info.ParseFromString(retry_info_bin)
289 delay = (
290 retry_info.retry_delay.seconds
291 + retry_info.retry_delay.nanos / 1.0e9
292 )
293
294 logger.debug(
295 "Waiting %ss before retrying export of span", delay
296 )
297 sleep(delay)
298 continue
299
300 if error.code() == StatusCode.OK:
301 return self._result.SUCCESS
302
303 return self._result.FAILURE
304
305 return self._result.FAILURE
306
307 def shutdown(self) -> None:
308 pass
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py
@@ -207,7 +207,8 @@
else:
insecure = True
- endpoint = parsed_url.netloc
+ if parsed_url.netloc:
+ endpoint = parsed_url.netloc
self._headers = headers or environ.get(OTEL_EXPORTER_OTLP_HEADERS)
if isinstance(self._headers, str):
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py\n@@ -207,7 +207,8 @@\n else:\n insecure = True\n \n- endpoint = parsed_url.netloc\n+ if parsed_url.netloc:\n+ endpoint = parsed_url.netloc\n \n self._headers = headers or environ.get(OTEL_EXPORTER_OTLP_HEADERS)\n if isinstance(self._headers, str):\n", "issue": "OTLP gRPC exporter silently fails if scheme is not specified in endpoint\nIssue arising from implementing https://github.com/open-telemetry/opentelemetry-python/pull/1771\r\n\r\n**Steps to reproduce**\r\nSupplying an remote collector hostname without scheme causes the OTLP exporter to silently not export spans.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/b3455cd1164f9c5f336cc26a52fb351cb422b0b2/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py#L210\r\n\r\n`parsed_url.netloc` is an empty str if the scheme is not specified e.g. `localhost:55680`, this causes spans to not be exported to a remote collector as `endpoint` is empty.\r\n\r\n**What is the expected behavior?**\r\nSpans are correctly exported to remote collector via OTLP.\r\n\r\n**What is the actual behavior?**\r\nSpans are not exported to remote collector via OTLP.\r\n\r\n**Additional context**\r\nPer [opentelemetry specs](https://github.com/open-telemetry/opentelemetry-specification/blob/f62744a679814937214fd17394ab3fa8a9099424/specification/protocol/exporter.md#configuration-options), it was written that the scheme must be specified in the endpoint; this library should either enforce that the scheme is supplied (fail hard if not) or assume a sane default (http?) 
for the purposes of using this library.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OTLP Exporter\"\"\"\n\nimport logging\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Mapping, Sequence\nfrom os import environ\nfrom time import sleep\nfrom typing import Any, Callable, Dict, Generic, List, Optional\nfrom typing import Sequence as TypingSequence\nfrom typing import Text, TypeVar\nfrom urllib import parse\nfrom urllib.parse import urlparse\n\nfrom backoff import expo\nfrom google.rpc.error_details_pb2 import RetryInfo\nfrom grpc import (\n ChannelCredentials,\n Compression,\n RpcError,\n StatusCode,\n insecure_channel,\n secure_channel,\n ssl_channel_credentials,\n)\n\nfrom opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue\nfrom opentelemetry.proto.resource.v1.resource_pb2 import Resource\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.resources import Resource as SDKResource\n\nlogger = logging.getLogger(__name__)\nSDKDataT = TypeVar(\"SDKDataT\")\nResourceDataT = TypeVar(\"ResourceDataT\")\nTypingResourceT = TypeVar(\"TypingResourceT\")\nExportServiceRequestT = TypeVar(\"ExportServiceRequestT\")\nExportResultT = TypeVar(\"ExportResultT\")\n\n_ENVIRON_TO_COMPRESSION = {\n None: None,\n \"gzip\": Compression.Gzip,\n}\n\n\nclass InvalidCompressionValueException(Exception):\n def __init__(self, environ_key: str, environ_value: str):\n super().__init__(\n 'Invalid value \"{}\" for compression envvar {}'.format(\n environ_value, environ_key\n )\n )\n\n\ndef environ_to_compression(environ_key: str) -> Optional[Compression]:\n environ_value = (\n environ[environ_key].lower().strip()\n if environ_key in environ\n else None\n )\n if environ_value not in _ENVIRON_TO_COMPRESSION:\n raise InvalidCompressionValueException(environ_key, environ_value)\n return _ENVIRON_TO_COMPRESSION[environ_value]\n\n\ndef _translate_key_values(key: Text, value: Any) -> KeyValue:\n\n if isinstance(value, bool):\n any_value = AnyValue(bool_value=value)\n\n elif isinstance(value, str):\n any_value = AnyValue(string_value=value)\n\n elif isinstance(value, int):\n any_value = AnyValue(int_value=value)\n\n elif isinstance(value, float):\n any_value = AnyValue(double_value=value)\n\n elif isinstance(value, Sequence):\n any_value = AnyValue(array_value=value)\n\n elif isinstance(value, Mapping):\n any_value = AnyValue(kvlist_value=value)\n\n else:\n raise Exception(\n \"Invalid type {} of value {}\".format(type(value), value)\n )\n\n return KeyValue(key=key, value=any_value)\n\n\ndef get_resource_data(\n sdk_resource_instrumentation_library_data: Dict[\n SDKResource, ResourceDataT\n ],\n resource_class: Callable[..., TypingResourceT],\n name: str,\n) -> List[TypingResourceT]:\n\n resource_data = []\n\n for (\n 
sdk_resource,\n instrumentation_library_data,\n ) in sdk_resource_instrumentation_library_data.items():\n\n collector_resource = Resource()\n\n for key, value in sdk_resource.attributes.items():\n\n try:\n # pylint: disable=no-member\n collector_resource.attributes.append(\n _translate_key_values(key, value)\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)\n\n resource_data.append(\n resource_class(\n **{\n \"resource\": collector_resource,\n \"instrumentation_library_{}\".format(name): [\n instrumentation_library_data\n ],\n }\n )\n )\n\n return resource_data\n\n\ndef _load_credential_from_file(filepath) -> ChannelCredentials:\n try:\n with open(filepath, \"rb\") as creds_file:\n credential = creds_file.read()\n return ssl_channel_credentials(credential)\n except FileNotFoundError:\n logger.exception(\"Failed to read credential file\")\n return None\n\n\ndef _get_credentials(creds, environ_key):\n if creds is not None:\n return creds\n creds_env = environ.get(environ_key)\n if creds_env:\n return _load_credential_from_file(creds_env)\n return ssl_channel_credentials()\n\n\n# pylint: disable=no-member\nclass OTLPExporterMixin(\n ABC, Generic[SDKDataT, ExportServiceRequestT, ExportResultT]\n):\n \"\"\"OTLP span exporter\n\n Args:\n endpoint: OpenTelemetry Collector receiver endpoint\n insecure: Connection type\n credentials: ChannelCredentials object for server authentication\n headers: Headers to send when exporting\n timeout: Backend request timeout in seconds\n compression: gRPC compression method to use\n \"\"\"\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n super().__init__()\n\n endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_ENDPOINT, \"http://localhost:4317\"\n )\n\n parsed_url = urlparse(endpoint)\n\n if insecure is None:\n if parsed_url.scheme == \"https\":\n insecure = False\n else:\n insecure = True\n\n endpoint = parsed_url.netloc\n\n self._headers = headers or environ.get(OTEL_EXPORTER_OTLP_HEADERS)\n if isinstance(self._headers, str):\n self._headers = tuple(\n tuple(item.split(\"=\")) for item in self._headers.split(\",\")\n )\n self._timeout = timeout or int(\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, 10)\n )\n self._collector_span_kwargs = None\n\n compression = (\n environ_to_compression(OTEL_EXPORTER_OTLP_COMPRESSION)\n if compression is None\n else compression\n ) or Compression.NoCompression\n\n if insecure:\n self._client = self._stub(\n insecure_channel(endpoint, compression=compression)\n )\n else:\n credentials = _get_credentials(\n credentials, OTEL_EXPORTER_OTLP_CERTIFICATE\n )\n self._client = self._stub(\n secure_channel(endpoint, credentials, compression=compression)\n )\n\n @abstractmethod\n def _translate_data(\n self, data: TypingSequence[SDKDataT]\n ) -> ExportServiceRequestT:\n pass\n\n def _export(self, data: TypingSequence[SDKDataT]) -> ExportResultT:\n # expo returns a generator that yields delay values which grow\n # exponentially. 
Once delay is greater than max_value, the yielded\n # value will remain constant.\n # max_value is set to 900 (900 seconds is 15 minutes) to use the same\n # value as used in the Go implementation.\n\n max_value = 900\n\n for delay in expo(max_value=max_value):\n\n if delay == max_value:\n return self._result.FAILURE\n\n try:\n self._client.Export(\n request=self._translate_data(data),\n metadata=self._headers,\n timeout=self._timeout,\n )\n\n return self._result.SUCCESS\n\n except RpcError as error:\n\n if error.code() in [\n StatusCode.CANCELLED,\n StatusCode.DEADLINE_EXCEEDED,\n StatusCode.PERMISSION_DENIED,\n StatusCode.UNAUTHENTICATED,\n StatusCode.RESOURCE_EXHAUSTED,\n StatusCode.ABORTED,\n StatusCode.OUT_OF_RANGE,\n StatusCode.UNAVAILABLE,\n StatusCode.DATA_LOSS,\n ]:\n\n retry_info_bin = dict(error.trailing_metadata()).get(\n \"google.rpc.retryinfo-bin\"\n )\n if retry_info_bin is not None:\n retry_info = RetryInfo()\n retry_info.ParseFromString(retry_info_bin)\n delay = (\n retry_info.retry_delay.seconds\n + retry_info.retry_delay.nanos / 1.0e9\n )\n\n logger.debug(\n \"Waiting %ss before retrying export of span\", delay\n )\n sleep(delay)\n continue\n\n if error.code() == StatusCode.OK:\n return self._result.SUCCESS\n\n return self._result.FAILURE\n\n return self._result.FAILURE\n\n def shutdown(self) -> None:\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OTLP Exporter\"\"\"\n\nimport logging\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Mapping, Sequence\nfrom os import environ\nfrom time import sleep\nfrom typing import Any, Callable, Dict, Generic, List, Optional\nfrom typing import Sequence as TypingSequence\nfrom typing import Text, TypeVar\nfrom urllib import parse\nfrom urllib.parse import urlparse\n\nfrom backoff import expo\nfrom google.rpc.error_details_pb2 import RetryInfo\nfrom grpc import (\n ChannelCredentials,\n Compression,\n RpcError,\n StatusCode,\n insecure_channel,\n secure_channel,\n ssl_channel_credentials,\n)\n\nfrom opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue\nfrom opentelemetry.proto.resource.v1.resource_pb2 import Resource\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.resources import Resource as SDKResource\n\nlogger = logging.getLogger(__name__)\nSDKDataT = TypeVar(\"SDKDataT\")\nResourceDataT = TypeVar(\"ResourceDataT\")\nTypingResourceT = TypeVar(\"TypingResourceT\")\nExportServiceRequestT = TypeVar(\"ExportServiceRequestT\")\nExportResultT = TypeVar(\"ExportResultT\")\n\n_ENVIRON_TO_COMPRESSION = {\n None: None,\n \"gzip\": Compression.Gzip,\n}\n\n\nclass InvalidCompressionValueException(Exception):\n def 
__init__(self, environ_key: str, environ_value: str):\n super().__init__(\n 'Invalid value \"{}\" for compression envvar {}'.format(\n environ_value, environ_key\n )\n )\n\n\ndef environ_to_compression(environ_key: str) -> Optional[Compression]:\n environ_value = (\n environ[environ_key].lower().strip()\n if environ_key in environ\n else None\n )\n if environ_value not in _ENVIRON_TO_COMPRESSION:\n raise InvalidCompressionValueException(environ_key, environ_value)\n return _ENVIRON_TO_COMPRESSION[environ_value]\n\n\ndef _translate_key_values(key: Text, value: Any) -> KeyValue:\n\n if isinstance(value, bool):\n any_value = AnyValue(bool_value=value)\n\n elif isinstance(value, str):\n any_value = AnyValue(string_value=value)\n\n elif isinstance(value, int):\n any_value = AnyValue(int_value=value)\n\n elif isinstance(value, float):\n any_value = AnyValue(double_value=value)\n\n elif isinstance(value, Sequence):\n any_value = AnyValue(array_value=value)\n\n elif isinstance(value, Mapping):\n any_value = AnyValue(kvlist_value=value)\n\n else:\n raise Exception(\n \"Invalid type {} of value {}\".format(type(value), value)\n )\n\n return KeyValue(key=key, value=any_value)\n\n\ndef get_resource_data(\n sdk_resource_instrumentation_library_data: Dict[\n SDKResource, ResourceDataT\n ],\n resource_class: Callable[..., TypingResourceT],\n name: str,\n) -> List[TypingResourceT]:\n\n resource_data = []\n\n for (\n sdk_resource,\n instrumentation_library_data,\n ) in sdk_resource_instrumentation_library_data.items():\n\n collector_resource = Resource()\n\n for key, value in sdk_resource.attributes.items():\n\n try:\n # pylint: disable=no-member\n collector_resource.attributes.append(\n _translate_key_values(key, value)\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)\n\n resource_data.append(\n resource_class(\n **{\n \"resource\": collector_resource,\n \"instrumentation_library_{}\".format(name): [\n instrumentation_library_data\n ],\n }\n )\n )\n\n return resource_data\n\n\ndef _load_credential_from_file(filepath) -> ChannelCredentials:\n try:\n with open(filepath, \"rb\") as creds_file:\n credential = creds_file.read()\n return ssl_channel_credentials(credential)\n except FileNotFoundError:\n logger.exception(\"Failed to read credential file\")\n return None\n\n\ndef _get_credentials(creds, environ_key):\n if creds is not None:\n return creds\n creds_env = environ.get(environ_key)\n if creds_env:\n return _load_credential_from_file(creds_env)\n return ssl_channel_credentials()\n\n\n# pylint: disable=no-member\nclass OTLPExporterMixin(\n ABC, Generic[SDKDataT, ExportServiceRequestT, ExportResultT]\n):\n \"\"\"OTLP span exporter\n\n Args:\n endpoint: OpenTelemetry Collector receiver endpoint\n insecure: Connection type\n credentials: ChannelCredentials object for server authentication\n headers: Headers to send when exporting\n timeout: Backend request timeout in seconds\n compression: gRPC compression method to use\n \"\"\"\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n super().__init__()\n\n endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_ENDPOINT, \"http://localhost:4317\"\n )\n\n parsed_url = urlparse(endpoint)\n\n if insecure is None:\n if parsed_url.scheme == \"https\":\n insecure = False\n else:\n insecure = True\n\n if 
parsed_url.netloc:\n endpoint = parsed_url.netloc\n\n self._headers = headers or environ.get(OTEL_EXPORTER_OTLP_HEADERS)\n if isinstance(self._headers, str):\n self._headers = tuple(\n tuple(item.split(\"=\")) for item in self._headers.split(\",\")\n )\n self._timeout = timeout or int(\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, 10)\n )\n self._collector_span_kwargs = None\n\n compression = (\n environ_to_compression(OTEL_EXPORTER_OTLP_COMPRESSION)\n if compression is None\n else compression\n ) or Compression.NoCompression\n\n if insecure:\n self._client = self._stub(\n insecure_channel(endpoint, compression=compression)\n )\n else:\n credentials = _get_credentials(\n credentials, OTEL_EXPORTER_OTLP_CERTIFICATE\n )\n self._client = self._stub(\n secure_channel(endpoint, credentials, compression=compression)\n )\n\n @abstractmethod\n def _translate_data(\n self, data: TypingSequence[SDKDataT]\n ) -> ExportServiceRequestT:\n pass\n\n def _export(self, data: TypingSequence[SDKDataT]) -> ExportResultT:\n # expo returns a generator that yields delay values which grow\n # exponentially. Once delay is greater than max_value, the yielded\n # value will remain constant.\n # max_value is set to 900 (900 seconds is 15 minutes) to use the same\n # value as used in the Go implementation.\n\n max_value = 900\n\n for delay in expo(max_value=max_value):\n\n if delay == max_value:\n return self._result.FAILURE\n\n try:\n self._client.Export(\n request=self._translate_data(data),\n metadata=self._headers,\n timeout=self._timeout,\n )\n\n return self._result.SUCCESS\n\n except RpcError as error:\n\n if error.code() in [\n StatusCode.CANCELLED,\n StatusCode.DEADLINE_EXCEEDED,\n StatusCode.PERMISSION_DENIED,\n StatusCode.UNAUTHENTICATED,\n StatusCode.RESOURCE_EXHAUSTED,\n StatusCode.ABORTED,\n StatusCode.OUT_OF_RANGE,\n StatusCode.UNAVAILABLE,\n StatusCode.DATA_LOSS,\n ]:\n\n retry_info_bin = dict(error.trailing_metadata()).get(\n \"google.rpc.retryinfo-bin\"\n )\n if retry_info_bin is not None:\n retry_info = RetryInfo()\n retry_info.ParseFromString(retry_info_bin)\n delay = (\n retry_info.retry_delay.seconds\n + retry_info.retry_delay.nanos / 1.0e9\n )\n\n logger.debug(\n \"Waiting %ss before retrying export of span\", delay\n )\n sleep(delay)\n continue\n\n if error.code() == StatusCode.OK:\n return self._result.SUCCESS\n\n return self._result.FAILURE\n\n return self._result.FAILURE\n\n def shutdown(self) -> None:\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/exporter.py"}]} |
gh_patches_debug_1309 | rasdani/github-patches | git_diff | nautobot__nautobot-1120 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GraphQL single-object endpoints broken
### Environment
* Python version: 3.7
* Nautobot version: 1.2.0-beta1
### Steps to Reproduce
1. Build a Nautobot instance of 1.2.0-beta1.
2. Add some sample devices and associated data.
3. Attempt to use the GraphiQL endpoint with a simple query such as:
```
query ($device_id: ID!) {
device(id: $device_id) {
hostname: name
}
}
```
along with one of your devices' UUIDs supplied as the `device_id` query variable, as shown below.
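For example, the GraphiQL query variables pane would contain something like this (placeholder UUID):
```
{
  "device_id": "11111111-2222-3333-4444-555555555555"
}
```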
### Expected Behavior
Expect to see the device's hostname returned.
### Observed Behavior
Get an error with the following:
```
{
"errors": [
{
"message": "'Device' object has no attribute 'only'",
"locations": [
{
"line": 2,
"column": 3
}
],
"path": [
"device"
]
}
],
"data": {
"device": null
}
}
```
A larger query returns this:
```
{
"errors": [
{
"message": "'Device' object has no attribute 'select_related'",
"locations": [
{
"line": 2,
"column": 3
}
],
"path": [
"device"
]
}
],
"data": {
"device": null
}
}
```
This appears to be related to the query optimizations that were done in the core: `only()` and `select_related()` are `QuerySet` methods, so the errors above suggest that the single-object resolver is handing `graphene_django_optimizer.query()` a model instance rather than a `QuerySet`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/graphql/generators.py`
Content:
```
1 """Library of generators for GraphQL."""
2
3 import logging
4
5 import graphene
6 import graphene_django_optimizer as gql_optimizer
7 from graphql import GraphQLError
8 from graphene_django import DjangoObjectType
9
10 from nautobot.core.graphql.utils import str_to_var_name, get_filtering_args_from_filterset
11 from nautobot.extras.choices import RelationshipSideChoices
12 from nautobot.extras.models import RelationshipAssociation
13 from nautobot.utilities.utils import get_filterset_for_model
14
15 logger = logging.getLogger("nautobot.graphql.generators")
16 RESOLVER_PREFIX = "resolve_"
17
18
19 def generate_restricted_queryset():
20 """
21 Generate a function to return a restricted queryset compatible with the internal permissions system.
22
23 Note that for built-in models such as ContentType the queryset has no `restrict` method, so we have to
24 fail gracefully in that case.
25 """
26
27 def get_queryset(queryset, info):
28 if not hasattr(queryset, "restrict"):
29 logger.debug(f"Queryset {queryset} is not restrictable")
30 return queryset
31 return queryset.restrict(info.context.user, "view")
32
33 return get_queryset
34
35
36 def generate_null_choices_resolver(name, resolver_name):
37 """
38 Generate function to resolve appropriate type when a field has `null=False` (default), `blank=True`, and
39 `choices` defined.
40
41 Args:
42 name (str): name of the field to resolve
43 resolver_name (str): name of the resolver as declare in DjangoObjectType
44 """
45
46 def resolve_fields_w_choices(model, info, **kwargs):
47 field_value = getattr(model, name)
48 if field_value:
49 return field_value
50 return None
51
52 resolve_fields_w_choices.__name__ = resolver_name
53 return resolve_fields_w_choices
54
55
56 def generate_filter_resolver(schema_type, resolver_name, field_name):
57 """
58 Generate function to resolve OneToMany filtering.
59
60 Args:
61 schema_type (DjangoObjectType): DjangoObjectType for a given model
62 resolver_name (str): name of the resolver
63 field_name (str): name of OneToMany field to filter
64 """
65 filterset_class = schema_type._meta.filterset_class
66
67 def resolve_filter(self, *args, **kwargs):
68 if not filterset_class:
69 return getattr(self, field_name).all()
70
71 resolved_obj = filterset_class(kwargs, getattr(self, field_name).all())
72
73 # Check result filter for errors.
74 if not resolved_obj.errors:
75 return resolved_obj.qs.all()
76
77 errors = {}
78
79 # Build error message from results
80 # Error messages are collected from each filter object
81 for key in resolved_obj.errors:
82 errors[key] = resolved_obj.errors[key]
83
84 # Raising this exception will send the error message in the response of the GraphQL request
85 raise GraphQLError(errors)
86
87 resolve_filter.__name__ = resolver_name
88 return resolve_filter
89
90
91 def generate_custom_field_resolver(name, resolver_name):
92 """Generate function to resolve each custom field within each DjangoObjectType.
93
94 Args:
95 name (str): name of the custom field to resolve
96 resolver_name (str): name of the resolver as declare in DjangoObjectType
97 """
98
99 def resolve_custom_field(self, info, **kwargs):
100 return self.cf.get(name, None)
101
102 resolve_custom_field.__name__ = resolver_name
103 return resolve_custom_field
104
105
106 def generate_computed_field_resolver(name, resolver_name):
107 """Generate an instance method for resolving an individual computed field within a given DjangoObjectType.
108
109 Args:
110 name (str): name of the computed field to resolve
111 resolver_name (str): name of the resolver as declare in DjangoObjectType
112 """
113
114 def resolve_computed_field(self, info, **kwargs):
115 return self.get_computed_field(slug=name)
116
117 resolve_computed_field.__name__ = resolver_name
118 return resolve_computed_field
119
120
121 def generate_relationship_resolver(name, resolver_name, relationship, side, peer_model):
122 """Generate function to resolve each custom relationship within each DjangoObjectType.
123
124 Args:
125 name (str): name of the custom field to resolve
126 resolver_name (str): name of the resolver as declare in DjangoObjectType
127 relationship (Relationship): Relationship object to generate a resolver for
128 side (str): side of the relationship to use for the resolver
129 peer_model (Model): Django Model of the peer of this relationship
130 """
131
132 def resolve_relationship(self, info, **kwargs):
133 """Return a queryset or an object depending on the type of the relationship."""
134 peer_side = RelationshipSideChoices.OPPOSITE[side]
135 query_params = {"relationship": relationship}
136 if not relationship.symmetric:
137 # Get the objects on the other side of this relationship
138 query_params[f"{side}_id"] = self.pk
139 queryset_ids = gql_optimizer.query(
140 RelationshipAssociation.objects.filter(**query_params).values_list(f"{peer_side}_id", flat=True), info
141 )
142 else:
143 # Get objects that are peers for this relationship, regardless of side
144 queryset_ids = list(
145 gql_optimizer.query(
146 RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(
147 "destination_id", flat=True
148 ),
149 info,
150 )
151 )
152 queryset_ids += list(
153 gql_optimizer.query(
154 RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(
155 "source_id", flat=True
156 ),
157 info,
158 )
159 )
160
161 if relationship.has_many(peer_side):
162 return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids), info)
163
164 return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids).first(), info)
165
166 resolve_relationship.__name__ = resolver_name
167 return resolve_relationship
168
169
170 def generate_schema_type(app_name: str, model: object) -> DjangoObjectType:
171 """
172 Take a Django model and generate a Graphene Type class definition.
173
174 Args:
175 app_name (str): name of the application or plugin the Model is part of.
176 model (object): Django Model
177
178 Example:
179 For a model with a name of "Device", the following class definition is generated:
180
181 class DeviceType(DjangoObjectType):
182 Meta:
183 model = Device
184 fields = ["__all__"]
185
186 If a FilterSet exists for this model at
187 '<app_name>.filters.<ModelName>FilterSet' the filterset will be stored in
188 filterset_class as follows:
189
190 class DeviceType(DjangoObjectType):
191 Meta:
192 model = Device
193 fields = ["__all__"]
194 filterset_class = DeviceFilterSet
195 """
196
197 main_attrs = {}
198 meta_attrs = {"model": model, "fields": "__all__"}
199
200 # We'll attempt to find a FilterSet corresponding to the model
201 # Not all models have a FilterSet defined so the function return none if it can't find a filterset
202 meta_attrs["filterset_class"] = get_filterset_for_model(model)
203
204 main_attrs["Meta"] = type("Meta", (object,), meta_attrs)
205
206 schema_type = type(f"{model.__name__}Type", (DjangoObjectType,), main_attrs)
207 return schema_type
208
209
210 def generate_list_search_parameters(schema_type):
211 """Generate list of query parameters for the list resolver based on a filterset."""
212
213 search_params = {}
214 if schema_type._meta.filterset_class is not None:
215 search_params = get_filtering_args_from_filterset(
216 schema_type._meta.filterset_class,
217 )
218
219 return search_params
220
221
222 def generate_single_item_resolver(schema_type, resolver_name):
223 """Generate a resolver for a single element of schema_type
224
225 Args:
226 schema_type (DjangoObjectType): DjangoObjectType for a given model
227 resolver_name (str): name of the resolver
228
229 Returns:
230 callable: Resolver function for a single element
231 """
232 model = schema_type._meta.model
233
234 def single_resolver(self, info, **kwargs):
235
236 obj_id = kwargs.get("id", None)
237 if obj_id:
238 return gql_optimizer.query(model.objects.restrict(info.context.user, "view").get(pk=obj_id), info)
239 return None
240
241 single_resolver.__name__ = resolver_name
242 return single_resolver
243
244
245 def generate_list_resolver(schema_type, resolver_name):
246 """
247 Generate resolver for a list of schema_type.
248
249 If a filterset_class is associated with the schema_type,
250 the resolver will pass all arguments received to the FilterSet
251 If not, it will return a restricted queryset for all objects
252
253 Args:
254 schema_type (DjangoObjectType): DjangoObjectType for a given model
255 resolver_name (str): name of the resolver
256
257 Returns:
258 callable: Resolver function for list of element
259 """
260 model = schema_type._meta.model
261
262 def list_resolver(self, info, **kwargs):
263 filterset_class = schema_type._meta.filterset_class
264 if filterset_class is not None:
265 resolved_obj = filterset_class(kwargs, model.objects.restrict(info.context.user, "view").all())
266
267 # Check result filter for errors.
268 if resolved_obj.errors:
269 errors = {}
270
271 # Build error message from results
272 # Error messages are collected from each filter object
273 for key in resolved_obj.errors:
274 errors[key] = resolved_obj.errors[key]
275
276 # Raising this exception will send the error message in the response of the GraphQL request
277 raise GraphQLError(errors)
278
279 return gql_optimizer.query(resolved_obj.qs.all(), info)
280
281 return gql_optimizer.query(model.objects.restrict(info.context.user, "view").all(), info)
282
283 list_resolver.__name__ = resolver_name
284 return list_resolver
285
286
287 def generate_attrs_for_schema_type(schema_type):
288 """Generate both attributes and resolvers for a given schema_type.
289
290 Args:
291 schema_type (DjangoObjectType): DjangoObjectType for a given model
292
293 Returns:
294 dict: Dict of attributes ready to merge into the QueryMixin class
295 """
296 attrs = {}
297 model = schema_type._meta.model
298
299 single_item_name = str_to_var_name(model._meta.verbose_name)
300 list_name = str_to_var_name(model._meta.verbose_name_plural)
301
302 # Define Attributes for single item and list with their search parameters
303 search_params = generate_list_search_parameters(schema_type)
304 attrs[single_item_name] = graphene.Field(schema_type, id=graphene.ID())
305 attrs[list_name] = graphene.List(schema_type, **search_params)
306
307 # Define Resolvers for both single item and list
308 single_item_resolver_name = f"{RESOLVER_PREFIX}{single_item_name}"
309 list_resolver_name = f"{RESOLVER_PREFIX}{list_name}"
310 attrs[single_item_resolver_name] = generate_single_item_resolver(schema_type, single_item_resolver_name)
311 attrs[list_resolver_name] = generate_list_resolver(schema_type, list_resolver_name)
312
313 return attrs
314
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/core/graphql/generators.py b/nautobot/core/graphql/generators.py
--- a/nautobot/core/graphql/generators.py
+++ b/nautobot/core/graphql/generators.py
@@ -235,7 +235,9 @@
obj_id = kwargs.get("id", None)
if obj_id:
- return gql_optimizer.query(model.objects.restrict(info.context.user, "view").get(pk=obj_id), info)
+ return gql_optimizer.query(
+ model.objects.restrict(info.context.user, "view").filter(pk=obj_id), info
+ ).first()
return None
single_resolver.__name__ = resolver_name
| {"golden_diff": "diff --git a/nautobot/core/graphql/generators.py b/nautobot/core/graphql/generators.py\n--- a/nautobot/core/graphql/generators.py\n+++ b/nautobot/core/graphql/generators.py\n@@ -235,7 +235,9 @@\n \n obj_id = kwargs.get(\"id\", None)\n if obj_id:\n- return gql_optimizer.query(model.objects.restrict(info.context.user, \"view\").get(pk=obj_id), info)\n+ return gql_optimizer.query(\n+ model.objects.restrict(info.context.user, \"view\").filter(pk=obj_id), info\n+ ).first()\n return None\n \n single_resolver.__name__ = resolver_name\n", "issue": "GraphQL single-object endpoints broken\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.7\r\n* Nautobot version: 1.2.0-beta1\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Build Nautobot instance of 1.2.0-beta1.\r\n2. Add some sample devices and associated data.\r\n3. Attempt to use the GraphiQL endpoint with a simple query such as:\r\n\r\n```\r\nquery ($device_id: ID!) {\r\n device(id: $device_id) {\r\n hostname: name\r\n }\r\n}\r\n```\r\n\r\nalong with one of your device's UUID as the query.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nExpect to see the device's hostname returned.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nGet error with following:\r\n\r\n```\r\n{\r\n \"errors\": [\r\n {\r\n \"message\": \"'Device' object has no attribute 'only'\",\r\n \"locations\": [\r\n {\r\n \"line\": 2,\r\n \"column\": 3\r\n }\r\n ],\r\n \"path\": [\r\n \"device\"\r\n ]\r\n }\r\n ],\r\n \"data\": {\r\n \"device\": null\r\n }\r\n}\r\n```\r\n\r\nA larger query returns this:\r\n\r\n```\r\n{\r\n \"errors\": [\r\n {\r\n \"message\": \"'Device' object has no attribute 'select_related'\",\r\n \"locations\": [\r\n {\r\n \"line\": 2,\r\n \"column\": 3\r\n }\r\n ],\r\n \"path\": [\r\n \"device\"\r\n ]\r\n }\r\n ],\r\n \"data\": {\r\n \"device\": null\r\n }\r\n}\r\n```\r\n\r\nThis appears to be related to optimizations that were done in the core.\n", "before_files": [{"content": "\"\"\"Library of generators for GraphQL.\"\"\"\n\nimport logging\n\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom graphql import GraphQLError\nfrom graphene_django import DjangoObjectType\n\nfrom nautobot.core.graphql.utils import str_to_var_name, get_filtering_args_from_filterset\nfrom nautobot.extras.choices import RelationshipSideChoices\nfrom nautobot.extras.models import RelationshipAssociation\nfrom nautobot.utilities.utils import get_filterset_for_model\n\nlogger = logging.getLogger(\"nautobot.graphql.generators\")\nRESOLVER_PREFIX = \"resolve_\"\n\n\ndef generate_restricted_queryset():\n \"\"\"\n Generate a function to return a restricted queryset compatible with the internal permissions system.\n\n Note that for built-in models such as ContentType the queryset has no `restrict` method, so we have to\n fail gracefully in that case.\n \"\"\"\n\n def get_queryset(queryset, info):\n if not hasattr(queryset, \"restrict\"):\n logger.debug(f\"Queryset {queryset} is not restrictable\")\n return queryset\n return queryset.restrict(info.context.user, \"view\")\n\n return get_queryset\n\n\ndef generate_null_choices_resolver(name, resolver_name):\n \"\"\"\n Generate function to resolve appropriate type when a field has `null=False` (default), `blank=True`, and\n `choices` defined.\n\n Args:\n name (str): name of the field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_fields_w_choices(model, info, **kwargs):\n field_value = getattr(model, name)\n if field_value:\n return field_value\n return None\n\n resolve_fields_w_choices.__name__ = resolver_name\n return resolve_fields_w_choices\n\n\ndef generate_filter_resolver(schema_type, resolver_name, field_name):\n \"\"\"\n Generate function to resolve OneToMany filtering.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n field_name (str): name of OneToMany field to filter\n \"\"\"\n filterset_class = schema_type._meta.filterset_class\n\n def resolve_filter(self, *args, **kwargs):\n if not filterset_class:\n return getattr(self, field_name).all()\n\n resolved_obj = filterset_class(kwargs, getattr(self, field_name).all())\n\n # Check result filter for errors.\n if not resolved_obj.errors:\n return resolved_obj.qs.all()\n\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n\n resolve_filter.__name__ = resolver_name\n return resolve_filter\n\n\ndef 
generate_custom_field_resolver(name, resolver_name):\n \"\"\"Generate function to resolve each custom field within each DjangoObjectType.\n\n Args:\n name (str): name of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_custom_field(self, info, **kwargs):\n return self.cf.get(name, None)\n\n resolve_custom_field.__name__ = resolver_name\n return resolve_custom_field\n\n\ndef generate_computed_field_resolver(name, resolver_name):\n \"\"\"Generate an instance method for resolving an individual computed field within a given DjangoObjectType.\n\n Args:\n name (str): name of the computed field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_computed_field(self, info, **kwargs):\n return self.get_computed_field(slug=name)\n\n resolve_computed_field.__name__ = resolver_name\n return resolve_computed_field\n\n\ndef generate_relationship_resolver(name, resolver_name, relationship, side, peer_model):\n \"\"\"Generate function to resolve each custom relationship within each DjangoObjectType.\n\n Args:\n name (str): name of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n relationship (Relationship): Relationship object to generate a resolver for\n side (str): side of the relationship to use for the resolver\n peer_model (Model): Django Model of the peer of this relationship\n \"\"\"\n\n def resolve_relationship(self, info, **kwargs):\n \"\"\"Return a queryset or an object depending on the type of the relationship.\"\"\"\n peer_side = RelationshipSideChoices.OPPOSITE[side]\n query_params = {\"relationship\": relationship}\n if not relationship.symmetric:\n # Get the objects on the other side of this relationship\n query_params[f\"{side}_id\"] = self.pk\n queryset_ids = gql_optimizer.query(\n RelationshipAssociation.objects.filter(**query_params).values_list(f\"{peer_side}_id\", flat=True), info\n )\n else:\n # Get objects that are peers for this relationship, regardless of side\n queryset_ids = list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(\n \"destination_id\", flat=True\n ),\n info,\n )\n )\n queryset_ids += list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(\n \"source_id\", flat=True\n ),\n info,\n )\n )\n\n if relationship.has_many(peer_side):\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids), info)\n\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids).first(), info)\n\n resolve_relationship.__name__ = resolver_name\n return resolve_relationship\n\n\ndef generate_schema_type(app_name: str, model: object) -> DjangoObjectType:\n \"\"\"\n Take a Django model and generate a Graphene Type class definition.\n\n Args:\n app_name (str): name of the application or plugin the Model is part of.\n model (object): Django Model\n\n Example:\n For a model with a name of \"Device\", the following class definition is generated:\n\n class DeviceType(DjangoObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n\n If a FilterSet exists for this model at\n '<app_name>.filters.<ModelName>FilterSet' the filterset will be stored in\n filterset_class as follows:\n\n class DeviceType(DjangoObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n filterset_class = DeviceFilterSet\n \"\"\"\n\n main_attrs = {}\n meta_attrs = 
{\"model\": model, \"fields\": \"__all__\"}\n\n # We'll attempt to find a FilterSet corresponding to the model\n # Not all models have a FilterSet defined so the function return none if it can't find a filterset\n meta_attrs[\"filterset_class\"] = get_filterset_for_model(model)\n\n main_attrs[\"Meta\"] = type(\"Meta\", (object,), meta_attrs)\n\n schema_type = type(f\"{model.__name__}Type\", (DjangoObjectType,), main_attrs)\n return schema_type\n\n\ndef generate_list_search_parameters(schema_type):\n \"\"\"Generate list of query parameters for the list resolver based on a filterset.\"\"\"\n\n search_params = {}\n if schema_type._meta.filterset_class is not None:\n search_params = get_filtering_args_from_filterset(\n schema_type._meta.filterset_class,\n )\n\n return search_params\n\n\ndef generate_single_item_resolver(schema_type, resolver_name):\n \"\"\"Generate a resolver for a single element of schema_type\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n callable: Resolver function for a single element\n \"\"\"\n model = schema_type._meta.model\n\n def single_resolver(self, info, **kwargs):\n\n obj_id = kwargs.get(\"id\", None)\n if obj_id:\n return gql_optimizer.query(model.objects.restrict(info.context.user, \"view\").get(pk=obj_id), info)\n return None\n\n single_resolver.__name__ = resolver_name\n return single_resolver\n\n\ndef generate_list_resolver(schema_type, resolver_name):\n \"\"\"\n Generate resolver for a list of schema_type.\n\n If a filterset_class is associated with the schema_type,\n the resolver will pass all arguments received to the FilterSet\n If not, it will return a restricted queryset for all objects\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n callable: Resolver function for list of element\n \"\"\"\n model = schema_type._meta.model\n\n def list_resolver(self, info, **kwargs):\n filterset_class = schema_type._meta.filterset_class\n if filterset_class is not None:\n resolved_obj = filterset_class(kwargs, model.objects.restrict(info.context.user, \"view\").all())\n\n # Check result filter for errors.\n if resolved_obj.errors:\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n\n return gql_optimizer.query(resolved_obj.qs.all(), info)\n\n return gql_optimizer.query(model.objects.restrict(info.context.user, \"view\").all(), info)\n\n list_resolver.__name__ = resolver_name\n return list_resolver\n\n\ndef generate_attrs_for_schema_type(schema_type):\n \"\"\"Generate both attributes and resolvers for a given schema_type.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n\n Returns:\n dict: Dict of attributes ready to merge into the QueryMixin class\n \"\"\"\n attrs = {}\n model = schema_type._meta.model\n\n single_item_name = str_to_var_name(model._meta.verbose_name)\n list_name = str_to_var_name(model._meta.verbose_name_plural)\n\n # Define Attributes for single item and list with their search parameters\n search_params = generate_list_search_parameters(schema_type)\n attrs[single_item_name] = graphene.Field(schema_type, id=graphene.ID())\n attrs[list_name] = graphene.List(schema_type, **search_params)\n\n 
# Define Resolvers for both single item and list\n single_item_resolver_name = f\"{RESOLVER_PREFIX}{single_item_name}\"\n list_resolver_name = f\"{RESOLVER_PREFIX}{list_name}\"\n attrs[single_item_resolver_name] = generate_single_item_resolver(schema_type, single_item_resolver_name)\n attrs[list_resolver_name] = generate_list_resolver(schema_type, list_resolver_name)\n\n return attrs\n", "path": "nautobot/core/graphql/generators.py"}], "after_files": [{"content": "\"\"\"Library of generators for GraphQL.\"\"\"\n\nimport logging\n\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom graphql import GraphQLError\nfrom graphene_django import DjangoObjectType\n\nfrom nautobot.core.graphql.utils import str_to_var_name, get_filtering_args_from_filterset\nfrom nautobot.extras.choices import RelationshipSideChoices\nfrom nautobot.extras.models import RelationshipAssociation\nfrom nautobot.utilities.utils import get_filterset_for_model\n\nlogger = logging.getLogger(\"nautobot.graphql.generators\")\nRESOLVER_PREFIX = \"resolve_\"\n\n\ndef generate_restricted_queryset():\n \"\"\"\n Generate a function to return a restricted queryset compatible with the internal permissions system.\n\n Note that for built-in models such as ContentType the queryset has no `restrict` method, so we have to\n fail gracefully in that case.\n \"\"\"\n\n def get_queryset(queryset, info):\n if not hasattr(queryset, \"restrict\"):\n logger.debug(f\"Queryset {queryset} is not restrictable\")\n return queryset\n return queryset.restrict(info.context.user, \"view\")\n\n return get_queryset\n\n\ndef generate_null_choices_resolver(name, resolver_name):\n \"\"\"\n Generate function to resolve appropriate type when a field has `null=False` (default), `blank=True`, and\n `choices` defined.\n\n Args:\n name (str): name of the field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_fields_w_choices(model, info, **kwargs):\n field_value = getattr(model, name)\n if field_value:\n return field_value\n return None\n\n resolve_fields_w_choices.__name__ = resolver_name\n return resolve_fields_w_choices\n\n\ndef generate_filter_resolver(schema_type, resolver_name, field_name):\n \"\"\"\n Generate function to resolve OneToMany filtering.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n field_name (str): name of OneToMany field to filter\n \"\"\"\n filterset_class = schema_type._meta.filterset_class\n\n def resolve_filter(self, *args, **kwargs):\n if not filterset_class:\n return getattr(self, field_name).all()\n\n resolved_obj = filterset_class(kwargs, getattr(self, field_name).all())\n\n # Check result filter for errors.\n if not resolved_obj.errors:\n return resolved_obj.qs.all()\n\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n\n resolve_filter.__name__ = resolver_name\n return resolve_filter\n\n\ndef generate_custom_field_resolver(name, resolver_name):\n \"\"\"Generate function to resolve each custom field within each DjangoObjectType.\n\n Args:\n name (str): name of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_custom_field(self, info, 
**kwargs):\n return self.cf.get(name, None)\n\n resolve_custom_field.__name__ = resolver_name\n return resolve_custom_field\n\n\ndef generate_computed_field_resolver(name, resolver_name):\n \"\"\"Generate an instance method for resolving an individual computed field within a given DjangoObjectType.\n\n Args:\n name (str): name of the computed field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_computed_field(self, info, **kwargs):\n return self.get_computed_field(slug=name)\n\n resolve_computed_field.__name__ = resolver_name\n return resolve_computed_field\n\n\ndef generate_relationship_resolver(name, resolver_name, relationship, side, peer_model):\n \"\"\"Generate function to resolve each custom relationship within each DjangoObjectType.\n\n Args:\n name (str): name of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n relationship (Relationship): Relationship object to generate a resolver for\n side (str): side of the relationship to use for the resolver\n peer_model (Model): Django Model of the peer of this relationship\n \"\"\"\n\n def resolve_relationship(self, info, **kwargs):\n \"\"\"Return a queryset or an object depending on the type of the relationship.\"\"\"\n peer_side = RelationshipSideChoices.OPPOSITE[side]\n query_params = {\"relationship\": relationship}\n if not relationship.symmetric:\n # Get the objects on the other side of this relationship\n query_params[f\"{side}_id\"] = self.pk\n queryset_ids = gql_optimizer.query(\n RelationshipAssociation.objects.filter(**query_params).values_list(f\"{peer_side}_id\", flat=True), info\n )\n else:\n # Get objects that are peers for this relationship, regardless of side\n queryset_ids = list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(\n \"destination_id\", flat=True\n ),\n info,\n )\n )\n queryset_ids += list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(\n \"source_id\", flat=True\n ),\n info,\n )\n )\n\n if relationship.has_many(peer_side):\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids), info)\n\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids).first(), info)\n\n resolve_relationship.__name__ = resolver_name\n return resolve_relationship\n\n\ndef generate_schema_type(app_name: str, model: object) -> DjangoObjectType:\n \"\"\"\n Take a Django model and generate a Graphene Type class definition.\n\n Args:\n app_name (str): name of the application or plugin the Model is part of.\n model (object): Django Model\n\n Example:\n For a model with a name of \"Device\", the following class definition is generated:\n\n class DeviceType(DjangoObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n\n If a FilterSet exists for this model at\n '<app_name>.filters.<ModelName>FilterSet' the filterset will be stored in\n filterset_class as follows:\n\n class DeviceType(DjangoObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n filterset_class = DeviceFilterSet\n \"\"\"\n\n main_attrs = {}\n meta_attrs = {\"model\": model, \"fields\": \"__all__\"}\n\n # We'll attempt to find a FilterSet corresponding to the model\n # Not all models have a FilterSet defined so the function return none if it can't find a filterset\n meta_attrs[\"filterset_class\"] = get_filterset_for_model(model)\n\n main_attrs[\"Meta\"] = type(\"Meta\", 
(object,), meta_attrs)\n\n schema_type = type(f\"{model.__name__}Type\", (DjangoObjectType,), main_attrs)\n return schema_type\n\n\ndef generate_list_search_parameters(schema_type):\n \"\"\"Generate list of query parameters for the list resolver based on a filterset.\"\"\"\n\n search_params = {}\n if schema_type._meta.filterset_class is not None:\n search_params = get_filtering_args_from_filterset(\n schema_type._meta.filterset_class,\n )\n\n return search_params\n\n\ndef generate_single_item_resolver(schema_type, resolver_name):\n \"\"\"Generate a resolver for a single element of schema_type\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n callable: Resolver function for a single element\n \"\"\"\n model = schema_type._meta.model\n\n def single_resolver(self, info, **kwargs):\n\n obj_id = kwargs.get(\"id\", None)\n if obj_id:\n return gql_optimizer.query(\n model.objects.restrict(info.context.user, \"view\").filter(pk=obj_id), info\n ).first()\n return None\n\n single_resolver.__name__ = resolver_name\n return single_resolver\n\n\ndef generate_list_resolver(schema_type, resolver_name):\n \"\"\"\n Generate resolver for a list of schema_type.\n\n If a filterset_class is associated with the schema_type,\n the resolver will pass all arguments received to the FilterSet\n If not, it will return a restricted queryset for all objects\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n callable: Resolver function for list of element\n \"\"\"\n model = schema_type._meta.model\n\n def list_resolver(self, info, **kwargs):\n filterset_class = schema_type._meta.filterset_class\n if filterset_class is not None:\n resolved_obj = filterset_class(kwargs, model.objects.restrict(info.context.user, \"view\").all())\n\n # Check result filter for errors.\n if resolved_obj.errors:\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n\n return gql_optimizer.query(resolved_obj.qs.all(), info)\n\n return gql_optimizer.query(model.objects.restrict(info.context.user, \"view\").all(), info)\n\n list_resolver.__name__ = resolver_name\n return list_resolver\n\n\ndef generate_attrs_for_schema_type(schema_type):\n \"\"\"Generate both attributes and resolvers for a given schema_type.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n\n Returns:\n dict: Dict of attributes ready to merge into the QueryMixin class\n \"\"\"\n attrs = {}\n model = schema_type._meta.model\n\n single_item_name = str_to_var_name(model._meta.verbose_name)\n list_name = str_to_var_name(model._meta.verbose_name_plural)\n\n # Define Attributes for single item and list with their search parameters\n search_params = generate_list_search_parameters(schema_type)\n attrs[single_item_name] = graphene.Field(schema_type, id=graphene.ID())\n attrs[list_name] = graphene.List(schema_type, **search_params)\n\n # Define Resolvers for both single item and list\n single_item_resolver_name = f\"{RESOLVER_PREFIX}{single_item_name}\"\n list_resolver_name = f\"{RESOLVER_PREFIX}{list_name}\"\n attrs[single_item_resolver_name] = generate_single_item_resolver(schema_type, single_item_resolver_name)\n 
attrs[list_resolver_name] = generate_list_resolver(schema_type, list_resolver_name)\n\n return attrs\n", "path": "nautobot/core/graphql/generators.py"}]} |
gh_patches_debug_1310 | rasdani/github-patches | git_diff | chainer__chainer-1178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HuberLoss's backward() should not ignore gy
`HuberLoss`'s `backward()` ignores `gy`, so any computation performed after `HuberLoss` has no effect on the gradients it backpropagates. I think such behavior is not correct.
``` python
>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))
>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))
>>> F.huber_loss(x, t, 1.0).backward()
>>> x.grad
array([[-1.]], dtype=float32)
```
``` python
>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))
>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))
>>> (F.huber_loss(x, t, 1.0) * 0).backward() # Multiply the loss by zero
>>> x.grad
array([[-1.]], dtype=float32)
```
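The two snippets above show the symptom: no matter what is applied to the loss afterwards, `x.grad` stays `-1`. A minimal NumPy sketch of the expected chain-rule behavior (shapes assumed to follow `huber_loss`'s `(N, K)` input and `(N,)` summed output; this is an illustration, not Chainer's implementation):

``` python
import numpy as np

def huber_backward(diff, delta, gy):
    # Local gradient of the Huber loss with respect to x0, given diff = x0 - x1.
    mask = np.abs(diff) <= delta
    local_grad = np.where(mask, diff, delta * np.sign(diff))
    # Chain rule: scale by the upstream gradient gy, broadcast over the axis
    # that forward() summed, so scaling the loss also scales the input gradient.
    return gy[:, None] * local_grad

diff = np.zeros((1, 1), dtype=np.float32) - np.ones((1, 1), dtype=np.float32)
print(huber_backward(diff, 1.0, np.ones(1, dtype=np.float32)))   # [[-1.]]
print(huber_backward(diff, 1.0, np.zeros(1, dtype=np.float32)))  # [[-0.]]
```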
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/loss/huber_loss.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class HuberLoss(function.Function):
9
10 def __init__(self, delta):
11 self.delta = delta
12
13 def check_type_forward(self, in_types):
14 type_check.expect(in_types.size() == 2)
15 type_check.expect(
16 in_types[0].dtype == numpy.float32,
17 in_types[1].dtype == numpy.float32,
18 in_types[0].shape == in_types[1].shape
19 )
20
21 def forward(self, inputs):
22 xp = cuda.get_array_module(*inputs)
23 x0, x1 = inputs
24 self.diff = x0 - x1
25 y = xp.square(self.diff)
26 mask = y > (self.delta ** 2)
27 y -= mask * xp.square(abs(self.diff) - self.delta)
28 y *= 0.5
29 return y.sum(axis=1),
30
31 def backward(self, inputs, gy):
32 xp = cuda.get_array_module(*inputs)
33 mask = xp.abs(self.diff) <= self.delta
34 gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
35 return gx, -gx
36
37
38 def huber_loss(x, t, delta):
39 """Loss function which is less sensitive to outliers in data than MSE.
40
41 .. math::
42 a = x - t
43
44 and
45
46 .. math::
47 L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
48 \\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
49 \\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
50 \\end{array} \\right.
51
52 Args:
53 x (~chainer.Variable): Input variable.
54 The shape of ``x`` should be (:math:`N`, :math:`K`).
55 t (~chainer.Variable): Target variable for regression.
56 The shape of ``t`` should be (:math:`N`, :math:`K`).
57 delta (float): Constant variable for huber loss function
58 as used in definition.
59
60 Returns:
61 ~chainer.Variable: A variable object holding a scalar array of the
62 huber loss :math:`L_{\\delta}`.
63
64 See:
65 `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
66
67 """
68 return HuberLoss(delta=delta)(x, t)
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/loss/huber_loss.py b/chainer/functions/loss/huber_loss.py
--- a/chainer/functions/loss/huber_loss.py
+++ b/chainer/functions/loss/huber_loss.py
@@ -31,7 +31,8 @@
def backward(self, inputs, gy):
xp = cuda.get_array_module(*inputs)
mask = xp.abs(self.diff) <= self.delta
- gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
+ gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \
+ xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
return gx, -gx
| {"golden_diff": "diff --git a/chainer/functions/loss/huber_loss.py b/chainer/functions/loss/huber_loss.py\n--- a/chainer/functions/loss/huber_loss.py\n+++ b/chainer/functions/loss/huber_loss.py\n@@ -31,7 +31,8 @@\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n- gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n+ gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \\\n+ xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n", "issue": "HuberLoss's backward() should not ignore gy\n`HuberLoss`'s `backward()` ignores `gy`, so any computation after `HuberLoss` has no effect for gradients backpropagated by `HuberLoss`. I think such a behavior is not correct.\n\n``` python\n>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))\n>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))\n>>> F.huber_loss(x, t, 1.0).backward()\n>>> x.grad\narray([[-1.]], dtype=float32)\n```\n\n``` python\n>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))\n>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))\n>>> (F.huber_loss(x, t, 1.0) * 0).backward() # Multiply the loss by zero\n>>> x.grad\narray([[-1.]], dtype=float32)\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass HuberLoss(function.Function):\n\n def __init__(self, delta):\n self.delta = delta\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n x0, x1 = inputs\n self.diff = x0 - x1\n y = xp.square(self.diff)\n mask = y > (self.delta ** 2)\n y -= mask * xp.square(abs(self.diff) - self.delta)\n y *= 0.5\n return y.sum(axis=1),\n\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n\n\ndef huber_loss(x, t, delta):\n \"\"\"Loss function which is less sensitive to outliers in data than MSE.\n\n .. math::\n a = x - t\n\n and\n\n .. 
math::\n L_{\\\\delta}(a) = \\\\left \\\\{ \\\\begin{array}{cc}\n \\\\frac{1}{2} a^2 & {\\\\rm if~|a| \\\\leq \\\\delta} \\\\\\\\\n \\\\delta (|a| - \\\\frac{1}{2} \\\\delta) & {\\\\rm otherwise,}\n \\\\end{array} \\\\right.\n\n Args:\n x (~chainer.Variable): Input variable.\n The shape of ``x`` should be (:math:`N`, :math:`K`).\n t (~chainer.Variable): Target variable for regression.\n The shape of ``t`` should be (:math:`N`, :math:`K`).\n delta (float): Constant variable for huber loss function\n as used in definition.\n\n Returns:\n ~chainer.Variable: A variable object holding a scalar array of the\n huber loss :math:`L_{\\\\delta}`.\n\n See:\n `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.\n\n \"\"\"\n return HuberLoss(delta=delta)(x, t)\n", "path": "chainer/functions/loss/huber_loss.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass HuberLoss(function.Function):\n\n def __init__(self, delta):\n self.delta = delta\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n x0, x1 = inputs\n self.diff = x0 - x1\n y = xp.square(self.diff)\n mask = y > (self.delta ** 2)\n y -= mask * xp.square(abs(self.diff) - self.delta)\n y *= 0.5\n return y.sum(axis=1),\n\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \\\n xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n\n\ndef huber_loss(x, t, delta):\n \"\"\"Loss function which is less sensitive to outliers in data than MSE.\n\n .. math::\n a = x - t\n\n and\n\n .. math::\n L_{\\\\delta}(a) = \\\\left \\\\{ \\\\begin{array}{cc}\n \\\\frac{1}{2} a^2 & {\\\\rm if~|a| \\\\leq \\\\delta} \\\\\\\\\n \\\\delta (|a| - \\\\frac{1}{2} \\\\delta) & {\\\\rm otherwise,}\n \\\\end{array} \\\\right.\n\n Args:\n x (~chainer.Variable): Input variable.\n The shape of ``x`` should be (:math:`N`, :math:`K`).\n t (~chainer.Variable): Target variable for regression.\n The shape of ``t`` should be (:math:`N`, :math:`K`).\n delta (float): Constant variable for huber loss function\n as used in definition.\n\n Returns:\n ~chainer.Variable: A variable object holding a scalar array of the\n huber loss :math:`L_{\\\\delta}`.\n\n See:\n `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.\n\n \"\"\"\n return HuberLoss(delta=delta)(x, t)\n", "path": "chainer/functions/loss/huber_loss.py"}]} |
gh_patches_debug_1311 | rasdani/github-patches | git_diff | aio-libs__aiohttp-4193 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement ClientSession.timeout property
It is a bare method that returns `self._timeout`.
Documentation and a simple test are required.
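A rough sketch of the accessor and the kind of smoke test being asked for; the property body just mirrors the issue text ("returns `self._timeout`") and is an assumption, not a confirmed patch:
```python
import asyncio
from aiohttp import ClientSession, ClientTimeout

# The accessor itself would live on ClientSession, roughly:
#
#     @property
#     def timeout(self) -> ClientTimeout:
#         """Timeout used for the whole session."""
#         return self._timeout

async def check_timeout_property() -> None:
    timeout = ClientTimeout(total=1)
    async with ClientSession(timeout=timeout) as session:
        # The property exposes the ClientTimeout stored at construction time.
        assert session.timeout is timeout

asyncio.run(check_timeout_property())
```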
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/client.py`
Content:
```
1 """HTTP Client for asyncio."""
2
3 import asyncio
4 import base64
5 import hashlib
6 import json
7 import os
8 import sys
9 import traceback
10 import warnings
11 from types import SimpleNamespace, TracebackType
12 from typing import ( # noqa
13 Any,
14 Awaitable,
15 Callable,
16 Coroutine,
17 Generator,
18 Generic,
19 Iterable,
20 List,
21 Mapping,
22 Optional,
23 Set,
24 Tuple,
25 Type,
26 TypeVar,
27 Union,
28 )
29
30 import attr
31 from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr
32 from typing_extensions import final
33 from yarl import URL
34
35 from . import hdrs, http, payload
36 from .abc import AbstractCookieJar
37 from .client_exceptions import ClientConnectionError as ClientConnectionError
38 from .client_exceptions import (
39 ClientConnectorCertificateError as ClientConnectorCertificateError,
40 )
41 from .client_exceptions import ClientConnectorError as ClientConnectorError
42 from .client_exceptions import (
43 ClientConnectorSSLError as ClientConnectorSSLError,
44 )
45 from .client_exceptions import ClientError as ClientError
46 from .client_exceptions import ClientHttpProxyError as ClientHttpProxyError
47 from .client_exceptions import ClientOSError as ClientOSError
48 from .client_exceptions import ClientPayloadError as ClientPayloadError
49 from .client_exceptions import (
50 ClientProxyConnectionError as ClientProxyConnectionError,
51 )
52 from .client_exceptions import ClientResponseError as ClientResponseError
53 from .client_exceptions import ClientSSLError as ClientSSLError
54 from .client_exceptions import ContentTypeError as ContentTypeError
55 from .client_exceptions import InvalidURL as InvalidURL
56 from .client_exceptions import ServerConnectionError as ServerConnectionError
57 from .client_exceptions import (
58 ServerDisconnectedError as ServerDisconnectedError,
59 )
60 from .client_exceptions import (
61 ServerFingerprintMismatch as ServerFingerprintMismatch,
62 )
63 from .client_exceptions import ServerTimeoutError as ServerTimeoutError
64 from .client_exceptions import TooManyRedirects as TooManyRedirects
65 from .client_exceptions import WSServerHandshakeError as WSServerHandshakeError
66 from .client_reqrep import SSL_ALLOWED_TYPES as SSL_ALLOWED_TYPES
67 from .client_reqrep import ClientRequest as ClientRequest
68 from .client_reqrep import ClientResponse as ClientResponse
69 from .client_reqrep import Fingerprint as Fingerprint
70 from .client_reqrep import RequestInfo as RequestInfo
71 from .client_ws import DEFAULT_WS_CLIENT_TIMEOUT
72 from .client_ws import ClientWebSocketResponse as ClientWebSocketResponse
73 from .client_ws import ClientWSTimeout
74 from .connector import BaseConnector as BaseConnector
75 from .connector import NamedPipeConnector as NamedPipeConnector
76 from .connector import TCPConnector as TCPConnector
77 from .connector import UnixConnector as UnixConnector
78 from .cookiejar import CookieJar
79 from .helpers import (
80 PY_36,
81 BasicAuth,
82 CeilTimeout,
83 TimeoutHandle,
84 get_running_loop,
85 proxies_from_env,
86 sentinel,
87 strip_auth_from_url,
88 )
89 from .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter
90 from .http_websocket import ( # noqa
91 WSHandshakeError,
92 WSMessage,
93 ws_ext_gen,
94 ws_ext_parse,
95 )
96 from .streams import FlowControlDataQueue
97 from .tracing import Trace, TraceConfig
98 from .typedefs import JSONEncoder, LooseCookies, LooseHeaders, StrOrURL
99
100 __all__ = (
101 # client_exceptions
102 'ClientConnectionError',
103 'ClientConnectorCertificateError',
104 'ClientConnectorError',
105 'ClientConnectorSSLError',
106 'ClientError',
107 'ClientHttpProxyError',
108 'ClientOSError',
109 'ClientPayloadError',
110 'ClientProxyConnectionError',
111 'ClientResponseError',
112 'ClientSSLError',
113 'ContentTypeError',
114 'InvalidURL',
115 'ServerConnectionError',
116 'ServerDisconnectedError',
117 'ServerFingerprintMismatch',
118 'ServerTimeoutError',
119 'TooManyRedirects',
120 'WSServerHandshakeError',
121 # client_reqrep
122 'ClientRequest',
123 'ClientResponse',
124 'Fingerprint',
125 'RequestInfo',
126 # connector
127 'BaseConnector',
128 'TCPConnector',
129 'UnixConnector',
130 'NamedPipeConnector',
131 # client_ws
132 'ClientWebSocketResponse',
133 # client
134 'ClientSession',
135 'ClientTimeout',
136 'request')
137
138
139 try:
140 from ssl import SSLContext
141 except ImportError: # pragma: no cover
142 SSLContext = object # type: ignore
143
144
145 @attr.s(frozen=True, slots=True)
146 class ClientTimeout:
147 total = attr.ib(type=Optional[float], default=None)
148 connect = attr.ib(type=Optional[float], default=None)
149 sock_read = attr.ib(type=Optional[float], default=None)
150 sock_connect = attr.ib(type=Optional[float], default=None)
151
152 # pool_queue_timeout = attr.ib(type=float, default=None)
153 # dns_resolution_timeout = attr.ib(type=float, default=None)
154 # socket_connect_timeout = attr.ib(type=float, default=None)
155 # connection_acquiring_timeout = attr.ib(type=float, default=None)
156 # new_connection_timeout = attr.ib(type=float, default=None)
157 # http_header_timeout = attr.ib(type=float, default=None)
158 # response_body_timeout = attr.ib(type=float, default=None)
159
160 # to create a timeout specific for a single request, either
161 # - create a completely new one to overwrite the default
162 # - or use http://www.attrs.org/en/stable/api.html#attr.evolve
163 # to overwrite the defaults
164
165
166 # 5 Minute default read timeout
167 DEFAULT_TIMEOUT = ClientTimeout(total=5*60)
168
169 _RetType = TypeVar('_RetType')
170
171
172 @final
173 class ClientSession:
174 """First-class interface for making HTTP requests."""
175
176 __slots__ = (
177 '_source_traceback', '_connector',
178 '_loop', '_cookie_jar',
179 '_connector_owner', '_default_auth',
180 '_version', '_json_serialize',
181 '_requote_redirect_url',
182 '_timeout', '_raise_for_status', '_auto_decompress',
183 '_trust_env', '_default_headers', '_skip_auto_headers',
184 '_request_class', '_response_class',
185 '_ws_response_class', '_trace_configs')
186
187 def __init__(self, *, connector: Optional[BaseConnector]=None,
188 cookies: Optional[LooseCookies]=None,
189 headers: Optional[LooseHeaders]=None,
190 skip_auto_headers: Optional[Iterable[str]]=None,
191 auth: Optional[BasicAuth]=None,
192 json_serialize: JSONEncoder=json.dumps,
193 request_class: Type[ClientRequest]=ClientRequest,
194 response_class: Type[ClientResponse]=ClientResponse,
195 ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, # noqa
196 version: HttpVersion=http.HttpVersion11,
197 cookie_jar: Optional[AbstractCookieJar]=None,
198 connector_owner: bool=True,
199 raise_for_status: Union[bool, Callable[[ClientResponse], Awaitable[None]]]=False, # noqa
200 timeout: Union[object, ClientTimeout]=sentinel,
201 auto_decompress: bool=True,
202 trust_env: bool=False,
203 requote_redirect_url: bool=True,
204 trace_configs: Optional[List[TraceConfig]]=None) -> None:
205
206 loop = get_running_loop()
207
208 if connector is None:
209 connector = TCPConnector()
210
211 # Initialize these three attrs before raising any exception,
212 # they are used in __del__
213 self._connector = connector # type: Optional[BaseConnector]
214 self._loop = loop
215 if loop.get_debug():
216 self._source_traceback = traceback.extract_stack(sys._getframe(1)) # type: Optional[traceback.StackSummary] # noqa
217 else:
218 self._source_traceback = None
219
220 if connector._loop is not loop:
221 raise RuntimeError(
222 "Session and connector have to use same event loop")
223
224 if cookie_jar is None:
225 cookie_jar = CookieJar()
226 self._cookie_jar = cookie_jar
227
228 if cookies is not None:
229 self._cookie_jar.update_cookies(cookies)
230
231 self._connector_owner = connector_owner
232 self._default_auth = auth
233 self._version = version
234 self._json_serialize = json_serialize
235 if timeout is sentinel:
236 self._timeout = DEFAULT_TIMEOUT
237 else:
238 self._timeout = timeout # type: ignore
239 self._raise_for_status = raise_for_status
240 self._auto_decompress = auto_decompress
241 self._trust_env = trust_env
242 self._requote_redirect_url = requote_redirect_url
243
244 # Convert to list of tuples
245 if headers:
246 real_headers = CIMultiDict(headers) # type: CIMultiDict[str]
247 else:
248 real_headers = CIMultiDict()
249 self._default_headers = real_headers # type: CIMultiDict[str]
250 if skip_auto_headers is not None:
251 self._skip_auto_headers = frozenset([istr(i)
252 for i in skip_auto_headers])
253 else:
254 self._skip_auto_headers = frozenset()
255
256 self._request_class = request_class
257 self._response_class = response_class
258 self._ws_response_class = ws_response_class
259
260 self._trace_configs = trace_configs or []
261 for trace_config in self._trace_configs:
262 trace_config.freeze()
263
264 def __init_subclass__(cls: Type['ClientSession']) -> None:
265 raise TypeError("Inheritance class {} from ClientSession "
266 "is forbidden".format(cls.__name__))
267
268 def __del__(self, _warnings: Any=warnings) -> None:
269 if not self.closed:
270 if PY_36:
271 kwargs = {'source': self}
272 else:
273 kwargs = {}
274 _warnings.warn("Unclosed client session {!r}".format(self),
275 ResourceWarning,
276 **kwargs)
277 context = {'client_session': self,
278 'message': 'Unclosed client session'}
279 if self._source_traceback is not None:
280 context['source_traceback'] = self._source_traceback
281 self._loop.call_exception_handler(context)
282
283 def request(self,
284 method: str,
285 url: StrOrURL,
286 **kwargs: Any) -> '_RequestContextManager':
287 """Perform HTTP request."""
288 return _RequestContextManager(self._request(method, url, **kwargs))
289
290 async def _request(
291 self,
292 method: str,
293 str_or_url: StrOrURL, *,
294 params: Optional[Mapping[str, str]]=None,
295 data: Any=None,
296 json: Any=None,
297 cookies: Optional[LooseCookies]=None,
298 headers: LooseHeaders=None,
299 skip_auto_headers: Optional[Iterable[str]]=None,
300 auth: Optional[BasicAuth]=None,
301 allow_redirects: bool=True,
302 max_redirects: int=10,
303 compress: Optional[str]=None,
304 chunked: Optional[bool]=None,
305 expect100: bool=False,
306 raise_for_status: Union[None, bool, Callable[[ClientResponse], Awaitable[None]]]=None, # noqa
307 read_until_eof: bool=True,
308 proxy: Optional[StrOrURL]=None,
309 proxy_auth: Optional[BasicAuth]=None,
310 timeout: Union[ClientTimeout, object]=sentinel,
311 ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None,
312 proxy_headers: Optional[LooseHeaders]=None,
313 trace_request_ctx: Optional[SimpleNamespace]=None
314 ) -> ClientResponse:
315
316 # NOTE: timeout clamps existing connect and read timeouts. We cannot
317 # set the default to None because we need to detect if the user wants
318 # to use the existing timeouts by setting timeout to None.
319
320 if self.closed:
321 raise RuntimeError('Session is closed')
322
323 if not isinstance(ssl, SSL_ALLOWED_TYPES):
324 raise TypeError("ssl should be SSLContext, bool, Fingerprint, "
325 "or None, got {!r} instead.".format(ssl))
326
327 if data is not None and json is not None:
328 raise ValueError(
329 'data and json parameters can not be used at the same time')
330 elif json is not None:
331 data = payload.JsonPayload(json, dumps=self._json_serialize)
332
333 redirects = 0
334 history = []
335 version = self._version
336
337 # Merge with default headers and transform to CIMultiDict
338 headers = self._prepare_headers(headers)
339 proxy_headers = self._prepare_headers(proxy_headers)
340
341 try:
342 url = URL(str_or_url)
343 except ValueError:
344 raise InvalidURL(str_or_url)
345
346 skip_headers = set(self._skip_auto_headers)
347 if skip_auto_headers is not None:
348 for i in skip_auto_headers:
349 skip_headers.add(istr(i))
350
351 if proxy is not None:
352 try:
353 proxy = URL(proxy)
354 except ValueError:
355 raise InvalidURL(proxy)
356
357 if timeout is sentinel:
358 real_timeout = self._timeout # type: ClientTimeout
359 else:
360 if not isinstance(timeout, ClientTimeout):
361 real_timeout = ClientTimeout(total=timeout) # type: ignore
362 else:
363 real_timeout = timeout
364 # timeout is cumulative for all request operations
365 # (request, redirects, responses, data consuming)
366 tm = TimeoutHandle(self._loop, real_timeout.total)
367 handle = tm.start()
368
369 traces = [
370 Trace(
371 self,
372 trace_config,
373 trace_config.trace_config_ctx(
374 trace_request_ctx=trace_request_ctx)
375 )
376 for trace_config in self._trace_configs
377 ]
378
379 for trace in traces:
380 await trace.send_request_start(
381 method,
382 url,
383 headers
384 )
385
386 timer = tm.timer()
387 try:
388 with timer:
389 while True:
390 url, auth_from_url = strip_auth_from_url(url)
391 if auth and auth_from_url:
392 raise ValueError("Cannot combine AUTH argument with "
393 "credentials encoded in URL")
394
395 if auth is None:
396 auth = auth_from_url
397 if auth is None:
398 auth = self._default_auth
399 # It would be confusing if we support explicit
400 # Authorization header with auth argument
401 if (headers is not None and
402 auth is not None and
403 hdrs.AUTHORIZATION in headers):
404 raise ValueError("Cannot combine AUTHORIZATION header "
405 "with AUTH argument or credentials "
406 "encoded in URL")
407
408 all_cookies = self._cookie_jar.filter_cookies(url)
409
410 if cookies is not None:
411 tmp_cookie_jar = CookieJar()
412 tmp_cookie_jar.update_cookies(cookies)
413 req_cookies = tmp_cookie_jar.filter_cookies(url)
414 if req_cookies:
415 all_cookies.load(req_cookies)
416
417 if proxy is not None:
418 proxy = URL(proxy)
419 elif self._trust_env:
420 for scheme, proxy_info in proxies_from_env().items():
421 if scheme == url.scheme:
422 proxy = proxy_info.proxy
423 proxy_auth = proxy_info.proxy_auth
424 break
425
426 req = self._request_class(
427 method, url, params=params, headers=headers,
428 skip_auto_headers=skip_headers, data=data,
429 cookies=all_cookies, auth=auth, version=version,
430 compress=compress, chunked=chunked,
431 expect100=expect100, loop=self._loop,
432 response_class=self._response_class,
433 proxy=proxy, proxy_auth=proxy_auth, timer=timer,
434 session=self,
435 ssl=ssl, proxy_headers=proxy_headers, traces=traces)
436
437 # connection timeout
438 try:
439 with CeilTimeout(real_timeout.connect,
440 loop=self._loop):
441 assert self._connector is not None
442 conn = await self._connector.connect(
443 req,
444 traces=traces,
445 timeout=real_timeout
446 )
447 except asyncio.TimeoutError as exc:
448 raise ServerTimeoutError(
449 'Connection timeout '
450 'to host {0}'.format(url)) from exc
451
452 assert conn.transport is not None
453
454 assert conn.protocol is not None
455 conn.protocol.set_response_params(
456 timer=timer,
457 skip_payload=method.upper() == 'HEAD',
458 read_until_eof=read_until_eof,
459 auto_decompress=self._auto_decompress,
460 read_timeout=real_timeout.sock_read)
461
462 try:
463 try:
464 resp = await req.send(conn)
465 try:
466 await resp.start(conn)
467 except BaseException:
468 resp.close()
469 raise
470 except BaseException:
471 conn.close()
472 raise
473 except ClientError:
474 raise
475 except OSError as exc:
476 raise ClientOSError(*exc.args) from exc
477
478 self._cookie_jar.update_cookies(resp.cookies, resp.url)
479
480 # redirects
481 if resp.status in (
482 301, 302, 303, 307, 308) and allow_redirects:
483
484 for trace in traces:
485 await trace.send_request_redirect(
486 method,
487 url,
488 headers,
489 resp
490 )
491
492 redirects += 1
493 history.append(resp)
494 if max_redirects and redirects >= max_redirects:
495 resp.close()
496 raise TooManyRedirects(
497 history[0].request_info, tuple(history))
498
499 # For 301 and 302, mimic IE, now changed in RFC
500 # https://github.com/kennethreitz/requests/pull/269
501 if (resp.status == 303 and
502 resp.method != hdrs.METH_HEAD) \
503 or (resp.status in (301, 302) and
504 resp.method == hdrs.METH_POST):
505 method = hdrs.METH_GET
506 data = None
507 if headers.get(hdrs.CONTENT_LENGTH):
508 headers.pop(hdrs.CONTENT_LENGTH)
509
510 r_url = (resp.headers.get(hdrs.LOCATION) or
511 resp.headers.get(hdrs.URI))
512 if r_url is None:
513 # see github.com/aio-libs/aiohttp/issues/2022
514 break
515 else:
516 # reading from correct redirection
517 # response is forbidden
518 resp.release()
519
520 try:
521 r_url = URL(
522 r_url, encoded=not self._requote_redirect_url)
523
524 except ValueError:
525 raise InvalidURL(r_url)
526
527 scheme = r_url.scheme
528 if scheme not in ('http', 'https', ''):
529 resp.close()
530 raise ValueError(
531 'Can redirect only to http or https')
532 elif not scheme:
533 r_url = url.join(r_url)
534
535 if url.origin() != r_url.origin():
536 auth = None
537 headers.pop(hdrs.AUTHORIZATION, None)
538
539 url = r_url
540 params = None
541 resp.release()
542 continue
543
544 break
545
546 # check response status
547 if raise_for_status is None:
548 raise_for_status = self._raise_for_status
549
550 if raise_for_status is None:
551 pass
552 elif callable(raise_for_status):
553 await raise_for_status(resp)
554 elif raise_for_status:
555 resp.raise_for_status()
556
557 # register connection
558 if handle is not None:
559 if resp.connection is not None:
560 resp.connection.add_callback(handle.cancel)
561 else:
562 handle.cancel()
563
564 resp._history = tuple(history)
565
566 for trace in traces:
567 await trace.send_request_end(
568 method,
569 url,
570 headers,
571 resp
572 )
573 return resp
574
575 except BaseException as e:
576 # cleanup timer
577 tm.close()
578 if handle:
579 handle.cancel()
580 handle = None
581
582 for trace in traces:
583 await trace.send_request_exception(
584 method,
585 url,
586 headers,
587 e
588 )
589 raise
590
591 def ws_connect(
592 self,
593 url: StrOrURL, *,
594 method: str=hdrs.METH_GET,
595 protocols: Iterable[str]=(),
596 timeout: Union[ClientWSTimeout, float]=sentinel,
597 receive_timeout: Optional[float]=None,
598 autoclose: bool=True,
599 autoping: bool=True,
600 heartbeat: Optional[float]=None,
601 auth: Optional[BasicAuth]=None,
602 origin: Optional[str]=None,
603 headers: Optional[LooseHeaders]=None,
604 proxy: Optional[StrOrURL]=None,
605 proxy_auth: Optional[BasicAuth]=None,
606 ssl: Union[SSLContext, bool, None, Fingerprint]=None,
607 proxy_headers: Optional[LooseHeaders]=None,
608 compress: int=0,
609 max_msg_size: int=4*1024*1024) -> '_WSRequestContextManager':
610 """Initiate websocket connection."""
611 return _WSRequestContextManager(
612 self._ws_connect(url,
613 method=method,
614 protocols=protocols,
615 timeout=timeout,
616 receive_timeout=receive_timeout,
617 autoclose=autoclose,
618 autoping=autoping,
619 heartbeat=heartbeat,
620 auth=auth,
621 origin=origin,
622 headers=headers,
623 proxy=proxy,
624 proxy_auth=proxy_auth,
625 ssl=ssl,
626 proxy_headers=proxy_headers,
627 compress=compress,
628 max_msg_size=max_msg_size))
629
630 async def _ws_connect(
631 self,
632 url: StrOrURL, *,
633 method: str=hdrs.METH_GET,
634 protocols: Iterable[str]=(),
635 timeout: Union[ClientWSTimeout, float]=sentinel,
636 receive_timeout: Optional[float]=None,
637 autoclose: bool=True,
638 autoping: bool=True,
639 heartbeat: Optional[float]=None,
640 auth: Optional[BasicAuth]=None,
641 origin: Optional[str]=None,
642 headers: Optional[LooseHeaders]=None,
643 proxy: Optional[StrOrURL]=None,
644 proxy_auth: Optional[BasicAuth]=None,
645 ssl: Union[SSLContext, bool, None, Fingerprint]=None,
646 proxy_headers: Optional[LooseHeaders]=None,
647 compress: int=0,
648 max_msg_size: int=4*1024*1024
649 ) -> ClientWebSocketResponse:
650 if timeout is not sentinel:
651 if isinstance(timeout, ClientWSTimeout):
652 ws_timeout = timeout
653 else:
654 warnings.warn("parameter 'timeout' of type 'float' "
655 "is deprecated, please use "
656 "'timeout=ClientWSTimeout(ws_close=...)'",
657 DeprecationWarning,
658 stacklevel=2)
659 ws_timeout = ClientWSTimeout(ws_close=timeout)
660 else:
661 ws_timeout = DEFAULT_WS_CLIENT_TIMEOUT
662 if receive_timeout is not None:
663 warnings.warn("float parameter 'receive_timeout' "
664 "is deprecated, please use parameter "
665 "'timeout=ClientWSTimeout(ws_receive=...)'",
666 DeprecationWarning,
667 stacklevel=2)
668 ws_timeout = attr.evolve(ws_timeout, ws_receive=receive_timeout)
669
670 if headers is None:
671 real_headers = CIMultiDict() # type: CIMultiDict[str]
672 else:
673 real_headers = CIMultiDict(headers)
674
675 default_headers = {
676 hdrs.UPGRADE: hdrs.WEBSOCKET,
677 hdrs.CONNECTION: hdrs.UPGRADE,
678 hdrs.SEC_WEBSOCKET_VERSION: '13',
679 }
680
681 for key, value in default_headers.items():
682 real_headers.setdefault(key, value)
683
684 sec_key = base64.b64encode(os.urandom(16))
685 real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode()
686
687 if protocols:
688 real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)
689 if origin is not None:
690 real_headers[hdrs.ORIGIN] = origin
691 if compress:
692 extstr = ws_ext_gen(compress=compress)
693 real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr
694
695 if not isinstance(ssl, SSL_ALLOWED_TYPES):
696 raise TypeError("ssl should be SSLContext, bool, Fingerprint, "
697 "or None, got {!r} instead.".format(ssl))
698
699 # send request
700 resp = await self.request(method, url,
701 headers=real_headers,
702 read_until_eof=False,
703 auth=auth,
704 proxy=proxy,
705 proxy_auth=proxy_auth,
706 ssl=ssl,
707 proxy_headers=proxy_headers)
708
709 try:
710 # check handshake
711 if resp.status != 101:
712 raise WSServerHandshakeError(
713 resp.request_info,
714 resp.history,
715 message='Invalid response status',
716 status=resp.status,
717 headers=resp.headers)
718
719 if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':
720 raise WSServerHandshakeError(
721 resp.request_info,
722 resp.history,
723 message='Invalid upgrade header',
724 status=resp.status,
725 headers=resp.headers)
726
727 if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':
728 raise WSServerHandshakeError(
729 resp.request_info,
730 resp.history,
731 message='Invalid connection header',
732 status=resp.status,
733 headers=resp.headers)
734
735 # key calculation
736 key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')
737 match = base64.b64encode(
738 hashlib.sha1(sec_key + WS_KEY).digest()).decode()
739 if key != match:
740 raise WSServerHandshakeError(
741 resp.request_info,
742 resp.history,
743 message='Invalid challenge response',
744 status=resp.status,
745 headers=resp.headers)
746
747 # websocket protocol
748 protocol = None
749 if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
750 resp_protocols = [
751 proto.strip() for proto in
752 resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]
753
754 for proto in resp_protocols:
755 if proto in protocols:
756 protocol = proto
757 break
758
759 # websocket compress
760 notakeover = False
761 if compress:
762 compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
763 if compress_hdrs:
764 try:
765 compress, notakeover = ws_ext_parse(compress_hdrs)
766 except WSHandshakeError as exc:
767 raise WSServerHandshakeError(
768 resp.request_info,
769 resp.history,
770 message=exc.args[0],
771 status=resp.status,
772 headers=resp.headers)
773 else:
774 compress = 0
775 notakeover = False
776
777 conn = resp.connection
778 assert conn is not None
779 proto = conn.protocol
780 assert proto is not None
781 transport = conn.transport
782 assert transport is not None
783 reader = FlowControlDataQueue(
784 proto, limit=2 ** 16, loop=self._loop) # type: FlowControlDataQueue[WSMessage] # noqa
785 proto.set_parser(WebSocketReader(reader, max_msg_size), reader)
786 writer = WebSocketWriter(
787 proto, transport, use_mask=True,
788 compress=compress, notakeover=notakeover)
789 except BaseException:
790 resp.close()
791 raise
792 else:
793 return self._ws_response_class(reader,
794 writer,
795 protocol,
796 resp,
797 ws_timeout,
798 autoclose,
799 autoping,
800 self._loop,
801 heartbeat=heartbeat,
802 compress=compress,
803 client_notakeover=notakeover)
804
805 def _prepare_headers(
806 self,
807 headers: Optional[LooseHeaders]) -> 'CIMultiDict[str]':
808 """ Add default headers and transform it to CIMultiDict
809 """
810 # Convert headers to MultiDict
811 result = CIMultiDict(self._default_headers)
812 if headers:
813 if not isinstance(headers, (MultiDictProxy, MultiDict)):
814 headers = CIMultiDict(headers)
815 added_names = set() # type: Set[str]
816 for key, value in headers.items():
817 if key in added_names:
818 result.add(key, value)
819 else:
820 result[key] = value
821 added_names.add(key)
822 return result
823
824 def get(self, url: StrOrURL, *, allow_redirects: bool=True,
825 **kwargs: Any) -> '_RequestContextManager':
826 """Perform HTTP GET request."""
827 return _RequestContextManager(
828 self._request(hdrs.METH_GET, url,
829 allow_redirects=allow_redirects,
830 **kwargs))
831
832 def options(self, url: StrOrURL, *, allow_redirects: bool=True,
833 **kwargs: Any) -> '_RequestContextManager':
834 """Perform HTTP OPTIONS request."""
835 return _RequestContextManager(
836 self._request(hdrs.METH_OPTIONS, url,
837 allow_redirects=allow_redirects,
838 **kwargs))
839
840 def head(self, url: StrOrURL, *, allow_redirects: bool=False,
841 **kwargs: Any) -> '_RequestContextManager':
842 """Perform HTTP HEAD request."""
843 return _RequestContextManager(
844 self._request(hdrs.METH_HEAD, url,
845 allow_redirects=allow_redirects,
846 **kwargs))
847
848 def post(self, url: StrOrURL,
849 *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
850 """Perform HTTP POST request."""
851 return _RequestContextManager(
852 self._request(hdrs.METH_POST, url,
853 data=data,
854 **kwargs))
855
856 def put(self, url: StrOrURL,
857 *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
858 """Perform HTTP PUT request."""
859 return _RequestContextManager(
860 self._request(hdrs.METH_PUT, url,
861 data=data,
862 **kwargs))
863
864 def patch(self, url: StrOrURL,
865 *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
866 """Perform HTTP PATCH request."""
867 return _RequestContextManager(
868 self._request(hdrs.METH_PATCH, url,
869 data=data,
870 **kwargs))
871
872 def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':
873 """Perform HTTP DELETE request."""
874 return _RequestContextManager(
875 self._request(hdrs.METH_DELETE, url,
876 **kwargs))
877
878 async def close(self) -> None:
879 """Close underlying connector.
880
881 Release all acquired resources.
882 """
883 if not self.closed:
884 if self._connector is not None and self._connector_owner:
885 await self._connector.close()
886 self._connector = None
887
888 @property
889 def closed(self) -> bool:
890 """Is client session closed.
891
892 A readonly property.
893 """
894 return self._connector is None or self._connector.closed
895
896 @property
897 def connector(self) -> Optional[BaseConnector]:
898 """Connector instance used for the session."""
899 return self._connector
900
901 @property
902 def cookie_jar(self) -> AbstractCookieJar:
903 """The session cookies."""
904 return self._cookie_jar
905
906 @property
907 def version(self) -> Tuple[int, int]:
908 """The session HTTP protocol version."""
909 return self._version
910
911 @property
912 def requote_redirect_url(self) -> bool:
913 """Do URL requoting on redirection handling."""
914 return self._requote_redirect_url
915
916 def detach(self) -> None:
917 """Detach connector from session without closing the former.
918
919 Session is switched to closed state anyway.
920 """
921 self._connector = None
922
923 async def __aenter__(self) -> 'ClientSession':
924 return self
925
926 async def __aexit__(self,
927 exc_type: Optional[Type[BaseException]],
928 exc_val: Optional[BaseException],
929 exc_tb: Optional[TracebackType]) -> None:
930 await self.close()
931
932
933 class _BaseRequestContextManager(Coroutine[Any,
934 Any,
935 _RetType],
936 Generic[_RetType]):
937
938 __slots__ = ('_coro', '_resp')
939
940 def __init__(
941 self,
942 coro: Coroutine['asyncio.Future[Any]', None, _RetType]
943 ) -> None:
944 self._coro = coro
945
946 def send(self, arg: None) -> 'asyncio.Future[Any]':
947 return self._coro.send(arg)
948
949 def throw(self, arg: BaseException) -> None: # type: ignore
950 self._coro.throw(arg) # type: ignore
951
952 def close(self) -> None:
953 return self._coro.close()
954
955 def __await__(self) -> Generator[Any, None, _RetType]:
956 ret = self._coro.__await__()
957 return ret
958
959 def __iter__(self) -> Generator[Any, None, _RetType]:
960 return self.__await__()
961
962 async def __aenter__(self) -> _RetType:
963 self._resp = await self._coro
964 return self._resp
965
966
967 class _RequestContextManager(_BaseRequestContextManager[ClientResponse]):
968 async def __aexit__(self,
969 exc_type: Optional[Type[BaseException]],
970 exc: Optional[BaseException],
971 tb: Optional[TracebackType]) -> None:
972 # We're basing behavior on the exception as it can be caused by
973 # user code unrelated to the status of the connection. If you
974 # would like to close a connection you must do that
975 # explicitly. Otherwise connection error handling should kick in
976 # and close/recycle the connection as required.
977 self._resp.release()
978
979
980 class _WSRequestContextManager(_BaseRequestContextManager[
981 ClientWebSocketResponse]):
982 async def __aexit__(self,
983 exc_type: Optional[Type[BaseException]],
984 exc: Optional[BaseException],
985 tb: Optional[TracebackType]) -> None:
986 await self._resp.close()
987
988
989 class _SessionRequestContextManager:
990
991 __slots__ = ('_coro', '_resp', '_session')
992
993 def __init__(self,
994 coro: Coroutine['asyncio.Future[Any]', None, ClientResponse],
995 session: ClientSession) -> None:
996 self._coro = coro
997 self._resp = None # type: Optional[ClientResponse]
998 self._session = session
999
1000 async def __aenter__(self) -> ClientResponse:
1001 try:
1002 self._resp = await self._coro
1003 except BaseException:
1004 await self._session.close()
1005 raise
1006 else:
1007 return self._resp
1008
1009 async def __aexit__(self,
1010 exc_type: Optional[Type[BaseException]],
1011 exc: Optional[BaseException],
1012 tb: Optional[TracebackType]) -> None:
1013 assert self._resp is not None
1014 self._resp.close()
1015 await self._session.close()
1016
1017
1018 def request(
1019 method: str,
1020 url: StrOrURL, *,
1021 params: Optional[Mapping[str, str]]=None,
1022 data: Any=None,
1023 json: Any=None,
1024 headers: LooseHeaders=None,
1025 skip_auto_headers: Optional[Iterable[str]]=None,
1026 auth: Optional[BasicAuth]=None,
1027 allow_redirects: bool=True,
1028 max_redirects: int=10,
1029 compress: Optional[str]=None,
1030 chunked: Optional[bool]=None,
1031 expect100: bool=False,
1032 raise_for_status: Optional[bool]=None,
1033 read_until_eof: bool=True,
1034 proxy: Optional[StrOrURL]=None,
1035 proxy_auth: Optional[BasicAuth]=None,
1036 timeout: Union[ClientTimeout, object]=sentinel,
1037 cookies: Optional[LooseCookies]=None,
1038 version: HttpVersion=http.HttpVersion11,
1039 connector: Optional[BaseConnector]=None
1040 ) -> _SessionRequestContextManager:
1041 """Constructs and sends a request. Returns response object.
1042 method - HTTP method
1043 url - request url
1044 params - (optional) Dictionary or bytes to be sent in the query
1045 string of the new request
1046 data - (optional) Dictionary, bytes, or file-like object to
1047 send in the body of the request
1048 json - (optional) Any json compatible python object
1049 headers - (optional) Dictionary of HTTP Headers to send with
1050 the request
1051 cookies - (optional) Dict object to send with the request
1052 auth - (optional) BasicAuth named tuple represent HTTP Basic Auth
1053 auth - aiohttp.helpers.BasicAuth
1054 allow_redirects - (optional) If set to False, do not follow
1055 redirects
1056 version - Request HTTP version.
1057 compress - Set to True if request has to be compressed
1058 with deflate encoding.
1059 chunked - Set to chunk size for chunked transfer encoding.
1060 expect100 - Expect 100-continue response from server.
1061 connector - BaseConnector sub-class instance to support
1062 connection pooling.
1063 read_until_eof - Read response until eof if response
1064 does not have Content-Length header.
1065 loop - Optional event loop.
1066 timeout - Optional ClientTimeout settings structure, 5min
1067 total timeout by default.
1068 Usage::
1069 >>> import aiohttp
1070 >>> resp = await aiohttp.request('GET', 'http://python.org/')
1071 >>> resp
1072 <ClientResponse(python.org/) [200]>
1073 >>> data = await resp.read()
1074 """
1075 connector_owner = False
1076 if connector is None:
1077 connector_owner = True
1078 connector = TCPConnector(force_close=True)
1079
1080 session = ClientSession(
1081 cookies=cookies, version=version, timeout=timeout,
1082 connector=connector, connector_owner=connector_owner)
1083
1084 return _SessionRequestContextManager(
1085 session._request(method, url,
1086 params=params,
1087 data=data,
1088 json=json,
1089 headers=headers,
1090 skip_auto_headers=skip_auto_headers,
1091 auth=auth,
1092 allow_redirects=allow_redirects,
1093 max_redirects=max_redirects,
1094 compress=compress,
1095 chunked=chunked,
1096 expect100=expect100,
1097 raise_for_status=raise_for_status,
1098 read_until_eof=read_until_eof,
1099 proxy=proxy,
1100 proxy_auth=proxy_auth,),
1101 session)
1102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
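The issue recorded in the verification field below asks for a simple test alongside the new accessor. As an editorial aid, here is a minimal sketch of such a check, assuming an aiohttp build that already includes the reference patch shown next; the function name, the bare asserts, and the `asyncio.run` entry point are illustrative choices, not part of the original record.

```python
import asyncio

import aiohttp


async def check_timeout_property() -> None:
    # With no explicit timeout, the session should expose its default ClientTimeout.
    async with aiohttp.ClientSession() as session:
        assert isinstance(session.timeout, aiohttp.ClientTimeout)

    # A ClientTimeout passed to the constructor should be returned unchanged.
    custom = aiohttp.ClientTimeout(total=30)
    async with aiohttp.ClientSession(timeout=custom) as session:
        assert session.timeout is custom


if __name__ == "__main__":
    asyncio.run(check_timeout_property())
```

The identity check holds because the constructor stores the passed `ClientTimeout` object unchanged in `self._timeout`, which is exactly what the new read-only property returns.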
| diff --git a/aiohttp/client.py b/aiohttp/client.py
--- a/aiohttp/client.py
+++ b/aiohttp/client.py
@@ -913,6 +913,11 @@
"""Do URL requoting on redirection handling."""
return self._requote_redirect_url
+ @property
+ def timeout(self) -> Union[object, ClientTimeout]:
+ """Timeout for the session."""
+ return self._timeout
+
def detach(self) -> None:
"""Detach connector from session without closing the former.
| {"golden_diff": "diff --git a/aiohttp/client.py b/aiohttp/client.py\n--- a/aiohttp/client.py\n+++ b/aiohttp/client.py\n@@ -913,6 +913,11 @@\n \"\"\"Do URL requoting on redirection handling.\"\"\"\n return self._requote_redirect_url\n \n+ @property\n+ def timeout(self) -> Union[object, ClientTimeout]:\n+ \"\"\"Timeout for the session.\"\"\"\n+ return self._timeout\n+\n def detach(self) -> None:\n \"\"\"Detach connector from session without closing the former.\n", "issue": "Implement ClientSession.timeout property\nIt is a bare method that returns `self._timeout`.\r\n\r\nDocumentation and a simple test are required\n", "before_files": [{"content": "\"\"\"HTTP Client for asyncio.\"\"\"\n\nimport asyncio\nimport base64\nimport hashlib\nimport json\nimport os\nimport sys\nimport traceback\nimport warnings\nfrom types import SimpleNamespace, TracebackType\nfrom typing import ( # noqa\n Any,\n Awaitable,\n Callable,\n Coroutine,\n Generator,\n Generic,\n Iterable,\n List,\n Mapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport attr\nfrom multidict import CIMultiDict, MultiDict, MultiDictProxy, istr\nfrom typing_extensions import final\nfrom yarl import URL\n\nfrom . import hdrs, http, payload\nfrom .abc import AbstractCookieJar\nfrom .client_exceptions import ClientConnectionError as ClientConnectionError\nfrom .client_exceptions import (\n ClientConnectorCertificateError as ClientConnectorCertificateError,\n)\nfrom .client_exceptions import ClientConnectorError as ClientConnectorError\nfrom .client_exceptions import (\n ClientConnectorSSLError as ClientConnectorSSLError,\n)\nfrom .client_exceptions import ClientError as ClientError\nfrom .client_exceptions import ClientHttpProxyError as ClientHttpProxyError\nfrom .client_exceptions import ClientOSError as ClientOSError\nfrom .client_exceptions import ClientPayloadError as ClientPayloadError\nfrom .client_exceptions import (\n ClientProxyConnectionError as ClientProxyConnectionError,\n)\nfrom .client_exceptions import ClientResponseError as ClientResponseError\nfrom .client_exceptions import ClientSSLError as ClientSSLError\nfrom .client_exceptions import ContentTypeError as ContentTypeError\nfrom .client_exceptions import InvalidURL as InvalidURL\nfrom .client_exceptions import ServerConnectionError as ServerConnectionError\nfrom .client_exceptions import (\n ServerDisconnectedError as ServerDisconnectedError,\n)\nfrom .client_exceptions import (\n ServerFingerprintMismatch as ServerFingerprintMismatch,\n)\nfrom .client_exceptions import ServerTimeoutError as ServerTimeoutError\nfrom .client_exceptions import TooManyRedirects as TooManyRedirects\nfrom .client_exceptions import WSServerHandshakeError as WSServerHandshakeError\nfrom .client_reqrep import SSL_ALLOWED_TYPES as SSL_ALLOWED_TYPES\nfrom .client_reqrep import ClientRequest as ClientRequest\nfrom .client_reqrep import ClientResponse as ClientResponse\nfrom .client_reqrep import Fingerprint as Fingerprint\nfrom .client_reqrep import RequestInfo as RequestInfo\nfrom .client_ws import DEFAULT_WS_CLIENT_TIMEOUT\nfrom .client_ws import ClientWebSocketResponse as ClientWebSocketResponse\nfrom .client_ws import ClientWSTimeout\nfrom .connector import BaseConnector as BaseConnector\nfrom .connector import NamedPipeConnector as NamedPipeConnector\nfrom .connector import TCPConnector as TCPConnector\nfrom .connector import UnixConnector as UnixConnector\nfrom .cookiejar import CookieJar\nfrom .helpers import (\n PY_36,\n BasicAuth,\n CeilTimeout,\n TimeoutHandle,\n 
get_running_loop,\n proxies_from_env,\n sentinel,\n strip_auth_from_url,\n)\nfrom .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter\nfrom .http_websocket import ( # noqa\n WSHandshakeError,\n WSMessage,\n ws_ext_gen,\n ws_ext_parse,\n)\nfrom .streams import FlowControlDataQueue\nfrom .tracing import Trace, TraceConfig\nfrom .typedefs import JSONEncoder, LooseCookies, LooseHeaders, StrOrURL\n\n__all__ = (\n # client_exceptions\n 'ClientConnectionError',\n 'ClientConnectorCertificateError',\n 'ClientConnectorError',\n 'ClientConnectorSSLError',\n 'ClientError',\n 'ClientHttpProxyError',\n 'ClientOSError',\n 'ClientPayloadError',\n 'ClientProxyConnectionError',\n 'ClientResponseError',\n 'ClientSSLError',\n 'ContentTypeError',\n 'InvalidURL',\n 'ServerConnectionError',\n 'ServerDisconnectedError',\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TooManyRedirects',\n 'WSServerHandshakeError',\n # client_reqrep\n 'ClientRequest',\n 'ClientResponse',\n 'Fingerprint',\n 'RequestInfo',\n # connector\n 'BaseConnector',\n 'TCPConnector',\n 'UnixConnector',\n 'NamedPipeConnector',\n # client_ws\n 'ClientWebSocketResponse',\n # client\n 'ClientSession',\n 'ClientTimeout',\n 'request')\n\n\ntry:\n from ssl import SSLContext\nexcept ImportError: # pragma: no cover\n SSLContext = object # type: ignore\n\n\[email protected](frozen=True, slots=True)\nclass ClientTimeout:\n total = attr.ib(type=Optional[float], default=None)\n connect = attr.ib(type=Optional[float], default=None)\n sock_read = attr.ib(type=Optional[float], default=None)\n sock_connect = attr.ib(type=Optional[float], default=None)\n\n # pool_queue_timeout = attr.ib(type=float, default=None)\n # dns_resolution_timeout = attr.ib(type=float, default=None)\n # socket_connect_timeout = attr.ib(type=float, default=None)\n # connection_acquiring_timeout = attr.ib(type=float, default=None)\n # new_connection_timeout = attr.ib(type=float, default=None)\n # http_header_timeout = attr.ib(type=float, default=None)\n # response_body_timeout = attr.ib(type=float, default=None)\n\n # to create a timeout specific for a single request, either\n # - create a completely new one to overwrite the default\n # - or use http://www.attrs.org/en/stable/api.html#attr.evolve\n # to overwrite the defaults\n\n\n# 5 Minute default read timeout\nDEFAULT_TIMEOUT = ClientTimeout(total=5*60)\n\n_RetType = TypeVar('_RetType')\n\n\n@final\nclass ClientSession:\n \"\"\"First-class interface for making HTTP requests.\"\"\"\n\n __slots__ = (\n '_source_traceback', '_connector',\n '_loop', '_cookie_jar',\n '_connector_owner', '_default_auth',\n '_version', '_json_serialize',\n '_requote_redirect_url',\n '_timeout', '_raise_for_status', '_auto_decompress',\n '_trust_env', '_default_headers', '_skip_auto_headers',\n '_request_class', '_response_class',\n '_ws_response_class', '_trace_configs')\n\n def __init__(self, *, connector: Optional[BaseConnector]=None,\n cookies: Optional[LooseCookies]=None,\n headers: Optional[LooseHeaders]=None,\n skip_auto_headers: Optional[Iterable[str]]=None,\n auth: Optional[BasicAuth]=None,\n json_serialize: JSONEncoder=json.dumps,\n request_class: Type[ClientRequest]=ClientRequest,\n response_class: Type[ClientResponse]=ClientResponse,\n ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, # noqa\n version: HttpVersion=http.HttpVersion11,\n cookie_jar: Optional[AbstractCookieJar]=None,\n connector_owner: bool=True,\n raise_for_status: Union[bool, Callable[[ClientResponse], Awaitable[None]]]=False, # noqa\n 
timeout: Union[object, ClientTimeout]=sentinel,\n auto_decompress: bool=True,\n trust_env: bool=False,\n requote_redirect_url: bool=True,\n trace_configs: Optional[List[TraceConfig]]=None) -> None:\n\n loop = get_running_loop()\n\n if connector is None:\n connector = TCPConnector()\n\n # Initialize these three attrs before raising any exception,\n # they are used in __del__\n self._connector = connector # type: Optional[BaseConnector]\n self._loop = loop\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1)) # type: Optional[traceback.StackSummary] # noqa\n else:\n self._source_traceback = None\n\n if connector._loop is not loop:\n raise RuntimeError(\n \"Session and connector have to use same event loop\")\n\n if cookie_jar is None:\n cookie_jar = CookieJar()\n self._cookie_jar = cookie_jar\n\n if cookies is not None:\n self._cookie_jar.update_cookies(cookies)\n\n self._connector_owner = connector_owner\n self._default_auth = auth\n self._version = version\n self._json_serialize = json_serialize\n if timeout is sentinel:\n self._timeout = DEFAULT_TIMEOUT\n else:\n self._timeout = timeout # type: ignore\n self._raise_for_status = raise_for_status\n self._auto_decompress = auto_decompress\n self._trust_env = trust_env\n self._requote_redirect_url = requote_redirect_url\n\n # Convert to list of tuples\n if headers:\n real_headers = CIMultiDict(headers) # type: CIMultiDict[str]\n else:\n real_headers = CIMultiDict()\n self._default_headers = real_headers # type: CIMultiDict[str]\n if skip_auto_headers is not None:\n self._skip_auto_headers = frozenset([istr(i)\n for i in skip_auto_headers])\n else:\n self._skip_auto_headers = frozenset()\n\n self._request_class = request_class\n self._response_class = response_class\n self._ws_response_class = ws_response_class\n\n self._trace_configs = trace_configs or []\n for trace_config in self._trace_configs:\n trace_config.freeze()\n\n def __init_subclass__(cls: Type['ClientSession']) -> None:\n raise TypeError(\"Inheritance class {} from ClientSession \"\n \"is forbidden\".format(cls.__name__))\n\n def __del__(self, _warnings: Any=warnings) -> None:\n if not self.closed:\n if PY_36:\n kwargs = {'source': self}\n else:\n kwargs = {}\n _warnings.warn(\"Unclosed client session {!r}\".format(self),\n ResourceWarning,\n **kwargs)\n context = {'client_session': self,\n 'message': 'Unclosed client session'}\n if self._source_traceback is not None:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n\n def request(self,\n method: str,\n url: StrOrURL,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP request.\"\"\"\n return _RequestContextManager(self._request(method, url, **kwargs))\n\n async def _request(\n self,\n method: str,\n str_or_url: StrOrURL, *,\n params: Optional[Mapping[str, str]]=None,\n data: Any=None,\n json: Any=None,\n cookies: Optional[LooseCookies]=None,\n headers: LooseHeaders=None,\n skip_auto_headers: Optional[Iterable[str]]=None,\n auth: Optional[BasicAuth]=None,\n allow_redirects: bool=True,\n max_redirects: int=10,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n raise_for_status: Union[None, bool, Callable[[ClientResponse], Awaitable[None]]]=None, # noqa\n read_until_eof: bool=True,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timeout: Union[ClientTimeout, object]=sentinel,\n ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None,\n proxy_headers: 
Optional[LooseHeaders]=None,\n trace_request_ctx: Optional[SimpleNamespace]=None\n ) -> ClientResponse:\n\n # NOTE: timeout clamps existing connect and read timeouts. We cannot\n # set the default to None because we need to detect if the user wants\n # to use the existing timeouts by setting timeout to None.\n\n if self.closed:\n raise RuntimeError('Session is closed')\n\n if not isinstance(ssl, SSL_ALLOWED_TYPES):\n raise TypeError(\"ssl should be SSLContext, bool, Fingerprint, \"\n \"or None, got {!r} instead.\".format(ssl))\n\n if data is not None and json is not None:\n raise ValueError(\n 'data and json parameters can not be used at the same time')\n elif json is not None:\n data = payload.JsonPayload(json, dumps=self._json_serialize)\n\n redirects = 0\n history = []\n version = self._version\n\n # Merge with default headers and transform to CIMultiDict\n headers = self._prepare_headers(headers)\n proxy_headers = self._prepare_headers(proxy_headers)\n\n try:\n url = URL(str_or_url)\n except ValueError:\n raise InvalidURL(str_or_url)\n\n skip_headers = set(self._skip_auto_headers)\n if skip_auto_headers is not None:\n for i in skip_auto_headers:\n skip_headers.add(istr(i))\n\n if proxy is not None:\n try:\n proxy = URL(proxy)\n except ValueError:\n raise InvalidURL(proxy)\n\n if timeout is sentinel:\n real_timeout = self._timeout # type: ClientTimeout\n else:\n if not isinstance(timeout, ClientTimeout):\n real_timeout = ClientTimeout(total=timeout) # type: ignore\n else:\n real_timeout = timeout\n # timeout is cumulative for all request operations\n # (request, redirects, responses, data consuming)\n tm = TimeoutHandle(self._loop, real_timeout.total)\n handle = tm.start()\n\n traces = [\n Trace(\n self,\n trace_config,\n trace_config.trace_config_ctx(\n trace_request_ctx=trace_request_ctx)\n )\n for trace_config in self._trace_configs\n ]\n\n for trace in traces:\n await trace.send_request_start(\n method,\n url,\n headers\n )\n\n timer = tm.timer()\n try:\n with timer:\n while True:\n url, auth_from_url = strip_auth_from_url(url)\n if auth and auth_from_url:\n raise ValueError(\"Cannot combine AUTH argument with \"\n \"credentials encoded in URL\")\n\n if auth is None:\n auth = auth_from_url\n if auth is None:\n auth = self._default_auth\n # It would be confusing if we support explicit\n # Authorization header with auth argument\n if (headers is not None and\n auth is not None and\n hdrs.AUTHORIZATION in headers):\n raise ValueError(\"Cannot combine AUTHORIZATION header \"\n \"with AUTH argument or credentials \"\n \"encoded in URL\")\n\n all_cookies = self._cookie_jar.filter_cookies(url)\n\n if cookies is not None:\n tmp_cookie_jar = CookieJar()\n tmp_cookie_jar.update_cookies(cookies)\n req_cookies = tmp_cookie_jar.filter_cookies(url)\n if req_cookies:\n all_cookies.load(req_cookies)\n\n if proxy is not None:\n proxy = URL(proxy)\n elif self._trust_env:\n for scheme, proxy_info in proxies_from_env().items():\n if scheme == url.scheme:\n proxy = proxy_info.proxy\n proxy_auth = proxy_info.proxy_auth\n break\n\n req = self._request_class(\n method, url, params=params, headers=headers,\n skip_auto_headers=skip_headers, data=data,\n cookies=all_cookies, auth=auth, version=version,\n compress=compress, chunked=chunked,\n expect100=expect100, loop=self._loop,\n response_class=self._response_class,\n proxy=proxy, proxy_auth=proxy_auth, timer=timer,\n session=self,\n ssl=ssl, proxy_headers=proxy_headers, traces=traces)\n\n # connection timeout\n try:\n with 
CeilTimeout(real_timeout.connect,\n loop=self._loop):\n assert self._connector is not None\n conn = await self._connector.connect(\n req,\n traces=traces,\n timeout=real_timeout\n )\n except asyncio.TimeoutError as exc:\n raise ServerTimeoutError(\n 'Connection timeout '\n 'to host {0}'.format(url)) from exc\n\n assert conn.transport is not None\n\n assert conn.protocol is not None\n conn.protocol.set_response_params(\n timer=timer,\n skip_payload=method.upper() == 'HEAD',\n read_until_eof=read_until_eof,\n auto_decompress=self._auto_decompress,\n read_timeout=real_timeout.sock_read)\n\n try:\n try:\n resp = await req.send(conn)\n try:\n await resp.start(conn)\n except BaseException:\n resp.close()\n raise\n except BaseException:\n conn.close()\n raise\n except ClientError:\n raise\n except OSError as exc:\n raise ClientOSError(*exc.args) from exc\n\n self._cookie_jar.update_cookies(resp.cookies, resp.url)\n\n # redirects\n if resp.status in (\n 301, 302, 303, 307, 308) and allow_redirects:\n\n for trace in traces:\n await trace.send_request_redirect(\n method,\n url,\n headers,\n resp\n )\n\n redirects += 1\n history.append(resp)\n if max_redirects and redirects >= max_redirects:\n resp.close()\n raise TooManyRedirects(\n history[0].request_info, tuple(history))\n\n # For 301 and 302, mimic IE, now changed in RFC\n # https://github.com/kennethreitz/requests/pull/269\n if (resp.status == 303 and\n resp.method != hdrs.METH_HEAD) \\\n or (resp.status in (301, 302) and\n resp.method == hdrs.METH_POST):\n method = hdrs.METH_GET\n data = None\n if headers.get(hdrs.CONTENT_LENGTH):\n headers.pop(hdrs.CONTENT_LENGTH)\n\n r_url = (resp.headers.get(hdrs.LOCATION) or\n resp.headers.get(hdrs.URI))\n if r_url is None:\n # see github.com/aio-libs/aiohttp/issues/2022\n break\n else:\n # reading from correct redirection\n # response is forbidden\n resp.release()\n\n try:\n r_url = URL(\n r_url, encoded=not self._requote_redirect_url)\n\n except ValueError:\n raise InvalidURL(r_url)\n\n scheme = r_url.scheme\n if scheme not in ('http', 'https', ''):\n resp.close()\n raise ValueError(\n 'Can redirect only to http or https')\n elif not scheme:\n r_url = url.join(r_url)\n\n if url.origin() != r_url.origin():\n auth = None\n headers.pop(hdrs.AUTHORIZATION, None)\n\n url = r_url\n params = None\n resp.release()\n continue\n\n break\n\n # check response status\n if raise_for_status is None:\n raise_for_status = self._raise_for_status\n\n if raise_for_status is None:\n pass\n elif callable(raise_for_status):\n await raise_for_status(resp)\n elif raise_for_status:\n resp.raise_for_status()\n\n # register connection\n if handle is not None:\n if resp.connection is not None:\n resp.connection.add_callback(handle.cancel)\n else:\n handle.cancel()\n\n resp._history = tuple(history)\n\n for trace in traces:\n await trace.send_request_end(\n method,\n url,\n headers,\n resp\n )\n return resp\n\n except BaseException as e:\n # cleanup timer\n tm.close()\n if handle:\n handle.cancel()\n handle = None\n\n for trace in traces:\n await trace.send_request_exception(\n method,\n url,\n headers,\n e\n )\n raise\n\n def ws_connect(\n self,\n url: StrOrURL, *,\n method: str=hdrs.METH_GET,\n protocols: Iterable[str]=(),\n timeout: Union[ClientWSTimeout, float]=sentinel,\n receive_timeout: Optional[float]=None,\n autoclose: bool=True,\n autoping: bool=True,\n heartbeat: Optional[float]=None,\n auth: Optional[BasicAuth]=None,\n origin: Optional[str]=None,\n headers: Optional[LooseHeaders]=None,\n proxy: Optional[StrOrURL]=None,\n 
proxy_auth: Optional[BasicAuth]=None,\n ssl: Union[SSLContext, bool, None, Fingerprint]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n compress: int=0,\n max_msg_size: int=4*1024*1024) -> '_WSRequestContextManager':\n \"\"\"Initiate websocket connection.\"\"\"\n return _WSRequestContextManager(\n self._ws_connect(url,\n method=method,\n protocols=protocols,\n timeout=timeout,\n receive_timeout=receive_timeout,\n autoclose=autoclose,\n autoping=autoping,\n heartbeat=heartbeat,\n auth=auth,\n origin=origin,\n headers=headers,\n proxy=proxy,\n proxy_auth=proxy_auth,\n ssl=ssl,\n proxy_headers=proxy_headers,\n compress=compress,\n max_msg_size=max_msg_size))\n\n async def _ws_connect(\n self,\n url: StrOrURL, *,\n method: str=hdrs.METH_GET,\n protocols: Iterable[str]=(),\n timeout: Union[ClientWSTimeout, float]=sentinel,\n receive_timeout: Optional[float]=None,\n autoclose: bool=True,\n autoping: bool=True,\n heartbeat: Optional[float]=None,\n auth: Optional[BasicAuth]=None,\n origin: Optional[str]=None,\n headers: Optional[LooseHeaders]=None,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n ssl: Union[SSLContext, bool, None, Fingerprint]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n compress: int=0,\n max_msg_size: int=4*1024*1024\n ) -> ClientWebSocketResponse:\n if timeout is not sentinel:\n if isinstance(timeout, ClientWSTimeout):\n ws_timeout = timeout\n else:\n warnings.warn(\"parameter 'timeout' of type 'float' \"\n \"is deprecated, please use \"\n \"'timeout=ClientWSTimeout(ws_close=...)'\",\n DeprecationWarning,\n stacklevel=2)\n ws_timeout = ClientWSTimeout(ws_close=timeout)\n else:\n ws_timeout = DEFAULT_WS_CLIENT_TIMEOUT\n if receive_timeout is not None:\n warnings.warn(\"float parameter 'receive_timeout' \"\n \"is deprecated, please use parameter \"\n \"'timeout=ClientWSTimeout(ws_receive=...)'\",\n DeprecationWarning,\n stacklevel=2)\n ws_timeout = attr.evolve(ws_timeout, ws_receive=receive_timeout)\n\n if headers is None:\n real_headers = CIMultiDict() # type: CIMultiDict[str]\n else:\n real_headers = CIMultiDict(headers)\n\n default_headers = {\n hdrs.UPGRADE: hdrs.WEBSOCKET,\n hdrs.CONNECTION: hdrs.UPGRADE,\n hdrs.SEC_WEBSOCKET_VERSION: '13',\n }\n\n for key, value in default_headers.items():\n real_headers.setdefault(key, value)\n\n sec_key = base64.b64encode(os.urandom(16))\n real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode()\n\n if protocols:\n real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)\n if origin is not None:\n real_headers[hdrs.ORIGIN] = origin\n if compress:\n extstr = ws_ext_gen(compress=compress)\n real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr\n\n if not isinstance(ssl, SSL_ALLOWED_TYPES):\n raise TypeError(\"ssl should be SSLContext, bool, Fingerprint, \"\n \"or None, got {!r} instead.\".format(ssl))\n\n # send request\n resp = await self.request(method, url,\n headers=real_headers,\n read_until_eof=False,\n auth=auth,\n proxy=proxy,\n proxy_auth=proxy_auth,\n ssl=ssl,\n proxy_headers=proxy_headers)\n\n try:\n # check handshake\n if resp.status != 101:\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid response status',\n status=resp.status,\n headers=resp.headers)\n\n if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid upgrade header',\n status=resp.status,\n headers=resp.headers)\n\n if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':\n raise 
WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid connection header',\n status=resp.status,\n headers=resp.headers)\n\n # key calculation\n key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')\n match = base64.b64encode(\n hashlib.sha1(sec_key + WS_KEY).digest()).decode()\n if key != match:\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid challenge response',\n status=resp.status,\n headers=resp.headers)\n\n # websocket protocol\n protocol = None\n if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:\n resp_protocols = [\n proto.strip() for proto in\n resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]\n\n for proto in resp_protocols:\n if proto in protocols:\n protocol = proto\n break\n\n # websocket compress\n notakeover = False\n if compress:\n compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)\n if compress_hdrs:\n try:\n compress, notakeover = ws_ext_parse(compress_hdrs)\n except WSHandshakeError as exc:\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message=exc.args[0],\n status=resp.status,\n headers=resp.headers)\n else:\n compress = 0\n notakeover = False\n\n conn = resp.connection\n assert conn is not None\n proto = conn.protocol\n assert proto is not None\n transport = conn.transport\n assert transport is not None\n reader = FlowControlDataQueue(\n proto, limit=2 ** 16, loop=self._loop) # type: FlowControlDataQueue[WSMessage] # noqa\n proto.set_parser(WebSocketReader(reader, max_msg_size), reader)\n writer = WebSocketWriter(\n proto, transport, use_mask=True,\n compress=compress, notakeover=notakeover)\n except BaseException:\n resp.close()\n raise\n else:\n return self._ws_response_class(reader,\n writer,\n protocol,\n resp,\n ws_timeout,\n autoclose,\n autoping,\n self._loop,\n heartbeat=heartbeat,\n compress=compress,\n client_notakeover=notakeover)\n\n def _prepare_headers(\n self,\n headers: Optional[LooseHeaders]) -> 'CIMultiDict[str]':\n \"\"\" Add default headers and transform it to CIMultiDict\n \"\"\"\n # Convert headers to MultiDict\n result = CIMultiDict(self._default_headers)\n if headers:\n if not isinstance(headers, (MultiDictProxy, MultiDict)):\n headers = CIMultiDict(headers)\n added_names = set() # type: Set[str]\n for key, value in headers.items():\n if key in added_names:\n result.add(key, value)\n else:\n result[key] = value\n added_names.add(key)\n return result\n\n def get(self, url: StrOrURL, *, allow_redirects: bool=True,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP GET request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_GET, url,\n allow_redirects=allow_redirects,\n **kwargs))\n\n def options(self, url: StrOrURL, *, allow_redirects: bool=True,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP OPTIONS request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_OPTIONS, url,\n allow_redirects=allow_redirects,\n **kwargs))\n\n def head(self, url: StrOrURL, *, allow_redirects: bool=False,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP HEAD request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_HEAD, url,\n allow_redirects=allow_redirects,\n **kwargs))\n\n def post(self, url: StrOrURL,\n *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP POST request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_POST, url,\n data=data,\n **kwargs))\n\n def put(self, url: StrOrURL,\n *, data: 
Any=None, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP PUT request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_PUT, url,\n data=data,\n **kwargs))\n\n def patch(self, url: StrOrURL,\n *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP PATCH request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_PATCH, url,\n data=data,\n **kwargs))\n\n def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP DELETE request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_DELETE, url,\n **kwargs))\n\n async def close(self) -> None:\n \"\"\"Close underlying connector.\n\n Release all acquired resources.\n \"\"\"\n if not self.closed:\n if self._connector is not None and self._connector_owner:\n await self._connector.close()\n self._connector = None\n\n @property\n def closed(self) -> bool:\n \"\"\"Is client session closed.\n\n A readonly property.\n \"\"\"\n return self._connector is None or self._connector.closed\n\n @property\n def connector(self) -> Optional[BaseConnector]:\n \"\"\"Connector instance used for the session.\"\"\"\n return self._connector\n\n @property\n def cookie_jar(self) -> AbstractCookieJar:\n \"\"\"The session cookies.\"\"\"\n return self._cookie_jar\n\n @property\n def version(self) -> Tuple[int, int]:\n \"\"\"The session HTTP protocol version.\"\"\"\n return self._version\n\n @property\n def requote_redirect_url(self) -> bool:\n \"\"\"Do URL requoting on redirection handling.\"\"\"\n return self._requote_redirect_url\n\n def detach(self) -> None:\n \"\"\"Detach connector from session without closing the former.\n\n Session is switched to closed state anyway.\n \"\"\"\n self._connector = None\n\n async def __aenter__(self) -> 'ClientSession':\n return self\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType]) -> None:\n await self.close()\n\n\nclass _BaseRequestContextManager(Coroutine[Any,\n Any,\n _RetType],\n Generic[_RetType]):\n\n __slots__ = ('_coro', '_resp')\n\n def __init__(\n self,\n coro: Coroutine['asyncio.Future[Any]', None, _RetType]\n ) -> None:\n self._coro = coro\n\n def send(self, arg: None) -> 'asyncio.Future[Any]':\n return self._coro.send(arg)\n\n def throw(self, arg: BaseException) -> None: # type: ignore\n self._coro.throw(arg) # type: ignore\n\n def close(self) -> None:\n return self._coro.close()\n\n def __await__(self) -> Generator[Any, None, _RetType]:\n ret = self._coro.__await__()\n return ret\n\n def __iter__(self) -> Generator[Any, None, _RetType]:\n return self.__await__()\n\n async def __aenter__(self) -> _RetType:\n self._resp = await self._coro\n return self._resp\n\n\nclass _RequestContextManager(_BaseRequestContextManager[ClientResponse]):\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc: Optional[BaseException],\n tb: Optional[TracebackType]) -> None:\n # We're basing behavior on the exception as it can be caused by\n # user code unrelated to the status of the connection. If you\n # would like to close a connection you must do that\n # explicitly. 
Otherwise connection error handling should kick in\n # and close/recycle the connection as required.\n self._resp.release()\n\n\nclass _WSRequestContextManager(_BaseRequestContextManager[\n ClientWebSocketResponse]):\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc: Optional[BaseException],\n tb: Optional[TracebackType]) -> None:\n await self._resp.close()\n\n\nclass _SessionRequestContextManager:\n\n __slots__ = ('_coro', '_resp', '_session')\n\n def __init__(self,\n coro: Coroutine['asyncio.Future[Any]', None, ClientResponse],\n session: ClientSession) -> None:\n self._coro = coro\n self._resp = None # type: Optional[ClientResponse]\n self._session = session\n\n async def __aenter__(self) -> ClientResponse:\n try:\n self._resp = await self._coro\n except BaseException:\n await self._session.close()\n raise\n else:\n return self._resp\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc: Optional[BaseException],\n tb: Optional[TracebackType]) -> None:\n assert self._resp is not None\n self._resp.close()\n await self._session.close()\n\n\ndef request(\n method: str,\n url: StrOrURL, *,\n params: Optional[Mapping[str, str]]=None,\n data: Any=None,\n json: Any=None,\n headers: LooseHeaders=None,\n skip_auto_headers: Optional[Iterable[str]]=None,\n auth: Optional[BasicAuth]=None,\n allow_redirects: bool=True,\n max_redirects: int=10,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n raise_for_status: Optional[bool]=None,\n read_until_eof: bool=True,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timeout: Union[ClientTimeout, object]=sentinel,\n cookies: Optional[LooseCookies]=None,\n version: HttpVersion=http.HttpVersion11,\n connector: Optional[BaseConnector]=None\n) -> _SessionRequestContextManager:\n \"\"\"Constructs and sends a request. 
Returns response object.\n method - HTTP method\n url - request url\n params - (optional) Dictionary or bytes to be sent in the query\n string of the new request\n data - (optional) Dictionary, bytes, or file-like object to\n send in the body of the request\n json - (optional) Any json compatible python object\n headers - (optional) Dictionary of HTTP Headers to send with\n the request\n cookies - (optional) Dict object to send with the request\n auth - (optional) BasicAuth named tuple represent HTTP Basic Auth\n auth - aiohttp.helpers.BasicAuth\n allow_redirects - (optional) If set to False, do not follow\n redirects\n version - Request HTTP version.\n compress - Set to True if request has to be compressed\n with deflate encoding.\n chunked - Set to chunk size for chunked transfer encoding.\n expect100 - Expect 100-continue response from server.\n connector - BaseConnector sub-class instance to support\n connection pooling.\n read_until_eof - Read response until eof if response\n does not have Content-Length header.\n loop - Optional event loop.\n timeout - Optional ClientTimeout settings structure, 5min\n total timeout by default.\n Usage::\n >>> import aiohttp\n >>> resp = await aiohttp.request('GET', 'http://python.org/')\n >>> resp\n <ClientResponse(python.org/) [200]>\n >>> data = await resp.read()\n \"\"\"\n connector_owner = False\n if connector is None:\n connector_owner = True\n connector = TCPConnector(force_close=True)\n\n session = ClientSession(\n cookies=cookies, version=version, timeout=timeout,\n connector=connector, connector_owner=connector_owner)\n\n return _SessionRequestContextManager(\n session._request(method, url,\n params=params,\n data=data,\n json=json,\n headers=headers,\n skip_auto_headers=skip_auto_headers,\n auth=auth,\n allow_redirects=allow_redirects,\n max_redirects=max_redirects,\n compress=compress,\n chunked=chunked,\n expect100=expect100,\n raise_for_status=raise_for_status,\n read_until_eof=read_until_eof,\n proxy=proxy,\n proxy_auth=proxy_auth,),\n session)\n", "path": "aiohttp/client.py"}], "after_files": [{"content": "\"\"\"HTTP Client for asyncio.\"\"\"\n\nimport asyncio\nimport base64\nimport hashlib\nimport json\nimport os\nimport sys\nimport traceback\nimport warnings\nfrom types import SimpleNamespace, TracebackType\nfrom typing import ( # noqa\n Any,\n Awaitable,\n Callable,\n Coroutine,\n Generator,\n Generic,\n Iterable,\n List,\n Mapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport attr\nfrom multidict import CIMultiDict, MultiDict, MultiDictProxy, istr\nfrom typing_extensions import final\nfrom yarl import URL\n\nfrom . 
import hdrs, http, payload\nfrom .abc import AbstractCookieJar\nfrom .client_exceptions import ClientConnectionError as ClientConnectionError\nfrom .client_exceptions import (\n ClientConnectorCertificateError as ClientConnectorCertificateError,\n)\nfrom .client_exceptions import ClientConnectorError as ClientConnectorError\nfrom .client_exceptions import (\n ClientConnectorSSLError as ClientConnectorSSLError,\n)\nfrom .client_exceptions import ClientError as ClientError\nfrom .client_exceptions import ClientHttpProxyError as ClientHttpProxyError\nfrom .client_exceptions import ClientOSError as ClientOSError\nfrom .client_exceptions import ClientPayloadError as ClientPayloadError\nfrom .client_exceptions import (\n ClientProxyConnectionError as ClientProxyConnectionError,\n)\nfrom .client_exceptions import ClientResponseError as ClientResponseError\nfrom .client_exceptions import ClientSSLError as ClientSSLError\nfrom .client_exceptions import ContentTypeError as ContentTypeError\nfrom .client_exceptions import InvalidURL as InvalidURL\nfrom .client_exceptions import ServerConnectionError as ServerConnectionError\nfrom .client_exceptions import (\n ServerDisconnectedError as ServerDisconnectedError,\n)\nfrom .client_exceptions import (\n ServerFingerprintMismatch as ServerFingerprintMismatch,\n)\nfrom .client_exceptions import ServerTimeoutError as ServerTimeoutError\nfrom .client_exceptions import TooManyRedirects as TooManyRedirects\nfrom .client_exceptions import WSServerHandshakeError as WSServerHandshakeError\nfrom .client_reqrep import SSL_ALLOWED_TYPES as SSL_ALLOWED_TYPES\nfrom .client_reqrep import ClientRequest as ClientRequest\nfrom .client_reqrep import ClientResponse as ClientResponse\nfrom .client_reqrep import Fingerprint as Fingerprint\nfrom .client_reqrep import RequestInfo as RequestInfo\nfrom .client_ws import DEFAULT_WS_CLIENT_TIMEOUT\nfrom .client_ws import ClientWebSocketResponse as ClientWebSocketResponse\nfrom .client_ws import ClientWSTimeout\nfrom .connector import BaseConnector as BaseConnector\nfrom .connector import NamedPipeConnector as NamedPipeConnector\nfrom .connector import TCPConnector as TCPConnector\nfrom .connector import UnixConnector as UnixConnector\nfrom .cookiejar import CookieJar\nfrom .helpers import (\n PY_36,\n BasicAuth,\n CeilTimeout,\n TimeoutHandle,\n get_running_loop,\n proxies_from_env,\n sentinel,\n strip_auth_from_url,\n)\nfrom .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter\nfrom .http_websocket import ( # noqa\n WSHandshakeError,\n WSMessage,\n ws_ext_gen,\n ws_ext_parse,\n)\nfrom .streams import FlowControlDataQueue\nfrom .tracing import Trace, TraceConfig\nfrom .typedefs import JSONEncoder, LooseCookies, LooseHeaders, StrOrURL\n\n__all__ = (\n # client_exceptions\n 'ClientConnectionError',\n 'ClientConnectorCertificateError',\n 'ClientConnectorError',\n 'ClientConnectorSSLError',\n 'ClientError',\n 'ClientHttpProxyError',\n 'ClientOSError',\n 'ClientPayloadError',\n 'ClientProxyConnectionError',\n 'ClientResponseError',\n 'ClientSSLError',\n 'ContentTypeError',\n 'InvalidURL',\n 'ServerConnectionError',\n 'ServerDisconnectedError',\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TooManyRedirects',\n 'WSServerHandshakeError',\n # client_reqrep\n 'ClientRequest',\n 'ClientResponse',\n 'Fingerprint',\n 'RequestInfo',\n # connector\n 'BaseConnector',\n 'TCPConnector',\n 'UnixConnector',\n 'NamedPipeConnector',\n # client_ws\n 'ClientWebSocketResponse',\n # client\n 'ClientSession',\n 'ClientTimeout',\n 
'request')\n\n\ntry:\n from ssl import SSLContext\nexcept ImportError: # pragma: no cover\n SSLContext = object # type: ignore\n\n\[email protected](frozen=True, slots=True)\nclass ClientTimeout:\n total = attr.ib(type=Optional[float], default=None)\n connect = attr.ib(type=Optional[float], default=None)\n sock_read = attr.ib(type=Optional[float], default=None)\n sock_connect = attr.ib(type=Optional[float], default=None)\n\n # pool_queue_timeout = attr.ib(type=float, default=None)\n # dns_resolution_timeout = attr.ib(type=float, default=None)\n # socket_connect_timeout = attr.ib(type=float, default=None)\n # connection_acquiring_timeout = attr.ib(type=float, default=None)\n # new_connection_timeout = attr.ib(type=float, default=None)\n # http_header_timeout = attr.ib(type=float, default=None)\n # response_body_timeout = attr.ib(type=float, default=None)\n\n # to create a timeout specific for a single request, either\n # - create a completely new one to overwrite the default\n # - or use http://www.attrs.org/en/stable/api.html#attr.evolve\n # to overwrite the defaults\n\n\n# 5 Minute default read timeout\nDEFAULT_TIMEOUT = ClientTimeout(total=5*60)\n\n_RetType = TypeVar('_RetType')\n\n\n@final\nclass ClientSession:\n \"\"\"First-class interface for making HTTP requests.\"\"\"\n\n __slots__ = (\n '_source_traceback', '_connector',\n '_loop', '_cookie_jar',\n '_connector_owner', '_default_auth',\n '_version', '_json_serialize',\n '_requote_redirect_url',\n '_timeout', '_raise_for_status', '_auto_decompress',\n '_trust_env', '_default_headers', '_skip_auto_headers',\n '_request_class', '_response_class',\n '_ws_response_class', '_trace_configs')\n\n def __init__(self, *, connector: Optional[BaseConnector]=None,\n cookies: Optional[LooseCookies]=None,\n headers: Optional[LooseHeaders]=None,\n skip_auto_headers: Optional[Iterable[str]]=None,\n auth: Optional[BasicAuth]=None,\n json_serialize: JSONEncoder=json.dumps,\n request_class: Type[ClientRequest]=ClientRequest,\n response_class: Type[ClientResponse]=ClientResponse,\n ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, # noqa\n version: HttpVersion=http.HttpVersion11,\n cookie_jar: Optional[AbstractCookieJar]=None,\n connector_owner: bool=True,\n raise_for_status: Union[bool, Callable[[ClientResponse], Awaitable[None]]]=False, # noqa\n timeout: Union[object, ClientTimeout]=sentinel,\n auto_decompress: bool=True,\n trust_env: bool=False,\n requote_redirect_url: bool=True,\n trace_configs: Optional[List[TraceConfig]]=None) -> None:\n\n loop = get_running_loop()\n\n if connector is None:\n connector = TCPConnector()\n\n # Initialize these three attrs before raising any exception,\n # they are used in __del__\n self._connector = connector # type: Optional[BaseConnector]\n self._loop = loop\n if loop.get_debug():\n self._source_traceback = traceback.extract_stack(sys._getframe(1)) # type: Optional[traceback.StackSummary] # noqa\n else:\n self._source_traceback = None\n\n if connector._loop is not loop:\n raise RuntimeError(\n \"Session and connector have to use same event loop\")\n\n if cookie_jar is None:\n cookie_jar = CookieJar()\n self._cookie_jar = cookie_jar\n\n if cookies is not None:\n self._cookie_jar.update_cookies(cookies)\n\n self._connector_owner = connector_owner\n self._default_auth = auth\n self._version = version\n self._json_serialize = json_serialize\n if timeout is sentinel:\n self._timeout = DEFAULT_TIMEOUT\n else:\n self._timeout = timeout # type: ignore\n self._raise_for_status = raise_for_status\n 
self._auto_decompress = auto_decompress\n self._trust_env = trust_env\n self._requote_redirect_url = requote_redirect_url\n\n # Convert to list of tuples\n if headers:\n real_headers = CIMultiDict(headers) # type: CIMultiDict[str]\n else:\n real_headers = CIMultiDict()\n self._default_headers = real_headers # type: CIMultiDict[str]\n if skip_auto_headers is not None:\n self._skip_auto_headers = frozenset([istr(i)\n for i in skip_auto_headers])\n else:\n self._skip_auto_headers = frozenset()\n\n self._request_class = request_class\n self._response_class = response_class\n self._ws_response_class = ws_response_class\n\n self._trace_configs = trace_configs or []\n for trace_config in self._trace_configs:\n trace_config.freeze()\n\n def __init_subclass__(cls: Type['ClientSession']) -> None:\n raise TypeError(\"Inheritance class {} from ClientSession \"\n \"is forbidden\".format(cls.__name__))\n\n def __del__(self, _warnings: Any=warnings) -> None:\n if not self.closed:\n if PY_36:\n kwargs = {'source': self}\n else:\n kwargs = {}\n _warnings.warn(\"Unclosed client session {!r}\".format(self),\n ResourceWarning,\n **kwargs)\n context = {'client_session': self,\n 'message': 'Unclosed client session'}\n if self._source_traceback is not None:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n\n def request(self,\n method: str,\n url: StrOrURL,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP request.\"\"\"\n return _RequestContextManager(self._request(method, url, **kwargs))\n\n async def _request(\n self,\n method: str,\n str_or_url: StrOrURL, *,\n params: Optional[Mapping[str, str]]=None,\n data: Any=None,\n json: Any=None,\n cookies: Optional[LooseCookies]=None,\n headers: LooseHeaders=None,\n skip_auto_headers: Optional[Iterable[str]]=None,\n auth: Optional[BasicAuth]=None,\n allow_redirects: bool=True,\n max_redirects: int=10,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n raise_for_status: Union[None, bool, Callable[[ClientResponse], Awaitable[None]]]=None, # noqa\n read_until_eof: bool=True,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timeout: Union[ClientTimeout, object]=sentinel,\n ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n trace_request_ctx: Optional[SimpleNamespace]=None\n ) -> ClientResponse:\n\n # NOTE: timeout clamps existing connect and read timeouts. 
We cannot\n # set the default to None because we need to detect if the user wants\n # to use the existing timeouts by setting timeout to None.\n\n if self.closed:\n raise RuntimeError('Session is closed')\n\n if not isinstance(ssl, SSL_ALLOWED_TYPES):\n raise TypeError(\"ssl should be SSLContext, bool, Fingerprint, \"\n \"or None, got {!r} instead.\".format(ssl))\n\n if data is not None and json is not None:\n raise ValueError(\n 'data and json parameters can not be used at the same time')\n elif json is not None:\n data = payload.JsonPayload(json, dumps=self._json_serialize)\n\n redirects = 0\n history = []\n version = self._version\n\n # Merge with default headers and transform to CIMultiDict\n headers = self._prepare_headers(headers)\n proxy_headers = self._prepare_headers(proxy_headers)\n\n try:\n url = URL(str_or_url)\n except ValueError:\n raise InvalidURL(str_or_url)\n\n skip_headers = set(self._skip_auto_headers)\n if skip_auto_headers is not None:\n for i in skip_auto_headers:\n skip_headers.add(istr(i))\n\n if proxy is not None:\n try:\n proxy = URL(proxy)\n except ValueError:\n raise InvalidURL(proxy)\n\n if timeout is sentinel:\n real_timeout = self._timeout # type: ClientTimeout\n else:\n if not isinstance(timeout, ClientTimeout):\n real_timeout = ClientTimeout(total=timeout) # type: ignore\n else:\n real_timeout = timeout\n # timeout is cumulative for all request operations\n # (request, redirects, responses, data consuming)\n tm = TimeoutHandle(self._loop, real_timeout.total)\n handle = tm.start()\n\n traces = [\n Trace(\n self,\n trace_config,\n trace_config.trace_config_ctx(\n trace_request_ctx=trace_request_ctx)\n )\n for trace_config in self._trace_configs\n ]\n\n for trace in traces:\n await trace.send_request_start(\n method,\n url,\n headers\n )\n\n timer = tm.timer()\n try:\n with timer:\n while True:\n url, auth_from_url = strip_auth_from_url(url)\n if auth and auth_from_url:\n raise ValueError(\"Cannot combine AUTH argument with \"\n \"credentials encoded in URL\")\n\n if auth is None:\n auth = auth_from_url\n if auth is None:\n auth = self._default_auth\n # It would be confusing if we support explicit\n # Authorization header with auth argument\n if (headers is not None and\n auth is not None and\n hdrs.AUTHORIZATION in headers):\n raise ValueError(\"Cannot combine AUTHORIZATION header \"\n \"with AUTH argument or credentials \"\n \"encoded in URL\")\n\n all_cookies = self._cookie_jar.filter_cookies(url)\n\n if cookies is not None:\n tmp_cookie_jar = CookieJar()\n tmp_cookie_jar.update_cookies(cookies)\n req_cookies = tmp_cookie_jar.filter_cookies(url)\n if req_cookies:\n all_cookies.load(req_cookies)\n\n if proxy is not None:\n proxy = URL(proxy)\n elif self._trust_env:\n for scheme, proxy_info in proxies_from_env().items():\n if scheme == url.scheme:\n proxy = proxy_info.proxy\n proxy_auth = proxy_info.proxy_auth\n break\n\n req = self._request_class(\n method, url, params=params, headers=headers,\n skip_auto_headers=skip_headers, data=data,\n cookies=all_cookies, auth=auth, version=version,\n compress=compress, chunked=chunked,\n expect100=expect100, loop=self._loop,\n response_class=self._response_class,\n proxy=proxy, proxy_auth=proxy_auth, timer=timer,\n session=self,\n ssl=ssl, proxy_headers=proxy_headers, traces=traces)\n\n # connection timeout\n try:\n with CeilTimeout(real_timeout.connect,\n loop=self._loop):\n assert self._connector is not None\n conn = await self._connector.connect(\n req,\n traces=traces,\n timeout=real_timeout\n )\n except 
asyncio.TimeoutError as exc:\n raise ServerTimeoutError(\n 'Connection timeout '\n 'to host {0}'.format(url)) from exc\n\n assert conn.transport is not None\n\n assert conn.protocol is not None\n conn.protocol.set_response_params(\n timer=timer,\n skip_payload=method.upper() == 'HEAD',\n read_until_eof=read_until_eof,\n auto_decompress=self._auto_decompress,\n read_timeout=real_timeout.sock_read)\n\n try:\n try:\n resp = await req.send(conn)\n try:\n await resp.start(conn)\n except BaseException:\n resp.close()\n raise\n except BaseException:\n conn.close()\n raise\n except ClientError:\n raise\n except OSError as exc:\n raise ClientOSError(*exc.args) from exc\n\n self._cookie_jar.update_cookies(resp.cookies, resp.url)\n\n # redirects\n if resp.status in (\n 301, 302, 303, 307, 308) and allow_redirects:\n\n for trace in traces:\n await trace.send_request_redirect(\n method,\n url,\n headers,\n resp\n )\n\n redirects += 1\n history.append(resp)\n if max_redirects and redirects >= max_redirects:\n resp.close()\n raise TooManyRedirects(\n history[0].request_info, tuple(history))\n\n # For 301 and 302, mimic IE, now changed in RFC\n # https://github.com/kennethreitz/requests/pull/269\n if (resp.status == 303 and\n resp.method != hdrs.METH_HEAD) \\\n or (resp.status in (301, 302) and\n resp.method == hdrs.METH_POST):\n method = hdrs.METH_GET\n data = None\n if headers.get(hdrs.CONTENT_LENGTH):\n headers.pop(hdrs.CONTENT_LENGTH)\n\n r_url = (resp.headers.get(hdrs.LOCATION) or\n resp.headers.get(hdrs.URI))\n if r_url is None:\n # see github.com/aio-libs/aiohttp/issues/2022\n break\n else:\n # reading from correct redirection\n # response is forbidden\n resp.release()\n\n try:\n r_url = URL(\n r_url, encoded=not self._requote_redirect_url)\n\n except ValueError:\n raise InvalidURL(r_url)\n\n scheme = r_url.scheme\n if scheme not in ('http', 'https', ''):\n resp.close()\n raise ValueError(\n 'Can redirect only to http or https')\n elif not scheme:\n r_url = url.join(r_url)\n\n if url.origin() != r_url.origin():\n auth = None\n headers.pop(hdrs.AUTHORIZATION, None)\n\n url = r_url\n params = None\n resp.release()\n continue\n\n break\n\n # check response status\n if raise_for_status is None:\n raise_for_status = self._raise_for_status\n\n if raise_for_status is None:\n pass\n elif callable(raise_for_status):\n await raise_for_status(resp)\n elif raise_for_status:\n resp.raise_for_status()\n\n # register connection\n if handle is not None:\n if resp.connection is not None:\n resp.connection.add_callback(handle.cancel)\n else:\n handle.cancel()\n\n resp._history = tuple(history)\n\n for trace in traces:\n await trace.send_request_end(\n method,\n url,\n headers,\n resp\n )\n return resp\n\n except BaseException as e:\n # cleanup timer\n tm.close()\n if handle:\n handle.cancel()\n handle = None\n\n for trace in traces:\n await trace.send_request_exception(\n method,\n url,\n headers,\n e\n )\n raise\n\n def ws_connect(\n self,\n url: StrOrURL, *,\n method: str=hdrs.METH_GET,\n protocols: Iterable[str]=(),\n timeout: Union[ClientWSTimeout, float]=sentinel,\n receive_timeout: Optional[float]=None,\n autoclose: bool=True,\n autoping: bool=True,\n heartbeat: Optional[float]=None,\n auth: Optional[BasicAuth]=None,\n origin: Optional[str]=None,\n headers: Optional[LooseHeaders]=None,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n ssl: Union[SSLContext, bool, None, Fingerprint]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n compress: int=0,\n max_msg_size: int=4*1024*1024) 
-> '_WSRequestContextManager':\n \"\"\"Initiate websocket connection.\"\"\"\n return _WSRequestContextManager(\n self._ws_connect(url,\n method=method,\n protocols=protocols,\n timeout=timeout,\n receive_timeout=receive_timeout,\n autoclose=autoclose,\n autoping=autoping,\n heartbeat=heartbeat,\n auth=auth,\n origin=origin,\n headers=headers,\n proxy=proxy,\n proxy_auth=proxy_auth,\n ssl=ssl,\n proxy_headers=proxy_headers,\n compress=compress,\n max_msg_size=max_msg_size))\n\n async def _ws_connect(\n self,\n url: StrOrURL, *,\n method: str=hdrs.METH_GET,\n protocols: Iterable[str]=(),\n timeout: Union[ClientWSTimeout, float]=sentinel,\n receive_timeout: Optional[float]=None,\n autoclose: bool=True,\n autoping: bool=True,\n heartbeat: Optional[float]=None,\n auth: Optional[BasicAuth]=None,\n origin: Optional[str]=None,\n headers: Optional[LooseHeaders]=None,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n ssl: Union[SSLContext, bool, None, Fingerprint]=None,\n proxy_headers: Optional[LooseHeaders]=None,\n compress: int=0,\n max_msg_size: int=4*1024*1024\n ) -> ClientWebSocketResponse:\n if timeout is not sentinel:\n if isinstance(timeout, ClientWSTimeout):\n ws_timeout = timeout\n else:\n warnings.warn(\"parameter 'timeout' of type 'float' \"\n \"is deprecated, please use \"\n \"'timeout=ClientWSTimeout(ws_close=...)'\",\n DeprecationWarning,\n stacklevel=2)\n ws_timeout = ClientWSTimeout(ws_close=timeout)\n else:\n ws_timeout = DEFAULT_WS_CLIENT_TIMEOUT\n if receive_timeout is not None:\n warnings.warn(\"float parameter 'receive_timeout' \"\n \"is deprecated, please use parameter \"\n \"'timeout=ClientWSTimeout(ws_receive=...)'\",\n DeprecationWarning,\n stacklevel=2)\n ws_timeout = attr.evolve(ws_timeout, ws_receive=receive_timeout)\n\n if headers is None:\n real_headers = CIMultiDict() # type: CIMultiDict[str]\n else:\n real_headers = CIMultiDict(headers)\n\n default_headers = {\n hdrs.UPGRADE: hdrs.WEBSOCKET,\n hdrs.CONNECTION: hdrs.UPGRADE,\n hdrs.SEC_WEBSOCKET_VERSION: '13',\n }\n\n for key, value in default_headers.items():\n real_headers.setdefault(key, value)\n\n sec_key = base64.b64encode(os.urandom(16))\n real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode()\n\n if protocols:\n real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)\n if origin is not None:\n real_headers[hdrs.ORIGIN] = origin\n if compress:\n extstr = ws_ext_gen(compress=compress)\n real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr\n\n if not isinstance(ssl, SSL_ALLOWED_TYPES):\n raise TypeError(\"ssl should be SSLContext, bool, Fingerprint, \"\n \"or None, got {!r} instead.\".format(ssl))\n\n # send request\n resp = await self.request(method, url,\n headers=real_headers,\n read_until_eof=False,\n auth=auth,\n proxy=proxy,\n proxy_auth=proxy_auth,\n ssl=ssl,\n proxy_headers=proxy_headers)\n\n try:\n # check handshake\n if resp.status != 101:\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid response status',\n status=resp.status,\n headers=resp.headers)\n\n if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid upgrade header',\n status=resp.status,\n headers=resp.headers)\n\n if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid connection header',\n status=resp.status,\n headers=resp.headers)\n\n # key calculation\n key = 
resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')\n match = base64.b64encode(\n hashlib.sha1(sec_key + WS_KEY).digest()).decode()\n if key != match:\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message='Invalid challenge response',\n status=resp.status,\n headers=resp.headers)\n\n # websocket protocol\n protocol = None\n if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:\n resp_protocols = [\n proto.strip() for proto in\n resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]\n\n for proto in resp_protocols:\n if proto in protocols:\n protocol = proto\n break\n\n # websocket compress\n notakeover = False\n if compress:\n compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)\n if compress_hdrs:\n try:\n compress, notakeover = ws_ext_parse(compress_hdrs)\n except WSHandshakeError as exc:\n raise WSServerHandshakeError(\n resp.request_info,\n resp.history,\n message=exc.args[0],\n status=resp.status,\n headers=resp.headers)\n else:\n compress = 0\n notakeover = False\n\n conn = resp.connection\n assert conn is not None\n proto = conn.protocol\n assert proto is not None\n transport = conn.transport\n assert transport is not None\n reader = FlowControlDataQueue(\n proto, limit=2 ** 16, loop=self._loop) # type: FlowControlDataQueue[WSMessage] # noqa\n proto.set_parser(WebSocketReader(reader, max_msg_size), reader)\n writer = WebSocketWriter(\n proto, transport, use_mask=True,\n compress=compress, notakeover=notakeover)\n except BaseException:\n resp.close()\n raise\n else:\n return self._ws_response_class(reader,\n writer,\n protocol,\n resp,\n ws_timeout,\n autoclose,\n autoping,\n self._loop,\n heartbeat=heartbeat,\n compress=compress,\n client_notakeover=notakeover)\n\n def _prepare_headers(\n self,\n headers: Optional[LooseHeaders]) -> 'CIMultiDict[str]':\n \"\"\" Add default headers and transform it to CIMultiDict\n \"\"\"\n # Convert headers to MultiDict\n result = CIMultiDict(self._default_headers)\n if headers:\n if not isinstance(headers, (MultiDictProxy, MultiDict)):\n headers = CIMultiDict(headers)\n added_names = set() # type: Set[str]\n for key, value in headers.items():\n if key in added_names:\n result.add(key, value)\n else:\n result[key] = value\n added_names.add(key)\n return result\n\n def get(self, url: StrOrURL, *, allow_redirects: bool=True,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP GET request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_GET, url,\n allow_redirects=allow_redirects,\n **kwargs))\n\n def options(self, url: StrOrURL, *, allow_redirects: bool=True,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP OPTIONS request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_OPTIONS, url,\n allow_redirects=allow_redirects,\n **kwargs))\n\n def head(self, url: StrOrURL, *, allow_redirects: bool=False,\n **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP HEAD request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_HEAD, url,\n allow_redirects=allow_redirects,\n **kwargs))\n\n def post(self, url: StrOrURL,\n *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP POST request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_POST, url,\n data=data,\n **kwargs))\n\n def put(self, url: StrOrURL,\n *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP PUT request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_PUT, url,\n data=data,\n 
**kwargs))\n\n def patch(self, url: StrOrURL,\n *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP PATCH request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_PATCH, url,\n data=data,\n **kwargs))\n\n def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':\n \"\"\"Perform HTTP DELETE request.\"\"\"\n return _RequestContextManager(\n self._request(hdrs.METH_DELETE, url,\n **kwargs))\n\n async def close(self) -> None:\n \"\"\"Close underlying connector.\n\n Release all acquired resources.\n \"\"\"\n if not self.closed:\n if self._connector is not None and self._connector_owner:\n await self._connector.close()\n self._connector = None\n\n @property\n def closed(self) -> bool:\n \"\"\"Is client session closed.\n\n A readonly property.\n \"\"\"\n return self._connector is None or self._connector.closed\n\n @property\n def connector(self) -> Optional[BaseConnector]:\n \"\"\"Connector instance used for the session.\"\"\"\n return self._connector\n\n @property\n def cookie_jar(self) -> AbstractCookieJar:\n \"\"\"The session cookies.\"\"\"\n return self._cookie_jar\n\n @property\n def version(self) -> Tuple[int, int]:\n \"\"\"The session HTTP protocol version.\"\"\"\n return self._version\n\n @property\n def requote_redirect_url(self) -> bool:\n \"\"\"Do URL requoting on redirection handling.\"\"\"\n return self._requote_redirect_url\n\n @property\n def timeout(self) -> Union[object, ClientTimeout]:\n \"\"\"Timeout for the session.\"\"\"\n return self._timeout\n\n def detach(self) -> None:\n \"\"\"Detach connector from session without closing the former.\n\n Session is switched to closed state anyway.\n \"\"\"\n self._connector = None\n\n async def __aenter__(self) -> 'ClientSession':\n return self\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType]) -> None:\n await self.close()\n\n\nclass _BaseRequestContextManager(Coroutine[Any,\n Any,\n _RetType],\n Generic[_RetType]):\n\n __slots__ = ('_coro', '_resp')\n\n def __init__(\n self,\n coro: Coroutine['asyncio.Future[Any]', None, _RetType]\n ) -> None:\n self._coro = coro\n\n def send(self, arg: None) -> 'asyncio.Future[Any]':\n return self._coro.send(arg)\n\n def throw(self, arg: BaseException) -> None: # type: ignore\n self._coro.throw(arg) # type: ignore\n\n def close(self) -> None:\n return self._coro.close()\n\n def __await__(self) -> Generator[Any, None, _RetType]:\n ret = self._coro.__await__()\n return ret\n\n def __iter__(self) -> Generator[Any, None, _RetType]:\n return self.__await__()\n\n async def __aenter__(self) -> _RetType:\n self._resp = await self._coro\n return self._resp\n\n\nclass _RequestContextManager(_BaseRequestContextManager[ClientResponse]):\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc: Optional[BaseException],\n tb: Optional[TracebackType]) -> None:\n # We're basing behavior on the exception as it can be caused by\n # user code unrelated to the status of the connection. If you\n # would like to close a connection you must do that\n # explicitly. 
Otherwise connection error handling should kick in\n # and close/recycle the connection as required.\n self._resp.release()\n\n\nclass _WSRequestContextManager(_BaseRequestContextManager[\n ClientWebSocketResponse]):\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc: Optional[BaseException],\n tb: Optional[TracebackType]) -> None:\n await self._resp.close()\n\n\nclass _SessionRequestContextManager:\n\n __slots__ = ('_coro', '_resp', '_session')\n\n def __init__(self,\n coro: Coroutine['asyncio.Future[Any]', None, ClientResponse],\n session: ClientSession) -> None:\n self._coro = coro\n self._resp = None # type: Optional[ClientResponse]\n self._session = session\n\n async def __aenter__(self) -> ClientResponse:\n try:\n self._resp = await self._coro\n except BaseException:\n await self._session.close()\n raise\n else:\n return self._resp\n\n async def __aexit__(self,\n exc_type: Optional[Type[BaseException]],\n exc: Optional[BaseException],\n tb: Optional[TracebackType]) -> None:\n assert self._resp is not None\n self._resp.close()\n await self._session.close()\n\n\ndef request(\n method: str,\n url: StrOrURL, *,\n params: Optional[Mapping[str, str]]=None,\n data: Any=None,\n json: Any=None,\n headers: LooseHeaders=None,\n skip_auto_headers: Optional[Iterable[str]]=None,\n auth: Optional[BasicAuth]=None,\n allow_redirects: bool=True,\n max_redirects: int=10,\n compress: Optional[str]=None,\n chunked: Optional[bool]=None,\n expect100: bool=False,\n raise_for_status: Optional[bool]=None,\n read_until_eof: bool=True,\n proxy: Optional[StrOrURL]=None,\n proxy_auth: Optional[BasicAuth]=None,\n timeout: Union[ClientTimeout, object]=sentinel,\n cookies: Optional[LooseCookies]=None,\n version: HttpVersion=http.HttpVersion11,\n connector: Optional[BaseConnector]=None\n) -> _SessionRequestContextManager:\n \"\"\"Constructs and sends a request. 
Returns response object.\n method - HTTP method\n url - request url\n params - (optional) Dictionary or bytes to be sent in the query\n string of the new request\n data - (optional) Dictionary, bytes, or file-like object to\n send in the body of the request\n json - (optional) Any json compatible python object\n headers - (optional) Dictionary of HTTP Headers to send with\n the request\n cookies - (optional) Dict object to send with the request\n auth - (optional) BasicAuth named tuple represent HTTP Basic Auth\n auth - aiohttp.helpers.BasicAuth\n allow_redirects - (optional) If set to False, do not follow\n redirects\n version - Request HTTP version.\n compress - Set to True if request has to be compressed\n with deflate encoding.\n chunked - Set to chunk size for chunked transfer encoding.\n expect100 - Expect 100-continue response from server.\n connector - BaseConnector sub-class instance to support\n connection pooling.\n read_until_eof - Read response until eof if response\n does not have Content-Length header.\n loop - Optional event loop.\n timeout - Optional ClientTimeout settings structure, 5min\n total timeout by default.\n Usage::\n >>> import aiohttp\n >>> resp = await aiohttp.request('GET', 'http://python.org/')\n >>> resp\n <ClientResponse(python.org/) [200]>\n >>> data = await resp.read()\n \"\"\"\n connector_owner = False\n if connector is None:\n connector_owner = True\n connector = TCPConnector(force_close=True)\n\n session = ClientSession(\n cookies=cookies, version=version, timeout=timeout,\n connector=connector, connector_owner=connector_owner)\n\n return _SessionRequestContextManager(\n session._request(method, url,\n params=params,\n data=data,\n json=json,\n headers=headers,\n skip_auto_headers=skip_auto_headers,\n auth=auth,\n allow_redirects=allow_redirects,\n max_redirects=max_redirects,\n compress=compress,\n chunked=chunked,\n expect100=expect100,\n raise_for_status=raise_for_status,\n read_until_eof=read_until_eof,\n proxy=proxy,\n proxy_auth=proxy_auth,),\n session)\n", "path": "aiohttp/client.py"}]} |
gh_patches_debug_1312 | rasdani/github-patches | git_diff | holoviz__panel-1716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BytesIO/StringIO break pn.pane.image if called second time
#### ALL software version info
Panel: 0.10.0.post7+gb5cf6928
#### Description of expected behavior and the observed behavior
When having a BytesIO/StringIO object as input to pn.pane.image it will crash the second time a widget is called.
I don´t know if it a feature or a bug... I have made a simple implementation of a fix, if it is a bug [here](https://github.com/Hoxbro/panel/commit/af03e001a23d1e1cea065f85abfb4e0176d0d688).
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import os
import param
import panel as pn
class ImageExample(param.Parameterized):
def __init__(self, **params):
self.button = pn.widgets.Button(name="Create Animation", button_type="primary")
self.break_ = pn.widgets.Button(name="Break Animation", button_type="primary")
super().__init__(**params)
self.gif = None
@param.depends(pn.state.param.busy)
def animation(self, busy):
return None if busy else self.gif
@param.depends("button.clicks")
def _create_animation(self):
# it could be any gif... :)
self.gif = pn.pane.GIF(open(os.path.join(os.path.dirname(__file__),'index.gif'), 'rb'))
@param.depends("break_.clicks")
def _break_animation(self):
pass
ie = ImageExample()
pn.Column(
ie.button,
ie.break_,
ie.animation,
ie._create_animation,
).servable()
```
#### Screenshots or screencasts of the bug in action

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pane/image.py`
Content:
```
1 """
2 Contains Image panes including renderers for PNG, SVG, GIF and JPG
3 file types.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import base64
8
9 from io import BytesIO
10 from six import string_types
11
12 import param
13
14 from .markup import escape, DivPaneBase
15 from ..util import isfile, isurl
16
17
18 class ImageBase(DivPaneBase):
19 """
20 Encodes an image as base64 and wraps it in a Bokeh Div model.
21 This is an abstract base class that needs the image type
22 to be specified and specific code for determining the image shape.
23
24 The imgtype determines the filetype, extension, and MIME type for
25 this image. Each image type (png,jpg,gif) has a base class that
26 supports anything with a `_repr_X_` method (where X is `png`,
27 `gif`, etc.), a local file with the given file extension, or a
28 HTTP(S) url with the given extension. Subclasses of each type can
29 provide their own way of obtaining or generating a PNG.
30 """
31
32 alt_text = param.String(default=None, doc="""
33 alt text to add to the image tag. The alt text is shown when a
34 user cannot load or display the image.""")
35
36 link_url = param.String(default=None, doc="""
37 A link URL to make the image clickable and link to some other
38 website.""")
39
40 embed = param.Boolean(default=True, doc="""
41 Whether to embed the image as base64.""")
42
43 imgtype = 'None'
44
45 _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']
46
47 _target_transforms = {'object': """'<img src="' + value + '"></img>'"""}
48
49 __abstract = True
50
51 @classmethod
52 def applies(cls, obj):
53 imgtype = cls.imgtype
54 if hasattr(obj, '_repr_{}_'.format(imgtype)):
55 return True
56 if isinstance(obj, string_types):
57 if isfile(obj) and obj.endswith('.'+imgtype):
58 return True
59 if isurl(obj, [cls.imgtype]):
60 return True
61 elif isurl(obj, None):
62 return 0
63 if hasattr(obj, 'read'): # Check for file like object
64 return True
65 return False
66
67 def _type_error(self, object):
68 if isinstance(object, string_types):
69 raise ValueError("%s pane cannot parse string that is not a filename "
70 "or URL." % type(self).__name__)
71 super(ImageBase, self)._type_error(object)
72
73 def _img(self):
74 if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):
75 return getattr(self.object, '_repr_' + self.imgtype + '_')()
76 if isinstance(self.object, string_types):
77 if isfile(self.object):
78 with open(self.object, 'rb') as f:
79 return f.read()
80 if hasattr(self.object, 'read'):
81 return self.object.read()
82 if isurl(self.object, None):
83 import requests
84 r = requests.request(url=self.object, method='GET')
85 return r.content
86
87 def _b64(self):
88 data = self._img()
89 if not isinstance(data, bytes):
90 data = data.encode('utf-8')
91 b64 = base64.b64encode(data).decode("utf-8")
92 return "data:image/"+self.imgtype+f";base64,{b64}"
93
94 def _imgshape(self, data):
95 """Calculate and return image width,height"""
96 raise NotImplementedError
97
98 def _get_properties(self):
99 p = super(ImageBase, self)._get_properties()
100 if self.object is None:
101 return dict(p, text='<img></img>')
102 data = self._img()
103 if not isinstance(data, bytes):
104 data = base64.b64decode(data)
105 width, height = self._imgshape(data)
106 if self.width is not None:
107 if self.height is None:
108 height = int((self.width/width)*height)
109 else:
110 height = self.height
111 width = self.width
112 elif self.height is not None:
113 width = int((self.height/height)*width)
114 height = self.height
115 if not self.embed:
116 src = self.object
117 else:
118 b64 = base64.b64encode(data).decode("utf-8")
119 src = "data:image/"+self.imgtype+";base64,{b64}".format(b64=b64)
120
121 smode = self.sizing_mode
122 if smode in ['fixed', None]:
123 w, h = '%spx' % width, '%spx' % height
124 elif smode == 'stretch_both':
125 w, h = '100%', '100%'
126 elif smode == 'stretch_width':
127 w, h = '%spx' % width, '100%'
128 elif smode == 'stretch_height':
129 w, h = '100%', '%spx' % height
130 elif smode == 'scale_height':
131 w, h = 'auto', '100%'
132 else:
133 w, h = '100%', 'auto'
134
135 html = '<img src="{src}" width="{width}" height="{height}" alt="{alt}"></img>'.format(
136 src=src, width=w, height=h, alt=self.alt_text or '')
137
138 if self.link_url:
139 html = '<a href="{url}" target="_blank">{html}</a>'.format(
140 url=self.link_url, html=html)
141
142 return dict(p, width=width, height=height, text=escape(html))
143
144
145 class PNG(ImageBase):
146
147 imgtype = 'png'
148
149 @classmethod
150 def _imgshape(cls, data):
151 import struct
152 w, h = struct.unpack('>LL', data[16:24])
153 return int(w), int(h)
154
155
156 class GIF(ImageBase):
157
158 imgtype = 'gif'
159
160 @classmethod
161 def _imgshape(cls, data):
162 import struct
163 w, h = struct.unpack("<HH", data[6:10])
164 return int(w), int(h)
165
166
167 class JPG(ImageBase):
168
169 imgtype = 'jpg'
170
171 @classmethod
172 def _imgshape(cls, data):
173 import struct
174 b = BytesIO(data)
175 b.read(2)
176 c = b.read(1)
177 while (c and ord(c) != 0xDA):
178 while (ord(c) != 0xFF): c = b.read(1)
179 while (ord(c) == 0xFF): c = b.read(1)
180 if (ord(c) >= 0xC0 and ord(c) <= 0xC3):
181 b.read(3)
182 h, w = struct.unpack(">HH", b.read(4))
183 break
184 else:
185 b.read(int(struct.unpack(">H", b.read(2))[0])-2)
186 c = b.read(1)
187 return int(w), int(h)
188
189
190 class SVG(ImageBase):
191
192 encode = param.Boolean(default=False, doc="""
193 Whether to enable base64 encoding of the SVG, base64 encoded
194 SVGs do not support links.""")
195
196 imgtype = 'svg'
197
198 _rerender_params = ImageBase._rerender_params + ['encode']
199
200 @classmethod
201 def applies(cls, obj):
202 return (super(SVG, cls).applies(obj) or
203 (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))
204
205 def _type_error(self, object):
206 if isinstance(object, string_types):
207 raise ValueError("%s pane cannot parse string that is not a filename, "
208 "URL or a SVG XML contents." % type(self).__name__)
209 super(SVG, self)._type_error(object)
210
211 def _img(self):
212 if (isinstance(self.object, string_types) and
213 self.object.lstrip().startswith('<svg')):
214 return self.object
215 return super(SVG, self)._img()
216
217 def _b64(self):
218 data = self._img()
219 if not isinstance(data, bytes):
220 data = data.encode('utf-8')
221 b64 = base64.b64encode(data).decode("utf-8")
222 return f"data:image/svg+xml;base64,{b64}"
223
224 def _imgshape(self, data):
225 return (self.width, self.height)
226
227 def _get_properties(self):
228 p = super(ImageBase, self)._get_properties()
229 if self.object is None:
230 return dict(p, text='<img></img>')
231 data = self._img()
232 width, height = self._imgshape(data)
233 if not isinstance(data, bytes):
234 data = data.encode('utf-8')
235
236 if self.encode:
237 b64 = base64.b64encode(data).decode("utf-8")
238 src = "data:image/svg+xml;base64,{b64}".format(b64=b64)
239 html = "<img src='{src}' width={width} height={height}></img>".format(
240 src=src, width=width, height=height
241 )
242 else:
243 html = data.decode("utf-8")
244 return dict(p, width=width, height=height, text=escape(html))
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/pane/image.py b/panel/pane/image.py
--- a/panel/pane/image.py
+++ b/panel/pane/image.py
@@ -78,6 +78,8 @@
with open(self.object, 'rb') as f:
return f.read()
if hasattr(self.object, 'read'):
+ if hasattr(self.object, 'seek'):
+ self.object.seek(0)
return self.object.read()
if isurl(self.object, None):
import requests
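As a sanity check of the guard added above, a small sketch (hypothetical helper name, not Panel's public API) showing that rewinding seekable file-like objects before reading makes repeated calls return the same payload:
```python
from io import BytesIO

def read_like_patched_img(obj):
    # mirrors the patched branch: rewind seekable file-like objects before reading
    if hasattr(obj, 'read'):
        if hasattr(obj, 'seek'):
            obj.seek(0)
        return obj.read()

buf = BytesIO(b'fake-image-bytes')
assert read_like_patched_img(buf) == b'fake-image-bytes'
assert read_like_patched_img(buf) == b'fake-image-bytes'  # second call no longer yields b''
```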
| {"golden_diff": "diff --git a/panel/pane/image.py b/panel/pane/image.py\n--- a/panel/pane/image.py\n+++ b/panel/pane/image.py\n@@ -78,6 +78,8 @@\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n+ if hasattr(self.object, 'seek'):\n+ self.object.seek(0)\n return self.object.read()\n if isurl(self.object, None):\n import requests\n", "issue": "BytesIO/StringIO break pn.pane.image if called second time\n#### ALL software version info\r\nPanel: 0.10.0.post7+gb5cf6928\r\n\r\n#### Description of expected behavior and the observed behavior\r\nWhen having a BytesIO/StringIO object as input to pn.pane.image it will crash the second time a widget is called.\r\nI don\u00b4t know if it a feature or a bug... I have made a simple implementation of a fix, if it is a bug [here](https://github.com/Hoxbro/panel/commit/af03e001a23d1e1cea065f85abfb4e0176d0d688).\r\n\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```python\r\nimport os\r\n\r\nimport param\r\nimport panel as pn\r\n\r\n\r\nclass ImageExample(param.Parameterized):\r\n def __init__(self, **params):\r\n self.button = pn.widgets.Button(name=\"Create Animation\", button_type=\"primary\")\r\n self.break_ = pn.widgets.Button(name=\"Break Animation\", button_type=\"primary\")\r\n super().__init__(**params)\r\n self.gif = None\r\n\r\n @param.depends(pn.state.param.busy)\r\n def animation(self, busy):\r\n return None if busy else self.gif\r\n\r\n @param.depends(\"button.clicks\")\r\n def _create_animation(self):\r\n # it could be any gif... :)\r\n self.gif = pn.pane.GIF(open(os.path.join(os.path.dirname(__file__),'index.gif'), 'rb'))\r\n\r\n @param.depends(\"break_.clicks\")\r\n def _break_animation(self):\r\n pass\r\n\r\n\r\nie = ImageExample()\r\npn.Column(\r\n ie.button,\r\n ie.break_,\r\n ie.animation,\r\n ie._create_animation,\r\n).servable()\r\n\r\n\r\n```\r\n\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\n", "before_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. 
The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n elif isurl(obj, None):\n return 0\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n return self.object.read()\n if isurl(self.object, None):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return \"data:image/\"+self.imgtype+f\";base64,{b64}\"\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n 
@classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return f\"data:image/svg+xml;base64,{b64}\"\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}], "after_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. 
The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n elif isurl(obj, None):\n return 0\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n if hasattr(self.object, 'seek'):\n self.object.seek(0)\n return self.object.read()\n if isurl(self.object, None):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return \"data:image/\"+self.imgtype+f\";base64,{b64}\"\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), 
int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return f\"data:image/svg+xml;base64,{b64}\"\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}]} |
gh_patches_debug_1313 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-1272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot unpair logitech mouse from logitech nano receiver
**Information**
- solaar: 1.0.6 (pip3)
- Distribution: ubuntu 21.10
- Kernel version: Linux 5.13.0-16-generic x86_64 GNU/Linux
```
Nano Receiver
Device path : /dev/hidraw2
USB id : 046d:C52F
Serial : BEF9D3BF
Firmware : 30.00.B0009
Has 1 paired device(s) out of a maximum of 1.
Has 36 successful pairing(s) remaining.
Notifications: wireless, software present (0x000900)
1: Marathon Mouse M705 (M-R0073)
Device path : /dev/hidraw6
WPID : 406D
Codename : M705 (M-R0073)
Kind : mouse
Protocol : HID++ 4.5
Polling rate : 8 ms (125Hz)
Serial number: 1967999A
Model ID: 406D00000000
Unit ID: AD21B902
Bootloader: BOT 59.00.B0002
Firmware: RQM 67.01.B0005
The power switch is located on the base.
Supports 27 HID++ 2.0 features:
0: ROOT {0000}
1: FEATURE SET {0001}
2: DEVICE FW VERSION {0003}
Firmware: Bootloader BOT 59.00.B0002 406D5F5048B901
Firmware: Firmware RQM 67.01.B0005 406D5F5048B901
Unit ID: AD21B902 Model ID: 406D00000000 Transport IDs: {'wpid': '406D'}
3: DEVICE NAME {0005}
Name: Marathon Mouse/Performance Plus M705
Kind: mouse
4: WIRELESS DEVICE STATUS {1D4B}
5: RESET {0020}
6: BATTERY STATUS {1000}
Battery: 50%, discharging, next level 20%.
7: REPROG CONTROLS V4 {1B04}
Key/Button Actions (saved): {'80': 80, '81': 81, '82': 82, '83': 83, '86': 86, '91': 91, '93': 93}
Key/Button Actions : {'80': 80, '81': 81, '82': 82, '83': 83, '86': 86, '91': 91, '93': 93}
8: POINTER SPEED {2205}
Pointer Speed: 1.0
Sensitivity (Pointer Speed) (saved): 256
Sensitivity (Pointer Speed) : 256
9: VERTICAL SCROLLING {2100}
Roller type: 3G
Ratchet per turn: 24
Scroll lines: 0
10: DFUCONTROL SIGNED {00C2}
11: DEVICE RESET {1802} internal, hidden
12: unknown:1803 {1803} internal, hidden
13: CONFIG DEVICE PROPS {1806} internal, hidden
14: unknown:1810 {1810} internal, hidden
15: unknown:1830 {1830} internal, hidden
16: unknown:1890 {1890} internal, hidden
17: unknown:18A1 {18A1} internal, hidden
18: unknown:1DF3 {1DF3} internal, hidden
19: unknown:1E00 {1E00} hidden
20: unknown:1EB0 {1EB0} internal, hidden
21: unknown:1861 {1861} internal, hidden
22: unknown:18B1 {18B1} internal, hidden
23: unknown:1850 {1850} internal, hidden
24: unknown:1F03 {1F03} internal, hidden
25: unknown:18C0 {18C0} internal, hidden
26: HIRES WHEEL {2121}
Multiplier: 8
Has invert: Normal wheel motion
Has ratchet switch: Free wheel mode
High resolution mode
HID notification
Scroll Wheel Direction (saved): False
Scroll Wheel Direction : False
Scroll Wheel Resolution (saved): True
Scroll Wheel Resolution : True
Has 7 reprogrammable keys:
0: Left Button , default: Left Click => Left Click
mse, reprogrammable, pos:0, group:1, group mask:g1
reporting: default
1: Right Button , default: Right Click => Right Click
mse, reprogrammable, pos:0, group:1, group mask:g1
reporting: default
2: Middle Button , default: Mouse Middle Button => Mouse Middle Button
mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2
reporting: default
3: Back Button , default: Mouse Back Button => Mouse Back Button
mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2
reporting: default
4: Forward Button , default: Mouse Forward Button => Mouse Forward Button
mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2
reporting: default
5: Left Tilt , default: Mouse Scroll Left Button => Mouse Scroll Left Button
mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2
reporting: default
6: Right Tilt , default: Mouse Scroll Right Button => Mouse Scroll Right Button
mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2
reporting: default
Battery: 50%, discharging, next level 20%.
```
**Describe the bug**
Hitting the unpair button has no effect: the mouse remains paired no matter what.
The following error appears the second time I hit the unpair button:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.9/dist-packages/solaar/ui/window.py", line 201, in _unpair_current_device
assert bool(device)
AssertionError
```
**Additional context**
Solaar has been installed following the guidelines outlined [here](https://pwr-solaar.github.io/Solaar/installation).
--- END ISSUE ---
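For reference before the code listing: the receiver reported above is the 046d:C52F Nano Receiver, and the file below already defines a `_nano_receiver_no_unpair` factory (already used for the C52E), which marks a receiver as unable to unpair. A minimal sketch of the one-line change this suggests, assuming the C52F behaves like the C52E in that respect; it mirrors the patch shown further down in this record:

```python
# Sketch: declare the 046d:C52F Nano receiver with the existing no-unpair
# factory from lib/logitech_receiver/base_usb.py, so Solaar stops offering an
# unpair action that the receiver will not carry out.
NANO_RECEIVER_ADVANCED = _nano_receiver_no_unpair(0xc52f)
```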
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/logitech_receiver/base_usb.py`
Content:
```
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 ## According to Logitech, they use the following product IDs (as of September 2020)
21 ## USB product IDs for receivers: 0xC526 - 0xC5xx
22 ## Wireless PIDs for hidpp10 devices: 0x2006 - 0x2019
23 ## Wireless PIDs for hidpp20 devices: 0x4002 - 0x4097, 0x4101 - 0x4102
24 ## USB product IDs for hidpp20 devices: 0xC07D - 0xC093, 0xC32B - 0xC344
25 ## Bluetooth product IDs (for hidpp20 devices): 0xB012 - 0xB0xx, 0xB32A - 0xB3xx
26
27 # USB ids of Logitech wireless receivers.
28 # Only receivers supporting the HID++ protocol can go in here.
29
30 from __future__ import absolute_import, division, print_function, unicode_literals
31
32 from .descriptors import DEVICES as _DEVICES
33 from .i18n import _
34
35 # max_devices is only used for receivers that do not support reading from _R.receiver_info offset 0x03, default to 1
36 # may_unpair is only used for receivers that do not support reading from _R.receiver_info offset 0x03, default to False
37 # unpair is for receivers that do support reading from _R.receiver_info offset 0x03, no default
38 ## should this last be changed so that may_unpair is used for all receivers? writing to _R.receiver_pairing doesn't seem right
39 # re_pairs determines whether a receiver pairs by replacing existing pairings, default to False
40 ## currently only one receiver is so marked - should there be more?
41 # ex100_27mhz_wpid_fix enable workarounds for EX100 and possible other old 27Mhz receivers
42
43 _DRIVER = ('hid-generic', 'generic-usb', 'logitech-djreceiver')
44
45 _unifying_receiver = lambda product_id: {
46 'vendor_id': 0x046d,
47 'product_id': product_id,
48 'usb_interface': 2,
49 'hid_driver': _DRIVER, # noqa: F821
50 'name': _('Unifying Receiver')
51 }
52
53 _nano_receiver = lambda product_id: {
54 'vendor_id': 0x046d,
55 'product_id': product_id,
56 'usb_interface': 1,
57 'hid_driver': _DRIVER, # noqa: F821
58 'name': _('Nano Receiver'),
59 'may_unpair': False,
60 're_pairs': True
61 }
62
63 _nano_receiver_no_unpair = lambda product_id: {
64 'vendor_id': 0x046d,
65 'product_id': product_id,
66 'usb_interface': 1,
67 'hid_driver': _DRIVER, # noqa: F821
68 'name': _('Nano Receiver'),
69 'may_unpair': False,
70 'unpair': False,
71 're_pairs': True
72 }
73
74 _nano_receiver_max2 = lambda product_id: {
75 'vendor_id': 0x046d,
76 'product_id': product_id,
77 'usb_interface': 1,
78 'hid_driver': _DRIVER, # noqa: F821
79 'name': _('Nano Receiver'),
80 'max_devices': 2,
81 'may_unpair': False,
82 're_pairs': True
83 }
84
85 _nano_receiver_maxn = lambda product_id, max: {
86 'vendor_id': 0x046d,
87 'product_id': product_id,
88 'usb_interface': 1,
89 'hid_driver': _DRIVER, # noqa: F821
90 'name': _('Nano Receiver'),
91 'max_devices': max,
92 'may_unpair': False,
93 're_pairs': True
94 }
95
96 _lenovo_receiver = lambda product_id: {
97 'vendor_id': 0x17ef,
98 'product_id': product_id,
99 'usb_interface': 1,
100 'hid_driver': _DRIVER, # noqa: F821
101 'name': _('Nano Receiver')
102 }
103
104 _lightspeed_receiver = lambda product_id: {
105 'vendor_id': 0x046d,
106 'product_id': product_id,
107 'usb_interface': 2,
108 'hid_driver': _DRIVER, # noqa: F821
109 'name': _('Lightspeed Receiver')
110 }
111
112 _ex100_receiver = lambda product_id: {
113 'vendor_id': 0x046d,
114 'product_id': product_id,
115 'usb_interface': 1,
116 'hid_driver': _DRIVER, # noqa: F821
117 'name': _('EX100 Receiver 27 Mhz'),
118 'max_devices': 4,
119 'may_unpair': False,
120 're_pairs': True,
121 'ex100_27mhz_wpid_fix': True
122 }
123
124 # standard Unifying receivers (marked with the orange Unifying logo)
125 UNIFYING_RECEIVER_C52B = _unifying_receiver(0xc52b)
126 UNIFYING_RECEIVER_C532 = _unifying_receiver(0xc532)
127
128 # Nano receviers that support the Unifying protocol
129 NANO_RECEIVER_ADVANCED = _nano_receiver(0xc52f)
130
131 # ex100 old style receiver pre-unifyimg protocol
132 EX100_27MHZ_RECEIVER_C517 = _ex100_receiver(0xc517)
133
134 # Nano receivers that don't support the Unifying protocol
135 NANO_RECEIVER_C518 = _nano_receiver(0xc518)
136 NANO_RECEIVER_C51A = _nano_receiver(0xc51a)
137 NANO_RECEIVER_C51B = _nano_receiver(0xc51b)
138 NANO_RECEIVER_C521 = _nano_receiver(0xc521)
139 NANO_RECEIVER_C525 = _nano_receiver(0xc525)
140 NANO_RECEIVER_C526 = _nano_receiver(0xc526)
141 NANO_RECEIVER_C52e = _nano_receiver_no_unpair(0xc52e)
142 NANO_RECEIVER_C531 = _nano_receiver(0xc531)
143 NANO_RECEIVER_C534 = _nano_receiver_max2(0xc534)
144 NANO_RECEIVER_C537 = _nano_receiver(0xc537)
145 NANO_RECEIVER_6042 = _lenovo_receiver(0x6042)
146
147 # Lightspeed receivers
148 LIGHTSPEED_RECEIVER_C539 = _lightspeed_receiver(0xc539)
149 LIGHTSPEED_RECEIVER_C53a = _lightspeed_receiver(0xc53a)
150 LIGHTSPEED_RECEIVER_C53f = _lightspeed_receiver(0xc53f)
151 LIGHTSPEED_RECEIVER_C53d = _lightspeed_receiver(0xc53d)
152 LIGHTSPEED_RECEIVER_C545 = _lightspeed_receiver(0xc545)
153 LIGHTSPEED_RECEIVER_C541 = _lightspeed_receiver(0xc541)
154 LIGHTSPEED_RECEIVER_C547 = _lightspeed_receiver(0xc547)
155
156 ALL = (
157 UNIFYING_RECEIVER_C52B,
158 UNIFYING_RECEIVER_C532,
159 NANO_RECEIVER_ADVANCED,
160 EX100_27MHZ_RECEIVER_C517,
161 NANO_RECEIVER_C518,
162 NANO_RECEIVER_C51A,
163 NANO_RECEIVER_C51B,
164 NANO_RECEIVER_C521,
165 NANO_RECEIVER_C525,
166 NANO_RECEIVER_C526,
167 NANO_RECEIVER_C52e,
168 NANO_RECEIVER_C531,
169 NANO_RECEIVER_C534,
170 NANO_RECEIVER_C537,
171 NANO_RECEIVER_6042,
172 LIGHTSPEED_RECEIVER_C539,
173 LIGHTSPEED_RECEIVER_C53a,
174 LIGHTSPEED_RECEIVER_C53f,
175 LIGHTSPEED_RECEIVER_C53d,
176 LIGHTSPEED_RECEIVER_C545,
177 LIGHTSPEED_RECEIVER_C541,
178 LIGHTSPEED_RECEIVER_C547,
179 )
180
181 _wired_device = lambda product_id, interface: {
182 'vendor_id': 0x046d,
183 'product_id': product_id,
184 'bus_id': 0x3,
185 'usb_interface': interface,
186 'isDevice': True
187 }
188
189 _bt_device = lambda product_id: {'vendor_id': 0x046d, 'product_id': product_id, 'bus_id': 0x5, 'isDevice': True}
190
191 DEVICES = []
192
193 for _ignore, d in _DEVICES.items():
194 if d.usbid:
195 DEVICES.append(_wired_device(d.usbid, d.interface if d.interface else 2))
196 if d.btid:
197 DEVICES.append(_bt_device(d.btid))
198
199
200 def other_device_check(bus_id, vendor_id, product_id):
201 """Check whether product is a Logitech USB-connected or Bluetooth device based on bus, vendor, and product IDs
202 This allows Solaar to support receiverless HID++ 2.0 devices that it knows nothing about"""
203 if vendor_id != 0x46d: # Logitech
204 return
205 if bus_id == 0x3: # USB
206 if (product_id >= 0xC07D and product_id <= 0xC093 or product_id >= 0xC32B and product_id <= 0xC344):
207 return _wired_device(product_id, 2)
208 elif bus_id == 0x5: # Bluetooth
209 if (product_id >= 0xB012 and product_id <= 0xB0FF or product_id >= 0xB32A and product_id <= 0xB3FF):
210 return _bt_device(product_id)
211
212
213 def product_information(usb_id):
214 if isinstance(usb_id, str):
215 usb_id = int(usb_id, 16)
216 for r in ALL:
217 if usb_id == r.get('product_id'):
218 return r
219 return {}
220
221
222 del _DRIVER, _unifying_receiver, _nano_receiver, _lenovo_receiver, _lightspeed_receiver
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/logitech_receiver/base_usb.py b/lib/logitech_receiver/base_usb.py
--- a/lib/logitech_receiver/base_usb.py
+++ b/lib/logitech_receiver/base_usb.py
@@ -126,7 +126,7 @@
UNIFYING_RECEIVER_C532 = _unifying_receiver(0xc532)
# Nano receviers that support the Unifying protocol
-NANO_RECEIVER_ADVANCED = _nano_receiver(0xc52f)
+NANO_RECEIVER_ADVANCED = _nano_receiver_no_unpair(0xc52f)
# ex100 old style receiver pre-unifyimg protocol
EX100_27MHZ_RECEIVER_C517 = _ex100_receiver(0xc517)
| {"golden_diff": "diff --git a/lib/logitech_receiver/base_usb.py b/lib/logitech_receiver/base_usb.py\n--- a/lib/logitech_receiver/base_usb.py\n+++ b/lib/logitech_receiver/base_usb.py\n@@ -126,7 +126,7 @@\n UNIFYING_RECEIVER_C532 = _unifying_receiver(0xc532)\n \n # Nano receviers that support the Unifying protocol\n-NANO_RECEIVER_ADVANCED = _nano_receiver(0xc52f)\n+NANO_RECEIVER_ADVANCED = _nano_receiver_no_unpair(0xc52f)\n \n # ex100 old style receiver pre-unifyimg protocol\n EX100_27MHZ_RECEIVER_C517 = _ex100_receiver(0xc517)\n", "issue": "Cannot unpair logitech mouse from logitech nano receiver\n**Information**\r\n- solaar: 1.0.6 (pip3)\r\n- Distribution: ubuntu 21.10\r\n- Kernel version: Linux 5.13.0-16-generic x86_64 GNU/Linux\r\n\r\n```\r\nNano Receiver\r\n Device path : /dev/hidraw2\r\n USB id : 046d:C52F\r\n Serial : BEF9D3BF\r\n Firmware : 30.00.B0009\r\n Has 1 paired device(s) out of a maximum of 1.\r\n Has 36 successful pairing(s) remaining.\r\n Notifications: wireless, software present (0x000900)\r\n\r\n 1: Marathon Mouse M705 (M-R0073)\r\n Device path : /dev/hidraw6\r\n WPID : 406D\r\n Codename : M705 (M-R0073)\r\n Kind : mouse\r\n Protocol : HID++ 4.5\r\n Polling rate : 8 ms (125Hz)\r\n Serial number: 1967999A\r\n Model ID: 406D00000000\r\n Unit ID: AD21B902\r\n Bootloader: BOT 59.00.B0002\r\n Firmware: RQM 67.01.B0005\r\n The power switch is located on the base.\r\n Supports 27 HID++ 2.0 features:\r\n 0: ROOT {0000} \r\n 1: FEATURE SET {0001} \r\n 2: DEVICE FW VERSION {0003} \r\n Firmware: Bootloader BOT 59.00.B0002 406D5F5048B901\r\n Firmware: Firmware RQM 67.01.B0005 406D5F5048B901\r\n Unit ID: AD21B902 Model ID: 406D00000000 Transport IDs: {'wpid': '406D'}\r\n 3: DEVICE NAME {0005} \r\n Name: Marathon Mouse/Performance Plus M705\r\n Kind: mouse\r\n 4: WIRELESS DEVICE STATUS {1D4B} \r\n 5: RESET {0020} \r\n 6: BATTERY STATUS {1000} \r\n Battery: 50%, discharging, next level 20%.\r\n 7: REPROG CONTROLS V4 {1B04} \r\n Key/Button Actions (saved): {'80': 80, '81': 81, '82': 82, '83': 83, '86': 86, '91': 91, '93': 93}\r\n Key/Button Actions : {'80': 80, '81': 81, '82': 82, '83': 83, '86': 86, '91': 91, '93': 93}\r\n 8: POINTER SPEED {2205} \r\n Pointer Speed: 1.0\r\n Sensitivity (Pointer Speed) (saved): 256\r\n Sensitivity (Pointer Speed) : 256\r\n 9: VERTICAL SCROLLING {2100} \r\n Roller type: 3G\r\n Ratchet per turn: 24\r\n Scroll lines: 0\r\n 10: DFUCONTROL SIGNED {00C2} \r\n 11: DEVICE RESET {1802} internal, hidden\r\n 12: unknown:1803 {1803} internal, hidden\r\n 13: CONFIG DEVICE PROPS {1806} internal, hidden\r\n 14: unknown:1810 {1810} internal, hidden\r\n 15: unknown:1830 {1830} internal, hidden\r\n 16: unknown:1890 {1890} internal, hidden\r\n 17: unknown:18A1 {18A1} internal, hidden\r\n 18: unknown:1DF3 {1DF3} internal, hidden\r\n 19: unknown:1E00 {1E00} hidden\r\n 20: unknown:1EB0 {1EB0} internal, hidden\r\n 21: unknown:1861 {1861} internal, hidden\r\n 22: unknown:18B1 {18B1} internal, hidden\r\n 23: unknown:1850 {1850} internal, hidden\r\n 24: unknown:1F03 {1F03} internal, hidden\r\n 25: unknown:18C0 {18C0} internal, hidden\r\n 26: HIRES WHEEL {2121} \r\n Multiplier: 8\r\n Has invert: Normal wheel motion\r\n Has ratchet switch: Free wheel mode\r\n High resolution mode\r\n HID notification\r\n Scroll Wheel Direction (saved): False\r\n Scroll Wheel Direction : False\r\n Scroll Wheel Resolution (saved): True\r\n Scroll Wheel Resolution : True\r\n Has 7 reprogrammable keys:\r\n 0: Left Button , default: Left Click => Left Click \r\n mse, reprogrammable, pos:0, group:1, 
group mask:g1\r\n reporting: default\r\n 1: Right Button , default: Right Click => Right Click \r\n mse, reprogrammable, pos:0, group:1, group mask:g1\r\n reporting: default\r\n 2: Middle Button , default: Mouse Middle Button => Mouse Middle Button \r\n mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 3: Back Button , default: Mouse Back Button => Mouse Back Button \r\n mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 4: Forward Button , default: Mouse Forward Button => Mouse Forward Button \r\n mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 5: Left Tilt , default: Mouse Scroll Left Button => Mouse Scroll Left Button \r\n mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 6: Right Tilt , default: Mouse Scroll Right Button => Mouse Scroll Right Button \r\n mse, reprogrammable, divertable, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n Battery: 50%, discharging, next level 20%.\r\n```\r\n\r\n**Describe the bug**\r\nHitting the unpair button has no effect: the mouse remains paired no matter what.\r\nThe following error appears the second time I hit the unpair button:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/dist-packages/solaar/ui/window.py\", line 201, in _unpair_current_device\r\n assert bool(device)\r\nAssertionError\r\n```\r\n\r\n**Additional context**\r\nSolaar has been installed following the guidelines outlined [here](https://pwr-solaar.github.io/Solaar/installation).\r\n\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n## According to Logitech, they use the following product IDs (as of September 2020)\n## USB product IDs for receivers: 0xC526 - 0xC5xx\n## Wireless PIDs for hidpp10 devices: 0x2006 - 0x2019\n## Wireless PIDs for hidpp20 devices: 0x4002 - 0x4097, 0x4101 - 0x4102\n## USB product IDs for hidpp20 devices: 0xC07D - 0xC093, 0xC32B - 0xC344\n## Bluetooth product IDs (for hidpp20 devices): 0xB012 - 0xB0xx, 0xB32A - 0xB3xx\n\n# USB ids of Logitech wireless receivers.\n# Only receivers supporting the HID++ protocol can go in here.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom .descriptors import DEVICES as _DEVICES\nfrom .i18n import _\n\n# max_devices is only used for receivers that do not support reading from _R.receiver_info offset 0x03, default to 1\n# may_unpair is only used for receivers that do not support reading from _R.receiver_info offset 0x03, default to False\n# unpair is for receivers that do support reading from _R.receiver_info offset 0x03, no default\n## should this last be changed so that may_unpair is used for all receivers? writing to _R.receiver_pairing doesn't seem right\n# re_pairs determines whether a receiver pairs by replacing existing pairings, default to False\n## currently only one receiver is so marked - should there be more?\n# ex100_27mhz_wpid_fix enable workarounds for EX100 and possible other old 27Mhz receivers\n\n_DRIVER = ('hid-generic', 'generic-usb', 'logitech-djreceiver')\n\n_unifying_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 2,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Unifying Receiver')\n}\n\n_nano_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'may_unpair': False,\n 're_pairs': True\n}\n\n_nano_receiver_no_unpair = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'may_unpair': False,\n 'unpair': False,\n 're_pairs': True\n}\n\n_nano_receiver_max2 = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'max_devices': 2,\n 'may_unpair': False,\n 're_pairs': True\n}\n\n_nano_receiver_maxn = lambda product_id, max: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'max_devices': max,\n 'may_unpair': False,\n 're_pairs': True\n}\n\n_lenovo_receiver = lambda product_id: {\n 'vendor_id': 0x17ef,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver')\n}\n\n_lightspeed_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 2,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Lightspeed Receiver')\n}\n\n_ex100_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('EX100 Receiver 27 Mhz'),\n 'max_devices': 4,\n 'may_unpair': 
False,\n 're_pairs': True,\n 'ex100_27mhz_wpid_fix': True\n}\n\n# standard Unifying receivers (marked with the orange Unifying logo)\nUNIFYING_RECEIVER_C52B = _unifying_receiver(0xc52b)\nUNIFYING_RECEIVER_C532 = _unifying_receiver(0xc532)\n\n# Nano receviers that support the Unifying protocol\nNANO_RECEIVER_ADVANCED = _nano_receiver(0xc52f)\n\n# ex100 old style receiver pre-unifyimg protocol\nEX100_27MHZ_RECEIVER_C517 = _ex100_receiver(0xc517)\n\n# Nano receivers that don't support the Unifying protocol\nNANO_RECEIVER_C518 = _nano_receiver(0xc518)\nNANO_RECEIVER_C51A = _nano_receiver(0xc51a)\nNANO_RECEIVER_C51B = _nano_receiver(0xc51b)\nNANO_RECEIVER_C521 = _nano_receiver(0xc521)\nNANO_RECEIVER_C525 = _nano_receiver(0xc525)\nNANO_RECEIVER_C526 = _nano_receiver(0xc526)\nNANO_RECEIVER_C52e = _nano_receiver_no_unpair(0xc52e)\nNANO_RECEIVER_C531 = _nano_receiver(0xc531)\nNANO_RECEIVER_C534 = _nano_receiver_max2(0xc534)\nNANO_RECEIVER_C537 = _nano_receiver(0xc537)\nNANO_RECEIVER_6042 = _lenovo_receiver(0x6042)\n\n# Lightspeed receivers\nLIGHTSPEED_RECEIVER_C539 = _lightspeed_receiver(0xc539)\nLIGHTSPEED_RECEIVER_C53a = _lightspeed_receiver(0xc53a)\nLIGHTSPEED_RECEIVER_C53f = _lightspeed_receiver(0xc53f)\nLIGHTSPEED_RECEIVER_C53d = _lightspeed_receiver(0xc53d)\nLIGHTSPEED_RECEIVER_C545 = _lightspeed_receiver(0xc545)\nLIGHTSPEED_RECEIVER_C541 = _lightspeed_receiver(0xc541)\nLIGHTSPEED_RECEIVER_C547 = _lightspeed_receiver(0xc547)\n\nALL = (\n UNIFYING_RECEIVER_C52B,\n UNIFYING_RECEIVER_C532,\n NANO_RECEIVER_ADVANCED,\n EX100_27MHZ_RECEIVER_C517,\n NANO_RECEIVER_C518,\n NANO_RECEIVER_C51A,\n NANO_RECEIVER_C51B,\n NANO_RECEIVER_C521,\n NANO_RECEIVER_C525,\n NANO_RECEIVER_C526,\n NANO_RECEIVER_C52e,\n NANO_RECEIVER_C531,\n NANO_RECEIVER_C534,\n NANO_RECEIVER_C537,\n NANO_RECEIVER_6042,\n LIGHTSPEED_RECEIVER_C539,\n LIGHTSPEED_RECEIVER_C53a,\n LIGHTSPEED_RECEIVER_C53f,\n LIGHTSPEED_RECEIVER_C53d,\n LIGHTSPEED_RECEIVER_C545,\n LIGHTSPEED_RECEIVER_C541,\n LIGHTSPEED_RECEIVER_C547,\n)\n\n_wired_device = lambda product_id, interface: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'bus_id': 0x3,\n 'usb_interface': interface,\n 'isDevice': True\n}\n\n_bt_device = lambda product_id: {'vendor_id': 0x046d, 'product_id': product_id, 'bus_id': 0x5, 'isDevice': True}\n\nDEVICES = []\n\nfor _ignore, d in _DEVICES.items():\n if d.usbid:\n DEVICES.append(_wired_device(d.usbid, d.interface if d.interface else 2))\n if d.btid:\n DEVICES.append(_bt_device(d.btid))\n\n\ndef other_device_check(bus_id, vendor_id, product_id):\n \"\"\"Check whether product is a Logitech USB-connected or Bluetooth device based on bus, vendor, and product IDs\n This allows Solaar to support receiverless HID++ 2.0 devices that it knows nothing about\"\"\"\n if vendor_id != 0x46d: # Logitech\n return\n if bus_id == 0x3: # USB\n if (product_id >= 0xC07D and product_id <= 0xC093 or product_id >= 0xC32B and product_id <= 0xC344):\n return _wired_device(product_id, 2)\n elif bus_id == 0x5: # Bluetooth\n if (product_id >= 0xB012 and product_id <= 0xB0FF or product_id >= 0xB32A and product_id <= 0xB3FF):\n return _bt_device(product_id)\n\n\ndef product_information(usb_id):\n if isinstance(usb_id, str):\n usb_id = int(usb_id, 16)\n for r in ALL:\n if usb_id == r.get('product_id'):\n return r\n return {}\n\n\ndel _DRIVER, _unifying_receiver, _nano_receiver, _lenovo_receiver, _lightspeed_receiver\n", "path": "lib/logitech_receiver/base_usb.py"}], "after_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 
2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n## According to Logitech, they use the following product IDs (as of September 2020)\n## USB product IDs for receivers: 0xC526 - 0xC5xx\n## Wireless PIDs for hidpp10 devices: 0x2006 - 0x2019\n## Wireless PIDs for hidpp20 devices: 0x4002 - 0x4097, 0x4101 - 0x4102\n## USB product IDs for hidpp20 devices: 0xC07D - 0xC093, 0xC32B - 0xC344\n## Bluetooth product IDs (for hidpp20 devices): 0xB012 - 0xB0xx, 0xB32A - 0xB3xx\n\n# USB ids of Logitech wireless receivers.\n# Only receivers supporting the HID++ protocol can go in here.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom .descriptors import DEVICES as _DEVICES\nfrom .i18n import _\n\n# max_devices is only used for receivers that do not support reading from _R.receiver_info offset 0x03, default to 1\n# may_unpair is only used for receivers that do not support reading from _R.receiver_info offset 0x03, default to False\n# unpair is for receivers that do support reading from _R.receiver_info offset 0x03, no default\n## should this last be changed so that may_unpair is used for all receivers? 
writing to _R.receiver_pairing doesn't seem right\n# re_pairs determines whether a receiver pairs by replacing existing pairings, default to False\n## currently only one receiver is so marked - should there be more?\n# ex100_27mhz_wpid_fix enable workarounds for EX100 and possible other old 27Mhz receivers\n\n_DRIVER = ('hid-generic', 'generic-usb', 'logitech-djreceiver')\n\n_unifying_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 2,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Unifying Receiver')\n}\n\n_nano_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'may_unpair': False,\n 're_pairs': True\n}\n\n_nano_receiver_no_unpair = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'may_unpair': False,\n 'unpair': False,\n 're_pairs': True\n}\n\n_nano_receiver_max2 = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'max_devices': 2,\n 'may_unpair': False,\n 're_pairs': True\n}\n\n_nano_receiver_maxn = lambda product_id, max: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver'),\n 'max_devices': max,\n 'may_unpair': False,\n 're_pairs': True\n}\n\n_lenovo_receiver = lambda product_id: {\n 'vendor_id': 0x17ef,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Nano Receiver')\n}\n\n_lightspeed_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 2,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('Lightspeed Receiver')\n}\n\n_ex100_receiver = lambda product_id: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'usb_interface': 1,\n 'hid_driver': _DRIVER, # noqa: F821\n 'name': _('EX100 Receiver 27 Mhz'),\n 'max_devices': 4,\n 'may_unpair': False,\n 're_pairs': True,\n 'ex100_27mhz_wpid_fix': True\n}\n\n# standard Unifying receivers (marked with the orange Unifying logo)\nUNIFYING_RECEIVER_C52B = _unifying_receiver(0xc52b)\nUNIFYING_RECEIVER_C532 = _unifying_receiver(0xc532)\n\n# Nano receviers that support the Unifying protocol\nNANO_RECEIVER_ADVANCED = _nano_receiver_no_unpair(0xc52f)\n\n# ex100 old style receiver pre-unifyimg protocol\nEX100_27MHZ_RECEIVER_C517 = _ex100_receiver(0xc517)\n\n# Nano receivers that don't support the Unifying protocol\nNANO_RECEIVER_C518 = _nano_receiver(0xc518)\nNANO_RECEIVER_C51A = _nano_receiver(0xc51a)\nNANO_RECEIVER_C51B = _nano_receiver(0xc51b)\nNANO_RECEIVER_C521 = _nano_receiver(0xc521)\nNANO_RECEIVER_C525 = _nano_receiver(0xc525)\nNANO_RECEIVER_C526 = _nano_receiver(0xc526)\nNANO_RECEIVER_C52e = _nano_receiver_no_unpair(0xc52e)\nNANO_RECEIVER_C531 = _nano_receiver(0xc531)\nNANO_RECEIVER_C534 = _nano_receiver_max2(0xc534)\nNANO_RECEIVER_C537 = _nano_receiver(0xc537)\nNANO_RECEIVER_6042 = _lenovo_receiver(0x6042)\n\n# Lightspeed receivers\nLIGHTSPEED_RECEIVER_C539 = _lightspeed_receiver(0xc539)\nLIGHTSPEED_RECEIVER_C53a = _lightspeed_receiver(0xc53a)\nLIGHTSPEED_RECEIVER_C53f = _lightspeed_receiver(0xc53f)\nLIGHTSPEED_RECEIVER_C53d = _lightspeed_receiver(0xc53d)\nLIGHTSPEED_RECEIVER_C545 = _lightspeed_receiver(0xc545)\nLIGHTSPEED_RECEIVER_C541 = 
_lightspeed_receiver(0xc541)\nLIGHTSPEED_RECEIVER_C547 = _lightspeed_receiver(0xc547)\n\nALL = (\n UNIFYING_RECEIVER_C52B,\n UNIFYING_RECEIVER_C532,\n NANO_RECEIVER_ADVANCED,\n EX100_27MHZ_RECEIVER_C517,\n NANO_RECEIVER_C518,\n NANO_RECEIVER_C51A,\n NANO_RECEIVER_C51B,\n NANO_RECEIVER_C521,\n NANO_RECEIVER_C525,\n NANO_RECEIVER_C526,\n NANO_RECEIVER_C52e,\n NANO_RECEIVER_C531,\n NANO_RECEIVER_C534,\n NANO_RECEIVER_C537,\n NANO_RECEIVER_6042,\n LIGHTSPEED_RECEIVER_C539,\n LIGHTSPEED_RECEIVER_C53a,\n LIGHTSPEED_RECEIVER_C53f,\n LIGHTSPEED_RECEIVER_C53d,\n LIGHTSPEED_RECEIVER_C545,\n LIGHTSPEED_RECEIVER_C541,\n LIGHTSPEED_RECEIVER_C547,\n)\n\n_wired_device = lambda product_id, interface: {\n 'vendor_id': 0x046d,\n 'product_id': product_id,\n 'bus_id': 0x3,\n 'usb_interface': interface,\n 'isDevice': True\n}\n\n_bt_device = lambda product_id: {'vendor_id': 0x046d, 'product_id': product_id, 'bus_id': 0x5, 'isDevice': True}\n\nDEVICES = []\n\nfor _ignore, d in _DEVICES.items():\n if d.usbid:\n DEVICES.append(_wired_device(d.usbid, d.interface if d.interface else 2))\n if d.btid:\n DEVICES.append(_bt_device(d.btid))\n\n\ndef other_device_check(bus_id, vendor_id, product_id):\n \"\"\"Check whether product is a Logitech USB-connected or Bluetooth device based on bus, vendor, and product IDs\n This allows Solaar to support receiverless HID++ 2.0 devices that it knows nothing about\"\"\"\n if vendor_id != 0x46d: # Logitech\n return\n if bus_id == 0x3: # USB\n if (product_id >= 0xC07D and product_id <= 0xC093 or product_id >= 0xC32B and product_id <= 0xC344):\n return _wired_device(product_id, 2)\n elif bus_id == 0x5: # Bluetooth\n if (product_id >= 0xB012 and product_id <= 0xB0FF or product_id >= 0xB32A and product_id <= 0xB3FF):\n return _bt_device(product_id)\n\n\ndef product_information(usb_id):\n if isinstance(usb_id, str):\n usb_id = int(usb_id, 16)\n for r in ALL:\n if usb_id == r.get('product_id'):\n return r\n return {}\n\n\ndel _DRIVER, _unifying_receiver, _nano_receiver, _lenovo_receiver, _lightspeed_receiver\n", "path": "lib/logitech_receiver/base_usb.py"}]} |
gh_patches_debug_1314 | rasdani/github-patches | git_diff | netbox-community__netbox-9819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Circuit termination on interface not working when accessed from interface table
### NetBox version
v3.3-beta1
### Python version
3.9
### Steps to Reproduce
1. Create device with interfaces
2. Create circuit with Z-side on same site as device
3. Try to connect interface to circuit termination from interface table
4. Select B Side Circuit
5. Try to select Side for circuit
### Expected Behavior
On the B side you are able to select the side for the circuit
### Observed Behavior
The Side option menu is empty.
This is caused by a missing `$` prefix: the query parameter is written as `termination_{cable_end}_circuit` instead of `$termination_{cable_end}_circuit`.
https://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141
--- END ISSUE ---
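For reference before the code listing: in NetBox's dynamic form fields, a `$`-prefixed value in `query_params` is resolved against another field on the same form, while a plain string is sent as a literal filter value, so without the prefix the Side selector never learns which circuit was picked. A sketch of the corrected field definition inside `get_cable_form` (same context as the file below):

```python
# Sketch: filter the circuit terminations offered as "Side" by the circuit
# selected in the sibling form field; note the '$' prefix on the reference.
attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
    queryset=term_cls.objects.all(),
    label='Side',
    disabled_indicator='_occupied',
    query_params={
        'circuit_id': f'$termination_{cable_end}_circuit',  # '$' was missing
    }
)
```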
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/dcim/forms/connections.py`
Content:
```
1 from django import forms
2
3 from circuits.models import Circuit, CircuitTermination, Provider
4 from dcim.models import *
5 from utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField
6 from .models import CableForm
7
8
9 def get_cable_form(a_type, b_type):
10
11 class FormMetaclass(forms.models.ModelFormMetaclass):
12
13 def __new__(mcs, name, bases, attrs):
14
15 for cable_end, term_cls in (('a', a_type), ('b', b_type)):
16
17 attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(
18 queryset=Region.objects.all(),
19 label='Region',
20 required=False,
21 initial_params={
22 'sites': f'$termination_{cable_end}_site'
23 }
24 )
25 attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(
26 queryset=SiteGroup.objects.all(),
27 label='Site group',
28 required=False,
29 initial_params={
30 'sites': f'$termination_{cable_end}_site'
31 }
32 )
33 attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(
34 queryset=Site.objects.all(),
35 label='Site',
36 required=False,
37 query_params={
38 'region_id': f'$termination_{cable_end}_region',
39 'group_id': f'$termination_{cable_end}_sitegroup',
40 }
41 )
42 attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(
43 queryset=Location.objects.all(),
44 label='Location',
45 required=False,
46 null_option='None',
47 query_params={
48 'site_id': f'$termination_{cable_end}_site'
49 }
50 )
51
52 # Device component
53 if hasattr(term_cls, 'device'):
54
55 attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(
56 queryset=Rack.objects.all(),
57 label='Rack',
58 required=False,
59 null_option='None',
60 initial_params={
61 'devices': f'$termination_{cable_end}_device'
62 },
63 query_params={
64 'site_id': f'$termination_{cable_end}_site',
65 'location_id': f'$termination_{cable_end}_location',
66 }
67 )
68 attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(
69 queryset=Device.objects.all(),
70 label='Device',
71 required=False,
72 initial_params={
73 f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'
74 },
75 query_params={
76 'site_id': f'$termination_{cable_end}_site',
77 'location_id': f'$termination_{cable_end}_location',
78 'rack_id': f'$termination_{cable_end}_rack',
79 }
80 )
81 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
82 queryset=term_cls.objects.all(),
83 label=term_cls._meta.verbose_name.title(),
84 disabled_indicator='_occupied',
85 query_params={
86 'device_id': f'$termination_{cable_end}_device',
87 }
88 )
89
90 # PowerFeed
91 elif term_cls == PowerFeed:
92
93 attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(
94 queryset=PowerPanel.objects.all(),
95 label='Power Panel',
96 required=False,
97 initial_params={
98 'powerfeeds__in': f'${cable_end}_terminations'
99 },
100 query_params={
101 'site_id': f'$termination_{cable_end}_site',
102 'location_id': f'$termination_{cable_end}_location',
103 }
104 )
105 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
106 queryset=term_cls.objects.all(),
107 label='Power Feed',
108 disabled_indicator='_occupied',
109 query_params={
110 'powerpanel_id': f'$termination_{cable_end}_powerpanel',
111 }
112 )
113
114 # CircuitTermination
115 elif term_cls == CircuitTermination:
116
117 attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(
118 queryset=Provider.objects.all(),
119 label='Provider',
120 initial_params={
121 'circuits': f'$termination_{cable_end}_circuit'
122 },
123 required=False
124 )
125 attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(
126 queryset=Circuit.objects.all(),
127 label='Circuit',
128 initial_params={
129 'terminations__in': f'${cable_end}_terminations'
130 },
131 query_params={
132 'provider_id': f'$termination_{cable_end}_provider',
133 'site_id': f'$termination_{cable_end}_site',
134 }
135 )
136 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
137 queryset=term_cls.objects.all(),
138 label='Side',
139 disabled_indicator='_occupied',
140 query_params={
141 'circuit_id': f'termination_{cable_end}_circuit',
142 }
143 )
144
145 return super().__new__(mcs, name, bases, attrs)
146
147 class _CableForm(CableForm, metaclass=FormMetaclass):
148
149 def __init__(self, *args, **kwargs):
150
151 # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()
152 for field_name in ('a_terminations', 'b_terminations'):
153 if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:
154 kwargs['initial'][field_name] = [kwargs['initial'][field_name]]
155
156 super().__init__(*args, **kwargs)
157
158 if self.instance and self.instance.pk:
159 # Initialize A/B terminations when modifying an existing Cable instance
160 self.initial['a_terminations'] = self.instance.a_terminations
161 self.initial['b_terminations'] = self.instance.b_terminations
162
163 def clean(self):
164 super().clean()
165
166 # Set the A/B terminations on the Cable instance
167 self.instance.a_terminations = self.cleaned_data['a_terminations']
168 self.instance.b_terminations = self.cleaned_data['b_terminations']
169
170 return _CableForm
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py
--- a/netbox/dcim/forms/connections.py
+++ b/netbox/dcim/forms/connections.py
@@ -138,7 +138,7 @@
label='Side',
disabled_indicator='_occupied',
query_params={
- 'circuit_id': f'termination_{cable_end}_circuit',
+ 'circuit_id': f'$termination_{cable_end}_circuit',
}
)
| {"golden_diff": "diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py\n--- a/netbox/dcim/forms/connections.py\n+++ b/netbox/dcim/forms/connections.py\n@@ -138,7 +138,7 @@\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n- 'circuit_id': f'termination_{cable_end}_circuit',\n+ 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n", "issue": "Circuit termination on interface not working when accesssed from interface table\n### NetBox version\n\nv3.3-beta1\n\n### Python version\n\n3.9\n\n### Steps to Reproduce\n\n1. Create device with interfaces\r\n2. Create circuit with Z-side on same site as device\r\n3. Try to connect interface to circuit termination from interface table\r\n4. Select B Side Cicuit\r\n5. Try to select Side for circuit\n\n### Expected Behavior\n\nOn B Side you are able to select side for circuit\n\n### Observed Behavior\n\nSide option menu is empty because.\r\n\r\nThis is caused because $ is missing for `$termination_{cable_end}_circuit`\r\n\r\nhttps://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141\nCircuit termination on interface not working when accesssed from interface table\n### NetBox version\n\nv3.3-beta1\n\n### Python version\n\n3.9\n\n### Steps to Reproduce\n\n1. Create device with interfaces\r\n2. Create circuit with Z-side on same site as device\r\n3. Try to connect interface to circuit termination from interface table\r\n4. Select B Side Cicuit\r\n5. Try to select Side for circuit\n\n### Expected Behavior\n\nOn B Side you are able to select side for circuit\n\n### Observed Behavior\n\nSide option menu is empty because.\r\n\r\nThis is caused because $ is missing for `$termination_{cable_end}_circuit`\r\n\r\nhttps://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141\n", "before_files": [{"content": "from django import forms\n\nfrom circuits.models import Circuit, CircuitTermination, Provider\nfrom dcim.models import *\nfrom utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .models import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n label='Region',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n label='Site group',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n label='Site',\n required=False,\n query_params={\n 'region_id': f'$termination_{cable_end}_region',\n 'group_id': f'$termination_{cable_end}_sitegroup',\n }\n )\n attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n label='Location',\n required=False,\n null_option='None',\n query_params={\n 'site_id': f'$termination_{cable_end}_site'\n }\n )\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n label='Rack',\n required=False,\n null_option='None',\n 
initial_params={\n 'devices': f'$termination_{cable_end}_device'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label='Device',\n required=False,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n 'rack_id': f'$termination_{cable_end}_rack',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n disabled_indicator='_occupied',\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label='Power Panel',\n required=False,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Power Feed',\n disabled_indicator='_occupied',\n query_params={\n 'powerpanel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(\n queryset=Provider.objects.all(),\n label='Provider',\n initial_params={\n 'circuits': f'$termination_{cable_end}_circuit'\n },\n required=False\n )\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label='Circuit',\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n },\n query_params={\n 'provider_id': f'$termination_{cable_end}_provider',\n 'site_id': f'$termination_{cable_end}_site',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n 'circuit_id': f'termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, **kwargs):\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:\n kwargs['initial'][field_name] = [kwargs['initial'][field_name]]\n\n super().__init__(*args, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n self.instance.a_terminations = self.cleaned_data['a_terminations']\n self.instance.b_terminations = self.cleaned_data['b_terminations']\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}], "after_files": [{"content": "from django import forms\n\nfrom circuits.models import Circuit, CircuitTermination, Provider\nfrom 
dcim.models import *\nfrom utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .models import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n label='Region',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n label='Site group',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n label='Site',\n required=False,\n query_params={\n 'region_id': f'$termination_{cable_end}_region',\n 'group_id': f'$termination_{cable_end}_sitegroup',\n }\n )\n attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n label='Location',\n required=False,\n null_option='None',\n query_params={\n 'site_id': f'$termination_{cable_end}_site'\n }\n )\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n label='Rack',\n required=False,\n null_option='None',\n initial_params={\n 'devices': f'$termination_{cable_end}_device'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label='Device',\n required=False,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n 'rack_id': f'$termination_{cable_end}_rack',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n disabled_indicator='_occupied',\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label='Power Panel',\n required=False,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Power Feed',\n disabled_indicator='_occupied',\n query_params={\n 'powerpanel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(\n queryset=Provider.objects.all(),\n label='Provider',\n initial_params={\n 'circuits': f'$termination_{cable_end}_circuit'\n },\n required=False\n )\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label='Circuit',\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n },\n query_params={\n 'provider_id': 
f'$termination_{cable_end}_provider',\n 'site_id': f'$termination_{cable_end}_site',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, **kwargs):\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:\n kwargs['initial'][field_name] = [kwargs['initial'][field_name]]\n\n super().__init__(*args, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n self.instance.a_terminations = self.cleaned_data['a_terminations']\n self.instance.b_terminations = self.cleaned_data['b_terminations']\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}]} |
gh_patches_debug_1315 | rasdani/github-patches | git_diff | Theano__Theano-6539 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CUDA headers not included in compilation of GpuMagmaMatrixInverse node
I'm using theano with magma (enabled in theano config) to invert a matrix on the GPU:
```
import theano
import theano.tensor as T
A = T.matrix()
B = T.nlinalg.matrix_inverse(A)
f = theano.function([A], [B])
```
However, the function does not compile:
```
Exception: ('The following error happened while compiling the node', GpuMagmaMatrixInverse{inplace=True}(GpuContiguous.0), '\n', 'Compilation failed (return status=1): In file included from /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpQn5ybC/mod.cpp:15:0:. /usr/local/include/gpuarray/ext_cuda.h:4:18: fatal error: cuda.h: No such file or directory. compilation terminated.. ', '[GpuMagmaMatrixInverse{inplace=True}(<GpuArrayType<None>(float32, matrix)>)]')
```
Looking at the compile string:
```
Problem occurred during compilation with the command line below:
/usr/bin/g++ -shared -g -O3 -fno-math-errno -Wno-unused-label -Wno-unused-variable -Wno-write-strings -march=haswell -mmmx -mno-3dnow -msse -msse2 -msse3 -mssse3 -mno-sse4a -mcx16 -msahf -mmovbe -maes -mno-sha -mpclmul -mpopcnt -mabm -mno-lwp -mfma -mno-fma4 -mno-xop -mbmi -mbmi2 -mno-tbm -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mno-rtm -mno-hle -mrdrnd -mf16c -mfsgsbase -mno-rdseed -mno-prfchw -mno-adx -mfxsr -mxsave -mxsaveopt -mno-avx512f -mno-avx512er -mno-avx512cd -mno-avx512pf -mno-prefetchwt1 -mno-clflushopt -mno-xsavec -mno-xsaves -mno-avx512dq -mno-avx512bw -mno-avx512vl -mno-avx512ifma -mno-avx512vbmi -mno-clwb -mno-pcommit -mno-mwaitx --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=6144 -mtune=haswell -DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION -m64 -fPIC -I/usr/local/lib/python2.7/dist-packages/pygpu-0.7.5-py2.7-linux-x86_64.egg/pygpu -I/usr/local/lib/python2.7/dist-packages/numpy/core/include -I/usr/include -I/usr/local/lib/python2.7/dist-packages/theano/gpuarray/c_code -I/usr/local/magma/include -I/usr/local/lib/python2.7/dist-packages/numpy/core/include -I/usr/include/python2.7 -I/usr/local/lib/python2.7/dist-packages/theano/gof/c_code -L/usr/lib -L/usr/local/magma/lib -L/usr/lib -fvisibility=hidden -o /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpEwOSsC/328bc07052efa6880d04b71f5ab1c93b229fc94fe91f2ab4f71631da1bc97d76.so /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpEwOSsC/mod.cpp -lgpuarray -lmagma -lpython2.7
In file included from /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpEwOSsC/mod.cpp:15:0:
/usr/local/include/gpuarray/ext_cuda.h:4:18: fatal error: cuda.h: No such file or directory
compilation terminated.
```
it is apparent that `gpuarray/ext_cuda.h` includes `<cuda.h>` but the CUDA headers are not being provided to the compiler (g++).
As a workaround, I'm pointing the compiler to the CUDA headers by setting:
```
export CPATH=/usr/local/cuda/include
```
However, it would make more sense to do this with a `-I` in the compile string.
Probably, `std_include_dirs()` in `gof/cmodule.py` should include the CUDA headers as well (?)
--- END ISSUE ---
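For reference before the code listing: besides exporting `CPATH`, the same `-I` can be injected from the user side through Theano's own compiler flags, since `gcc.cxxflags` is appended to the g++ command shown in the error. This is only a stop-gap sketch, not the library-side fix the reporter proposes, and it assumes the standard CUDA prefix `/usr/local/cuda` (it can equivalently be set via `THEANO_FLAGS='gcc.cxxflags=-I/usr/local/cuda/include'`):

```python
# Sketch: point Theano's C compiler at the CUDA headers before any GPU op is
# compiled. The include path is an assumption; adjust it to the local CUDA install.
import theano
theano.config.gcc.cxxflags += ' -I/usr/local/cuda/include'

import theano.tensor as T

A = T.matrix()
B = T.nlinalg.matrix_inverse(A)
f = theano.function([A], [B])  # the magma op's C code should now find cuda.h
```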
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theano/gpuarray/linalg.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import warnings
4
5 import pkg_resources
6 import numpy as np
7 from numpy.linalg.linalg import LinAlgError
8
9 import theano
10 from theano import Op, config, tensor
11 from theano.scalar import bool as bool_t
12 from theano.gof import COp, ParamsType
13 from theano.gpuarray import GpuArrayType
14
15 from .basic_ops import (CGpuKernelBase, as_gpuarray_variable, gpu_contiguous, gpuarray_helper_inc_dir,
16 infer_context_name)
17 from .type import gpu_context_type
18
19 try:
20 import pygpu
21 from pygpu.basic import triu, tril
22 pygpu_available = True
23 except ImportError:
24 pygpu_available = False
25
26 cusolver_available = False
27 try:
28 import skcuda
29 from skcuda import cusolver
30 cusolver_available = True
31 except (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):
32 pass
33
34 cublas_available = False
35 try:
36 from skcuda import cublas
37 cublas_available = True
38 except (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):
39 pass
40
41 if cusolver_available:
42 # Add cusolver call as it is missing in skcuda
43 # SPOTRS
44 cusolver._libcusolver.cusolverDnSpotrs.restype = int
45 cusolver._libcusolver.cusolverDnSpotrs.argtypes = [cusolver.ctypes.c_void_p,
46 cusolver.ctypes.c_int,
47 cusolver.ctypes.c_int,
48 cusolver.ctypes.c_int,
49 cusolver.ctypes.c_void_p,
50 cusolver.ctypes.c_int,
51 cusolver.ctypes.c_void_p,
52 cusolver.ctypes.c_int,
53 cusolver.ctypes.c_void_p]
54
55 def cusolverDnSpotrs(handle, uplo, n, nrhs, A, lda,
56 B, ldb, devInfo):
57 """
58 Solve real single precision linear system for hermitian matrices.
59 References
60 ----------
61 `cusolverDn<t>potrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-potrs>`_
62 """
63
64 status = cusolver._libcusolver.cusolverDnSpotrs(handle, uplo, n, nrhs,
65 int(A), lda, int(B),
66 ldb, int(devInfo))
67 cusolver.cusolverCheckStatus(status)
68
69
70 def attach_cusolver_handle_to_context(ctx):
71 handle = getattr(ctx, 'cusolver_handle', None)
72 if handle is None:
73 with ctx:
74 ctx.cusolver_handle = cusolver.cusolverDnCreate()
75
76
77 def attach_cublas_handle_to_context(ctx):
78 handle = getattr(ctx, 'cublas_handle', None)
79 if handle is None:
80 with ctx:
81 ctx.cublas_handle = cublas.cublasCreate()
82
83
84 # it is a subset of all cases available in slinalg's MATRIX_STRUCTURE
85 MATRIX_STRUCTURES_SOLVE = (
86 'general',
87 'symmetric',
88 'lower_triangular',
89 'upper_triangular')
90
91
92 class GpuCusolverSolve(Op):
93 """
94 CUSOLVER GPU solver OP.
95
96 Parameters
97 ----------
98 trans
99 Whether to take the transpose of the input matrix or not.
100
101 """
102
103 __props__ = ('A_structure', 'trans', 'inplace')
104
105 def __init__(self, A_structure='general', trans='N', inplace=False):
106 self.trans = trans
107 self.inplace = inplace
108 self.A_structure = A_structure
109 if self.inplace:
110 self.destroy_map = {0: [0]}
111 assert A_structure in MATRIX_STRUCTURES_SOLVE
112 super(GpuCusolverSolve, self).__init__()
113
114 def make_node(self, inp1, inp2):
115 if not cusolver_available:
116 raise RuntimeError('CUSOLVER is not available and '
117 'GpuCusolverSolve Op can not be constructed.')
118 if skcuda.__version__ <= '0.5.1':
119 warnings.warn('The GpuSolve op requires scikit-cuda > 0.5.1 to work with CUDA 8')
120 context_name = infer_context_name(inp1, inp2)
121
122 inp1 = as_gpuarray_variable(inp1, context_name)
123 inp2 = as_gpuarray_variable(inp2, context_name)
124
125 inp1 = gpu_contiguous(inp1)
126 inp2 = gpu_contiguous(inp2)
127
128 # this op can only operate on float32 matrices
129 assert inp1.ndim == 2
130 assert inp2.ndim == 2
131 assert inp1.dtype == 'float32'
132 assert inp2.dtype == 'float32'
133
134 return theano.Apply(
135 self, [inp1, inp2],
136 [GpuArrayType('float32',
137 broadcastable=inp1.broadcastable,
138 context_name=context_name)()])
139
140 def prepare_node(self, node, storage_map, compute_map, impl):
141 ctx = node.inputs[0].type.context
142 attach_cusolver_handle_to_context(ctx)
143
144 def check_dev_info(self, dev_info):
145 val = np.asarray(dev_info)[0]
146 if val > 0:
147 raise LinAlgError('A is singular')
148
149 def perform(self, node, inputs, outputs):
150 context = inputs[0][0].context
151
152 # Size of the matrices to invert.
153 z = outputs[0]
154
155 # Matrix.
156 A = inputs[0]
157
158 # Solution vectors.
159 b = inputs[1]
160
161 assert(len(A.shape) == 2)
162 assert(len(b.shape) == 2)
163
164 if self.trans in ['T', 'C']:
165 trans = 1
166 l, n = A.shape
167 k, m = b.shape
168 elif self.trans == 'N':
169 trans = 0
170 n, l = A.shape
171 k, m = b.shape
172 else:
173 raise ValueError('Invalid value for trans')
174 if l != n:
175 raise ValueError('A must be a square matrix')
176 if n != k:
177 raise ValueError('A and b must be aligned.')
178
179 lda = max(1, n)
180 ldb = max(1, k)
181
182 # We copy A and b as cusolver operates inplace
183 b = pygpu.array(b, copy=True, order='F')
184 if not self.inplace:
185 A = pygpu.array(A, copy=True)
186 A_ptr = A.gpudata
187 b_ptr = b.gpudata
188
189 # cusolver expects a F ordered matrix, but A is not explicitly
190 # converted between C and F order, instead we switch the
191 # "transpose" flag.
192 if A.flags['C_CONTIGUOUS']:
193 trans = 1 - trans
194
195 if self.A_structure == 'symmetric':
196 with context:
197 workspace_size = cusolver.cusolverDnSpotrf_bufferSize(
198 context.cusolver_handle, 0, n, A_ptr, lda)
199
200 workspace = pygpu.zeros(workspace_size, dtype='float32',
201 context=context)
202
203 dev_info = pygpu.zeros((1,), dtype='int32', context=context)
204
205 workspace_ptr = workspace.gpudata
206 dev_info_ptr = dev_info.gpudata
207
208 with context:
209 cusolver.cusolverDnSpotrf(
210 context.cusolver_handle, 0, n, A_ptr, lda, workspace_ptr,
211 workspace_size, dev_info_ptr)
212 self.check_dev_info(dev_info)
213
214 cusolverDnSpotrs(
215 context.cusolver_handle, 0, n, m, A_ptr, lda,
216 b_ptr, ldb, dev_info_ptr)
217
218 else:
219 # general case for A
220 with context:
221 workspace_size = cusolver.cusolverDnSgetrf_bufferSize(
222 context.cusolver_handle, n, n, A_ptr, lda)
223
224 workspace = pygpu.zeros(workspace_size, dtype='float32',
225 context=context)
226
227 pivots = pygpu.zeros(n, dtype='int32', context=context)
228
229 dev_info = pygpu.zeros((1,), dtype='int32', context=context)
230
231 workspace_ptr = workspace.gpudata
232 pivots_ptr = pivots.gpudata
233 dev_info_ptr = dev_info.gpudata
234
235 with context:
236 cusolver.cusolverDnSgetrf(
237 context.cusolver_handle, n, n, A_ptr, lda, workspace_ptr,
238 pivots_ptr, dev_info_ptr)
239 self.check_dev_info(dev_info)
240
241 cusolver.cusolverDnSgetrs(
242 context.cusolver_handle, trans, n, m, A_ptr, lda,
243 pivots_ptr, b_ptr, ldb, dev_info_ptr)
244
245 z[0] = b
246
247
248 class GpuCublasTriangularSolve(Op):
249 """
250 CUBLAS GPU Triangular Solve Op.
251
252 Parameters
253 ----------
254 lower
255 Whether system is lower-triangular (True) or upper-triangular (False).
256 trans
257 Whether to take the transpose of the input matrix or not.
258 """
259 __props__ = ('trans', 'lower')
260
261 def __init__(self, lower=True, trans='N'):
262 self.trans = trans
263 self.lower = lower
264 super(GpuCublasTriangularSolve, self).__init__()
265
266 def make_node(self, inp1, inp2):
267 if not cublas_available:
268 raise RuntimeError('CUBLAS is not available and '
269 'GpuCublasTriangularSolve Op can not be constructed.')
270 context_name = infer_context_name(inp1, inp2)
271
272 inp1 = as_gpuarray_variable(inp1, context_name)
273 inp2 = as_gpuarray_variable(inp2, context_name)
274
275 inp1 = gpu_contiguous(inp1)
276 inp2 = gpu_contiguous(inp2)
277
278 # this op can only operate on float32 matrices
279 assert inp1.ndim == 2
280 assert inp2.ndim in [1, 2]
281 assert inp1.dtype == 'float32'
282 assert inp2.dtype == 'float32'
283
284 return theano.Apply(self, [inp1, inp2],
285 [GpuArrayType('float32',
286 broadcastable=inp2.broadcastable,
287 context_name=context_name)()])
288
289 def prepare_node(self, node, storage_map, compute_map, impl):
290 ctx = node.inputs[0].type.context
291 attach_cublas_handle_to_context(ctx)
292
293 def perform(self, node, inputs, outputs):
294 ctx = node.inputs[0].type.context
295
296 # Solution set
297 x = outputs[0]
298
299 # Matrix.
300 A = inputs[0]
301
302 # right hand side
303 b = inputs[1]
304
305 assert(len(A.shape) == 2)
306 assert(len(b.shape) in [1, 2])
307
308 # implicitly deal with the difference between C order
309 # and fortran order by flipping the trans and lower flags
310 lower = not self.lower
311 trans = self.trans
312 if trans in ['T', 'C']:
313 trans = 'N'
314 l, n = A.shape
315 elif trans == 'N':
316 trans = 'T'
317 n, l = A.shape
318 else:
319 raise ValueError('Invalid value for trans')
320
321 if b.ndim == 2:
322 k, m = b.shape
323 else:
324 k, = b.shape
325 m = 1
326
327 if l != n:
328 raise ValueError('A must be a square matrix')
329 if n != k:
330 raise ValueError('A and b must be aligned.')
331
332 lda = max(1, n)
333 ldb = max(1, k)
334
335 # solution overwrites right hand side on exit
336 b = pygpu.array(b, copy=True, order='F')
337
338 A_ptr = A.gpudata
339 b_ptr = b.gpudata
340
341 # unit scalar used for multiplication
342 alpha = 1.0
343 # indicates matrix A is on left of B
344 side = 'l'
345 # set whether upper or lower part of matrix A stored
346 uplo = 'l' if lower else 'u'
347 # indicates elements on diagonal of matrix A may not be unity
348 diag = 'n'
349
350 with ctx:
351 if b.ndim == 1:
352 # matrix vector solve
353 cublas.cublasStrsv(ctx.cublas_handle, uplo, trans, diag, n,
354 A_ptr, lda, b_ptr, 1)
355 else:
356 cublas.cublasStrsm(ctx.cublas_handle, side, uplo, trans, diag,
357 n, m, alpha, A_ptr, lda, b_ptr, ldb)
358
359 x[0] = b
360
361
362 def gpu_solve(A, b, A_structure='general', trans='N'):
363 if A_structure == 'lower':
364 return GpuCublasTriangularSolve(True, trans)(A, b)
365 elif A_structure == 'upper':
366 return GpuCublasTriangularSolve(False, trans)(A, b)
367
368 return GpuCusolverSolve(A_structure, trans)(A, b)
369
370
371 class GpuCholesky(Op):
372 """
373 CUSOLVER GPU Cholesky Op.
374
375 Given a real positive definite matrix `A` returns either a lower
376 triangular matrix `L` such that `A == dot(L, L.T)` if `lower == True`
377 else returns an upper triangular matrix `U` such that `A == dot(U.T, U)`
378 if `lower == False`.
379
380 Parameters
381 ----------
382 lower
383 Whether to return a lower rather than upper triangular decomposition.
384
385 """
386
387 __props__ = ('lower', 'inplace')
388
389 def __init__(self, lower=True, inplace=False):
390 self.lower = lower
391 self.inplace = inplace
392 if self.inplace:
393 self.destroy_map = {0: [0]}
394 super(GpuCholesky, self).__init__()
395
396 def clone_inplace(self):
397 return self.__class__(lower=self.lower, inplace=True)
398
399 def make_node(self, inp):
400 if not cusolver_available:
401 raise RuntimeError('CUSOLVER is not available and '
402 'GpuCholesky Op can not be constructed.')
403 if skcuda.__version__ <= '0.5.1':
404 warnings.warn('The GpuCholesky op requires scikit-cuda > 0.5.1 to work with CUDA 8')
405 if not pygpu_available:
406 raise RuntimeError('Missing pygpu or triu/tril functions.'
407 'Install or update libgpuarray.')
408 context_name = infer_context_name(inp)
409
410 inp = as_gpuarray_variable(inp, context_name)
411
412 inp = gpu_contiguous(inp)
413
414 # this op can only operate on float32 matrices
415 # because of current implementation of triu/tril.
416 # TODO: support float64 for triu/tril in GpuArray and for GpuCholesky/GpuCusolverSolve in Theano.
417 assert inp.ndim == 2
418 assert inp.dtype == 'float32'
419
420 return theano.Apply(self, [inp], [inp.type()])
421
422 def prepare_node(self, node, storage_map, compute_map, impl):
423 ctx = node.inputs[0].type.context
424 attach_cusolver_handle_to_context(ctx)
425
426 def perform(self, node, inputs, outputs):
427 context = inputs[0][0].context
428
429 # Input matrix.
430 A = inputs[0]
431
432 l, n = A.shape
433 if l != n:
434 raise ValueError('A must be a square matrix')
435
436 lda = max(1, n)
437
438 # cusolver operates on F ordered matrices, but A is expected
439 # to be symmetric so it does not matter.
440 # We copy A if needed
441 if self.inplace:
442 L = A
443 else:
444 L = pygpu.array(A, copy=True)
445
446 # The output matrix will contain only the upper or lower
447 # triangular factorization of A. If L is C ordered (it
448 # probably is as it is the default in Theano) we just switch
449 # the fill mode parameter of cusolver
450 l_parameter = 0 if self.lower else 1
451 if L.flags['C_CONTIGUOUS']:
452 l_parameter = 1 - l_parameter
453
454 L_ptr = L.gpudata
455
456 with context:
457 workspace_size = cusolver.cusolverDnSpotrf_bufferSize(
458 context.cusolver_handle, l_parameter, n, L_ptr, lda)
459
460 workspace = pygpu.zeros(workspace_size, dtype='float32',
461 context=context)
462
463 dev_info = pygpu.zeros((1,), dtype='int32', context=context)
464
465 workspace_ptr = workspace.gpudata
466 dev_info_ptr = dev_info.gpudata
467
468 cusolver.cusolverDnSpotrf(
469 context.cusolver_handle, l_parameter, n, L_ptr, lda, workspace_ptr,
470 workspace_size, dev_info_ptr)
471
472 val_dev_info = np.asarray(dev_info)[0]
473 if val_dev_info > 0:
474 raise LinAlgError('Cholesky decomposition failed (is A SPD?)')
475
476 # cusolver leaves the elements in the matrix outside the considered
477 # upper or lower triangle unchanged, so we need to put zeros outside
478 # the triangle
479 if self.lower:
480 tril(L)
481 else:
482 triu(L)
483
484 outputs[0][0] = L
485
486
487 def gpu_cholesky(A, lower=True):
488 return GpuCholesky(lower)(A)
489
490
491 # TODO: add support for float64
492 class GpuMagmaBase(COp):
493 """Base class for magma related operations. Add the necessary headers,
494 libraries and optionally the location of headers and library.
495 """
496 def c_headers(self):
497 return ['gpuarray/types.h', 'gpuarray/array.h', 'gpuarray/ext_cuda.h',
498 'gpuarray_helper.h', 'magma.h']
499
500 def c_header_dirs(self):
501 dirs = [gpuarray_helper_inc_dir(), pygpu.get_include()]
502 if config.magma.include_path:
503 dirs.append(config.magma.include_path)
504 return dirs
505
506 def c_libraries(self):
507 return ['magma']
508
509 def c_lib_dirs(self):
510 if config.magma.library_path:
511 return [config.magma.library_path]
512 return []
513
514 def prepare_node(self, node, storage_map, compute_map, impl):
515 from skcuda.magma import magma_init
516 ctx = node.inputs[0].type.context
517 if not getattr(ctx, 'is_magma_initialized', False):
518 with ctx:
519 magma_init()
520 ctx.is_magma_initialized = True
521
522
523 class GpuMagmaSVD(GpuMagmaBase):
524 """Computes the svd of a matrix :math:`A` using magma library.
525
526 .. warning::
527
528 Because of implementation constraints, this Op returns outputs
529 in order ``S, U, VT``. Use :func:`theano.gpuarray.linalg.gpu_svd`
530 to get them in expected order ``U, S, VT``.
531
532 """
533 __props__ = ('full_matrices', 'compute_uv')
534 _cop_num_inputs = 1
535 _cop_num_outputs = 3
536 check_input = False
537 params_type = ParamsType(full_matrices=bool_t, context=gpu_context_type)
538
539 def __init__(self, full_matrices=True, compute_uv=True):
540 self.full_matrices = full_matrices
541 self.compute_uv = compute_uv
542 COp.__init__(self, ['c_code/magma_svd.c'], 'APPLY_SPECIFIC(magma_svd)')
543
544 def make_node(self, A):
545 ctx_name = infer_context_name(A)
546 A = as_gpuarray_variable(A, ctx_name)
547 A = gpu_contiguous(A)
548 if A.ndim != 2:
549 raise LinAlgError("Matrix rank error")
550 if A.dtype != 'float32':
551 raise TypeError("only `float32` is supported for now")
552 if self.compute_uv:
553 return theano.Apply(self, [A],
554 # return S, U, VT
555 [GpuArrayType(A.dtype, broadcastable=[False],
556 context_name=ctx_name)(),
557 A.type(),
558 A.type()])
559 else:
560 return theano.Apply(self, [A],
561 # return only S
562 [GpuArrayType(A.dtype, broadcastable=[False],
563 context_name=ctx_name)()])
564
565 def prepare_node(self, node, storage_map, compute_map, impl):
566 super(GpuMagmaSVD, self).prepare_node(node, storage_map, compute_map, impl)
567 # Check node to prevent eventual errors with old pickled nodes.
568 if self.compute_uv:
569 A, B, C = node.outputs
570 # We expect order: S (vector), U (matrix), VT (matrix)
571 assert A.type.ndim == 1 and B.type.ndim == C.type.ndim == 2, \
572 "Due to implementation constraints, GpuMagmaSVD interface has changed and now returns (S, U, VT) " \
573 "instead of (U, S, VT). Either update your code, or use gpu_svd() to get the expected (U, S, VT) order."
574
575 def get_params(self, node):
576 return self.params_type.get_params(self, context=node.inputs[0].type.context)
577
578 def infer_shape(self, node, shapes):
579 x_shape, = shapes
580 M, N = x_shape
581 K = tensor.minimum(M, N)
582 s_shape = (K, )
583 if self.compute_uv:
584 u_shape = (M, M) if self.full_matrices else (M, K)
585 vt_shape = (N, N) if self.full_matrices else (K, N)
586 return [s_shape, u_shape, vt_shape]
587 else:
588 return [s_shape]
589
590
591 def gpu_svd(a, full_matrices=1, compute_uv=1):
592 """
593 This function performs the SVD on GPU.
594
595 Parameters
596 ----------
597 full_matrices : bool, optional
598 If True (default), u and v have the shapes (M, M) and (N, N),
599 respectively.
600 Otherwise, the shapes are (M, K) and (K, N), respectively,
601 where K = min(M, N).
602 compute_uv : bool, optional
603 Whether or not to compute u and v in addition to s.
604 True by default.
605
606 Returns
607 -------
608 U, V, D : matrices
609
610 """
611 out = GpuMagmaSVD(full_matrices, compute_uv)(a)
612 if compute_uv:
613 S, U, VT = out
614 out = [U, S, VT]
615 return out
616
617
618 class GpuMagmaMatrixInverse(GpuMagmaBase):
619 """Computes the inverse of a matrix :math:`A` using magma library.
620 """
621 __props__ = ('inplace', )
622 check_input = False
623 params_type = ParamsType(inplace=bool_t, context=gpu_context_type)
624
625 def __init__(self, inplace=False):
626 COp.__init__(self, ['c_code/magma_inv.c'], 'APPLY_SPECIFIC(magma_inv)')
627 self.inplace = inplace
628 if self.inplace:
629 self.destroy_map = {0: [0]}
630
631 def clone_inplace(self):
632 return self.__class__(inplace=True)
633
634 def make_node(self, A):
635 ctx_name = infer_context_name(A)
636 A = as_gpuarray_variable(A, ctx_name)
637 A = gpu_contiguous(A)
638 if A.ndim != 2:
639 raise LinAlgError("Matrix rank error")
640 if A.dtype != 'float32':
641 raise TypeError("only `float32` is supported for now")
642 return theano.Apply(self, [A], [A.type()])
643
644 def get_params(self, node):
645 return self.params_type.get_params(self, context=node.inputs[0].type.context)
646
647 def infer_shape(self, node, shapes):
648 return shapes
649
650
651 def gpu_matrix_inverse(a):
652 """
653 This function performs the matrix inverse on GPU.
654
655 Returns
656 -------
657 a_inv: matrix
658
659 """
660 return GpuMagmaMatrixInverse()(a)
661
662
663 class GpuMagmaCholesky(GpuMagmaBase, CGpuKernelBase):
664 """Computes the cholesky decomposition of a matrix :math:`A` using magma
665 library.
666
667 """
668 __props__ = ('lower', 'inplace')
669 check_input = False
670 params_type = ParamsType(lower=bool_t, inplace=bool_t, context=gpu_context_type)
671
672 def __init__(self, lower=True, inplace=False):
673 self.lower = lower
674 COp.__init__(self, ['c_code/magma_cholesky.c'], 'APPLY_SPECIFIC(magma_cholesky)')
675 self.inplace = inplace
676 if self.inplace:
677 self.destroy_map = {0: [0]}
678
679 def clone_inplace(self):
680 return self.__class__(lower=self.lower, inplace=True)
681
682 def make_node(self, A):
683 ctx_name = infer_context_name(A)
684 A = as_gpuarray_variable(A, ctx_name)
685 A = gpu_contiguous(A)
686 if A.ndim != 2:
687 raise LinAlgError("Matrix rank error")
688 if A.dtype != 'float32':
689 raise TypeError("only `float32` is supported for now")
690 return theano.Apply(self, [A], [A.type()])
691
692 def get_params(self, node):
693 return self.params_type.get_params(self, context=node.inputs[0].type.context)
694
695 def infer_shape(self, node, shapes):
696 return [shapes[0]]
697
698
699 class GpuMagmaQR(GpuMagmaBase, CGpuKernelBase):
700 """Computes the qr decomposition of a matrix :math:`A` using magma
701 library.
702
703 Parameters
704 ----------
705 complete : If `False`, returns only r.
706
707 .. warning::
708
709 Because of implementation constraints, this Op returns outputs
710 in order ``R, Q``. Use :func:`theano.gpuarray.linalg.gpu_qr`
711 to get them in expected order ``Q, R``.
712 """
713 __props__ = ('complete', )
714 _cop_num_inputs = 1
715 _cop_num_outputs = 2
716 check_input = False
717 params_type = ParamsType(complete=bool_t, context=gpu_context_type)
718
719 def __init__(self, complete=True):
720 self.complete = complete
721 COp.__init__(self, ['c_code/magma_qr.c'], 'APPLY_SPECIFIC(magma_qr)')
722
723 def make_node(self, A):
724 ctx_name = infer_context_name(A)
725 A = as_gpuarray_variable(A, ctx_name)
726 A = gpu_contiguous(A)
727 if A.ndim != 2:
728 raise LinAlgError("Matrix rank error")
729 if A.dtype != 'float32':
730 raise TypeError("only `float32` is supported for now")
731 if self.complete:
732 return theano.Apply(self, [A],
733 # return R, Q
734 [A.type(), A.type()])
735 else:
736 return theano.Apply(self, [A],
737 # return R
738 [A.type()])
739
740 def get_params(self, node):
741 return self.params_type.get_params(self, context=node.inputs[0].type.context)
742
743
744 def gpu_qr(a, complete=True):
745 """
746 This function performs the QR on GPU.
747
748 Parameters
749 ----------
750 complete : bool, optional
751 If `False`, returns only r.
752
753 Returns
754 -------
755 Q, R : matrices
756
757 """
758 out = GpuMagmaQR(complete)(a)
759 if complete:
760 R, Q = out
761 out = [Q, R]
762 return out
763
764
765 class GpuMagmaEigh(GpuMagmaBase):
766 """Computes the eigen decomposition of a symmetric matrix :math:`A` using magma
767 library.
768
769 Parameters
770 ----------
771 UPLO : Specifies whether the calculation is done with the lower triangular
772 part of matrix (`L`, default) or the upper triangular part (`U`).
773 compute_v : If `True`, computes eigenvalues and eigenvectors (`True`,
774 default). If `False`, computes only eigenvalues of matrix.
775 """
776 __props__ = ('lower', 'compute_v')
777 _cop_num_inputs = 1
778 _cop_num_outputs = 2
779 check_input = False
780 params_type = ParamsType(lower=bool_t, compute_v=bool_t,
781 context=gpu_context_type)
782
783 def __init__(self, UPLO='L', compute_v=True):
784 assert UPLO in ['L', 'U']
785 self.lower = UPLO == 'L'
786 self.compute_v = compute_v
787 COp.__init__(self, ['c_code/magma_eigh.c'], 'APPLY_SPECIFIC(magma_eigh)')
788
789 def make_node(self, A):
790 ctx_name = infer_context_name(A)
791 A = as_gpuarray_variable(A, ctx_name)
792 A = gpu_contiguous(A)
793 if A.ndim != 2:
794 raise LinAlgError("Matrix rank error")
795 if A.dtype != 'float32':
796 raise TypeError("only `float32` is supported for now")
797 if self.compute_v:
798 return theano.Apply(self, [A],
799 # return D, V
800 [GpuArrayType(A.dtype, broadcastable=[False],
801 context_name=ctx_name)(),
802 A.type()])
803 else:
804 return theano.Apply(self, [A],
805 # return D
806 [GpuArrayType(A.dtype, broadcastable=[False],
807 context_name=ctx_name)()])
808
809 def get_params(self, node):
810 return self.params_type.get_params(self, context=node.inputs[0].type.context)
811
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/theano/gpuarray/linalg.py b/theano/gpuarray/linalg.py
--- a/theano/gpuarray/linalg.py
+++ b/theano/gpuarray/linalg.py
@@ -498,7 +498,7 @@
         'gpuarray_helper.h', 'magma.h']
 
def c_header_dirs(self):
- dirs = [gpuarray_helper_inc_dir(), pygpu.get_include()]
+ dirs = [gpuarray_helper_inc_dir(), pygpu.get_include(), config.cuda.include_path]
if config.magma.include_path:
dirs.append(config.magma.include_path)
return dirs
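For readability, this is what `GpuMagmaBase.c_header_dirs` looks like after the patch above; `config.cuda.include_path` is expected to point at the CUDA toolkit's `include` directory (where `cuda.h` lives), so the generated module is compiled with an explicit `-I` for the CUDA headers instead of depending on `CPATH`:

```python
# GpuMagmaBase.c_header_dirs after the patch (restated from the diff above).
def c_header_dirs(self):
    # gpuarray helper headers, pygpu headers, and the CUDA headers needed by ext_cuda.h
    dirs = [gpuarray_helper_inc_dir(), pygpu.get_include(), config.cuda.include_path]
    if config.magma.include_path:
        dirs.append(config.magma.include_path)
    return dirs
```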
| {"golden_diff": "diff --git a/theano/gpuarray/linalg.py b/theano/gpuarray/linalg.py\n--- a/theano/gpuarray/linalg.py\n+++ b/theano/gpuarray/linalg.py\n@@ -498,7 +498,7 @@\n 'gpuarray_helper.h', 'magma.h']\n \n def c_header_dirs(self):\n- dirs = [gpuarray_helper_inc_dir(), pygpu.get_include()]\n+ dirs = [gpuarray_helper_inc_dir(), pygpu.get_include(), config.cuda.include_path]\n if config.magma.include_path:\n dirs.append(config.magma.include_path)\n return dirs\n", "issue": "CUDA headers not included in compilation of GpuMagmaMatrixInverse node\nI'm using theano with magma (enabled in theano config) to invert a matrix on the GPU:\r\n```\r\nimport theano\r\nimport theano.tensor as T\r\nA = T.matrix()\r\nB = T.nlinalg.matrix_inverse(A)\r\nf = theano.function([A], [B])\r\n```\r\n\r\nHowever, the function does not compile:\r\n```\r\nException: ('The following error happened while compiling the node', GpuMagmaMatrixInverse{inplace=True}(GpuContiguous.0), '\\n', 'Compilation failed (return status=1): In file included from /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpQn5ybC/mod.cpp:15:0:. /usr/local/include/gpuarray/ext_cuda.h:4:18: fatal error: cuda.h: No such file or directory. compilation terminated.. ', '[GpuMagmaMatrixInverse{inplace=True}(<GpuArrayType<None>(float32, matrix)>)]')\r\n```\r\n\r\nLooking at the compile string:\r\n```\r\nProblem occurred during compilation with the command line below:\r\n/usr/bin/g++ -shared -g -O3 -fno-math-errno -Wno-unused-label -Wno-unused-variable -Wno-write-strings -march=haswell -mmmx -mno-3dnow -msse -msse2 -msse3 -mssse3 -mno-sse4a -mcx16 -msahf -mmovbe -maes -mno-sha -mpclmul -mpopcnt -mabm -mno-lwp -mfma -mno-fma4 -mno-xop -mbmi -mbmi2 -mno-tbm -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mno-rtm -mno-hle -mrdrnd -mf16c -mfsgsbase -mno-rdseed -mno-prfchw -mno-adx -mfxsr -mxsave -mxsaveopt -mno-avx512f -mno-avx512er -mno-avx512cd -mno-avx512pf -mno-prefetchwt1 -mno-clflushopt -mno-xsavec -mno-xsaves -mno-avx512dq -mno-avx512bw -mno-avx512vl -mno-avx512ifma -mno-avx512vbmi -mno-clwb -mno-pcommit -mno-mwaitx --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=6144 -mtune=haswell -DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION -m64 -fPIC -I/usr/local/lib/python2.7/dist-packages/pygpu-0.7.5-py2.7-linux-x86_64.egg/pygpu -I/usr/local/lib/python2.7/dist-packages/numpy/core/include -I/usr/include -I/usr/local/lib/python2.7/dist-packages/theano/gpuarray/c_code -I/usr/local/magma/include -I/usr/local/lib/python2.7/dist-packages/numpy/core/include -I/usr/include/python2.7 -I/usr/local/lib/python2.7/dist-packages/theano/gof/c_code -L/usr/lib -L/usr/local/magma/lib -L/usr/lib -fvisibility=hidden -o /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpEwOSsC/328bc07052efa6880d04b71f5ab1c93b229fc94fe91f2ab4f71631da1bc97d76.so /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpEwOSsC/mod.cpp -lgpuarray -lmagma -lpython2.7\r\nIn file included from /home/user/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/tmpEwOSsC/mod.cpp:15:0:\r\n/usr/local/include/gpuarray/ext_cuda.h:4:18: fatal error: cuda.h: No such file or directory\r\ncompilation terminated.\r\n```\r\nit is apparent that `gpuarray/ext_cuda.h` includes `<cuda.h>` but the CUDA headers are not being provided to the compiler (g++).\r\n\r\nAs a workaround, I'm pointing the compiler to the CUDA headers by 
setting:\r\n```\r\nexport CPATH=/usr/local/cuda/include\r\n```\r\nHowever, it would make more sense to do this with a `-I` in the compile string.\r\n\r\nProbably, `std_include_dirs()` in `gof/cmodule.py` should include the CUDA headers as well (?)\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport warnings\n\nimport pkg_resources\nimport numpy as np\nfrom numpy.linalg.linalg import LinAlgError\n\nimport theano\nfrom theano import Op, config, tensor\nfrom theano.scalar import bool as bool_t\nfrom theano.gof import COp, ParamsType\nfrom theano.gpuarray import GpuArrayType\n\nfrom .basic_ops import (CGpuKernelBase, as_gpuarray_variable, gpu_contiguous, gpuarray_helper_inc_dir,\n infer_context_name)\nfrom .type import gpu_context_type\n\ntry:\n import pygpu\n from pygpu.basic import triu, tril\n pygpu_available = True\nexcept ImportError:\n pygpu_available = False\n\ncusolver_available = False\ntry:\n import skcuda\n from skcuda import cusolver\n cusolver_available = True\nexcept (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):\n pass\n\ncublas_available = False\ntry:\n from skcuda import cublas\n cublas_available = True\nexcept (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):\n pass\n\nif cusolver_available:\n # Add cusolver call as it is missing in skcuda\n # SPOTRS\n cusolver._libcusolver.cusolverDnSpotrs.restype = int\n cusolver._libcusolver.cusolverDnSpotrs.argtypes = [cusolver.ctypes.c_void_p,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_void_p,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_void_p,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_void_p]\n\n def cusolverDnSpotrs(handle, uplo, n, nrhs, A, lda,\n B, ldb, devInfo):\n \"\"\"\n Solve real single precision linear system for hermitian matrices.\n References\n ----------\n `cusolverDn<t>potrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-potrs>`_\n \"\"\"\n\n status = cusolver._libcusolver.cusolverDnSpotrs(handle, uplo, n, nrhs,\n int(A), lda, int(B),\n ldb, int(devInfo))\n cusolver.cusolverCheckStatus(status)\n\n\ndef attach_cusolver_handle_to_context(ctx):\n handle = getattr(ctx, 'cusolver_handle', None)\n if handle is None:\n with ctx:\n ctx.cusolver_handle = cusolver.cusolverDnCreate()\n\n\ndef attach_cublas_handle_to_context(ctx):\n handle = getattr(ctx, 'cublas_handle', None)\n if handle is None:\n with ctx:\n ctx.cublas_handle = cublas.cublasCreate()\n\n\n# it is a subset of all cases available in slinalg's MATRIX_STRUCTURE\nMATRIX_STRUCTURES_SOLVE = (\n 'general',\n 'symmetric',\n 'lower_triangular',\n 'upper_triangular')\n\n\nclass GpuCusolverSolve(Op):\n \"\"\"\n CUSOLVER GPU solver OP.\n\n Parameters\n ----------\n trans\n Whether to take the transpose of the input matrix or not.\n\n \"\"\"\n\n __props__ = ('A_structure', 'trans', 'inplace')\n\n def __init__(self, A_structure='general', trans='N', inplace=False):\n self.trans = trans\n self.inplace = inplace\n self.A_structure = A_structure\n if self.inplace:\n self.destroy_map = {0: [0]}\n assert A_structure in MATRIX_STRUCTURES_SOLVE\n super(GpuCusolverSolve, self).__init__()\n\n def make_node(self, inp1, inp2):\n if not cusolver_available:\n raise RuntimeError('CUSOLVER is not available and '\n 'GpuCusolverSolve Op can not be constructed.')\n if skcuda.__version__ <= '0.5.1':\n warnings.warn('The GpuSolve op requires scikit-cuda > 0.5.1 to work with CUDA 8')\n context_name = infer_context_name(inp1, 
inp2)\n\n inp1 = as_gpuarray_variable(inp1, context_name)\n inp2 = as_gpuarray_variable(inp2, context_name)\n\n inp1 = gpu_contiguous(inp1)\n inp2 = gpu_contiguous(inp2)\n\n # this op can only operate on float32 matrices\n assert inp1.ndim == 2\n assert inp2.ndim == 2\n assert inp1.dtype == 'float32'\n assert inp2.dtype == 'float32'\n\n return theano.Apply(\n self, [inp1, inp2],\n [GpuArrayType('float32',\n broadcastable=inp1.broadcastable,\n context_name=context_name)()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n ctx = node.inputs[0].type.context\n attach_cusolver_handle_to_context(ctx)\n\n def check_dev_info(self, dev_info):\n val = np.asarray(dev_info)[0]\n if val > 0:\n raise LinAlgError('A is singular')\n\n def perform(self, node, inputs, outputs):\n context = inputs[0][0].context\n\n # Size of the matrices to invert.\n z = outputs[0]\n\n # Matrix.\n A = inputs[0]\n\n # Solution vectors.\n b = inputs[1]\n\n assert(len(A.shape) == 2)\n assert(len(b.shape) == 2)\n\n if self.trans in ['T', 'C']:\n trans = 1\n l, n = A.shape\n k, m = b.shape\n elif self.trans == 'N':\n trans = 0\n n, l = A.shape\n k, m = b.shape\n else:\n raise ValueError('Invalid value for trans')\n if l != n:\n raise ValueError('A must be a square matrix')\n if n != k:\n raise ValueError('A and b must be aligned.')\n\n lda = max(1, n)\n ldb = max(1, k)\n\n # We copy A and b as cusolver operates inplace\n b = pygpu.array(b, copy=True, order='F')\n if not self.inplace:\n A = pygpu.array(A, copy=True)\n A_ptr = A.gpudata\n b_ptr = b.gpudata\n\n # cusolver expects a F ordered matrix, but A is not explicitly\n # converted between C and F order, instead we switch the\n # \"transpose\" flag.\n if A.flags['C_CONTIGUOUS']:\n trans = 1 - trans\n\n if self.A_structure == 'symmetric':\n with context:\n workspace_size = cusolver.cusolverDnSpotrf_bufferSize(\n context.cusolver_handle, 0, n, A_ptr, lda)\n\n workspace = pygpu.zeros(workspace_size, dtype='float32',\n context=context)\n\n dev_info = pygpu.zeros((1,), dtype='int32', context=context)\n\n workspace_ptr = workspace.gpudata\n dev_info_ptr = dev_info.gpudata\n\n with context:\n cusolver.cusolverDnSpotrf(\n context.cusolver_handle, 0, n, A_ptr, lda, workspace_ptr,\n workspace_size, dev_info_ptr)\n self.check_dev_info(dev_info)\n\n cusolverDnSpotrs(\n context.cusolver_handle, 0, n, m, A_ptr, lda,\n b_ptr, ldb, dev_info_ptr)\n\n else:\n # general case for A\n with context:\n workspace_size = cusolver.cusolverDnSgetrf_bufferSize(\n context.cusolver_handle, n, n, A_ptr, lda)\n\n workspace = pygpu.zeros(workspace_size, dtype='float32',\n context=context)\n\n pivots = pygpu.zeros(n, dtype='int32', context=context)\n\n dev_info = pygpu.zeros((1,), dtype='int32', context=context)\n\n workspace_ptr = workspace.gpudata\n pivots_ptr = pivots.gpudata\n dev_info_ptr = dev_info.gpudata\n\n with context:\n cusolver.cusolverDnSgetrf(\n context.cusolver_handle, n, n, A_ptr, lda, workspace_ptr,\n pivots_ptr, dev_info_ptr)\n self.check_dev_info(dev_info)\n\n cusolver.cusolverDnSgetrs(\n context.cusolver_handle, trans, n, m, A_ptr, lda,\n pivots_ptr, b_ptr, ldb, dev_info_ptr)\n\n z[0] = b\n\n\nclass GpuCublasTriangularSolve(Op):\n \"\"\"\n CUBLAS GPU Triangular Solve Op.\n\n Parameters\n ----------\n lower\n Whether system is lower-triangular (True) or upper-triangular (False).\n trans\n Whether to take the transpose of the input matrix or not.\n \"\"\"\n __props__ = ('trans', 'lower')\n\n def __init__(self, lower=True, trans='N'):\n self.trans = trans\n self.lower = 
lower\n super(GpuCublasTriangularSolve, self).__init__()\n\n def make_node(self, inp1, inp2):\n if not cublas_available:\n raise RuntimeError('CUBLAS is not available and '\n 'GpuCublasTriangularSolve Op can not be constructed.')\n context_name = infer_context_name(inp1, inp2)\n\n inp1 = as_gpuarray_variable(inp1, context_name)\n inp2 = as_gpuarray_variable(inp2, context_name)\n\n inp1 = gpu_contiguous(inp1)\n inp2 = gpu_contiguous(inp2)\n\n # this op can only operate on float32 matrices\n assert inp1.ndim == 2\n assert inp2.ndim in [1, 2]\n assert inp1.dtype == 'float32'\n assert inp2.dtype == 'float32'\n\n return theano.Apply(self, [inp1, inp2],\n [GpuArrayType('float32',\n broadcastable=inp2.broadcastable,\n context_name=context_name)()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n ctx = node.inputs[0].type.context\n attach_cublas_handle_to_context(ctx)\n\n def perform(self, node, inputs, outputs):\n ctx = node.inputs[0].type.context\n\n # Solution set\n x = outputs[0]\n\n # Matrix.\n A = inputs[0]\n\n # right hand side\n b = inputs[1]\n\n assert(len(A.shape) == 2)\n assert(len(b.shape) in [1, 2])\n\n # implicitly deal with the difference between C order\n # and fortran order by flipping the trans and lower flags\n lower = not self.lower\n trans = self.trans\n if trans in ['T', 'C']:\n trans = 'N'\n l, n = A.shape\n elif trans == 'N':\n trans = 'T'\n n, l = A.shape\n else:\n raise ValueError('Invalid value for trans')\n\n if b.ndim == 2:\n k, m = b.shape\n else:\n k, = b.shape\n m = 1\n\n if l != n:\n raise ValueError('A must be a square matrix')\n if n != k:\n raise ValueError('A and b must be aligned.')\n\n lda = max(1, n)\n ldb = max(1, k)\n\n # solution overwrites right hand side on exit\n b = pygpu.array(b, copy=True, order='F')\n\n A_ptr = A.gpudata\n b_ptr = b.gpudata\n\n # unit scalar used for multiplication\n alpha = 1.0\n # indicates matrix A is on left of B\n side = 'l'\n # set whether upper or lower part of matrix A stored\n uplo = 'l' if lower else 'u'\n # indicates elements on diagonal of matrix A may not be unity\n diag = 'n'\n\n with ctx:\n if b.ndim == 1:\n # matrix vector solve\n cublas.cublasStrsv(ctx.cublas_handle, uplo, trans, diag, n,\n A_ptr, lda, b_ptr, 1)\n else:\n cublas.cublasStrsm(ctx.cublas_handle, side, uplo, trans, diag,\n n, m, alpha, A_ptr, lda, b_ptr, ldb)\n\n x[0] = b\n\n\ndef gpu_solve(A, b, A_structure='general', trans='N'):\n if A_structure == 'lower':\n return GpuCublasTriangularSolve(True, trans)(A, b)\n elif A_structure == 'upper':\n return GpuCublasTriangularSolve(False, trans)(A, b)\n\n return GpuCusolverSolve(A_structure, trans)(A, b)\n\n\nclass GpuCholesky(Op):\n \"\"\"\n CUSOLVER GPU Cholesky Op.\n\n Given a real positive definite matrix `A` returns either a lower\n triangular matrix `L` such that `A == dot(L, L.T)` if `lower == True`\n else returns an upper triangular matrix `U` such that `A == dot(U.T, U)`\n if `lower == False`.\n\n Parameters\n ----------\n lower\n Whether to return a lower rather than upper triangular decomposition.\n\n \"\"\"\n\n __props__ = ('lower', 'inplace')\n\n def __init__(self, lower=True, inplace=False):\n self.lower = lower\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [0]}\n super(GpuCholesky, self).__init__()\n\n def clone_inplace(self):\n return self.__class__(lower=self.lower, inplace=True)\n\n def make_node(self, inp):\n if not cusolver_available:\n raise RuntimeError('CUSOLVER is not available and '\n 'GpuCholesky Op can not be constructed.')\n if 
skcuda.__version__ <= '0.5.1':\n warnings.warn('The GpuCholesky op requires scikit-cuda > 0.5.1 to work with CUDA 8')\n if not pygpu_available:\n raise RuntimeError('Missing pygpu or triu/tril functions.'\n 'Install or update libgpuarray.')\n context_name = infer_context_name(inp)\n\n inp = as_gpuarray_variable(inp, context_name)\n\n inp = gpu_contiguous(inp)\n\n # this op can only operate on float32 matrices\n # because of current implementation of triu/tril.\n # TODO: support float64 for triu/tril in GpuArray and for GpuCholesky/GpuCusolverSolve in Theano.\n assert inp.ndim == 2\n assert inp.dtype == 'float32'\n\n return theano.Apply(self, [inp], [inp.type()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n ctx = node.inputs[0].type.context\n attach_cusolver_handle_to_context(ctx)\n\n def perform(self, node, inputs, outputs):\n context = inputs[0][0].context\n\n # Input matrix.\n A = inputs[0]\n\n l, n = A.shape\n if l != n:\n raise ValueError('A must be a square matrix')\n\n lda = max(1, n)\n\n # cusolver operates on F ordered matrices, but A is expected\n # to be symmetric so it does not matter.\n # We copy A if needed\n if self.inplace:\n L = A\n else:\n L = pygpu.array(A, copy=True)\n\n # The output matrix will contain only the upper or lower\n # triangular factorization of A. If L is C ordered (it\n # probably is as it is the default in Theano) we just switch\n # the fill mode parameter of cusolver\n l_parameter = 0 if self.lower else 1\n if L.flags['C_CONTIGUOUS']:\n l_parameter = 1 - l_parameter\n\n L_ptr = L.gpudata\n\n with context:\n workspace_size = cusolver.cusolverDnSpotrf_bufferSize(\n context.cusolver_handle, l_parameter, n, L_ptr, lda)\n\n workspace = pygpu.zeros(workspace_size, dtype='float32',\n context=context)\n\n dev_info = pygpu.zeros((1,), dtype='int32', context=context)\n\n workspace_ptr = workspace.gpudata\n dev_info_ptr = dev_info.gpudata\n\n cusolver.cusolverDnSpotrf(\n context.cusolver_handle, l_parameter, n, L_ptr, lda, workspace_ptr,\n workspace_size, dev_info_ptr)\n\n val_dev_info = np.asarray(dev_info)[0]\n if val_dev_info > 0:\n raise LinAlgError('Cholesky decomposition failed (is A SPD?)')\n\n # cusolver leaves the elements in the matrix outside the considered\n # upper or lower triangle unchanged, so we need to put zeros outside\n # the triangle\n if self.lower:\n tril(L)\n else:\n triu(L)\n\n outputs[0][0] = L\n\n\ndef gpu_cholesky(A, lower=True):\n return GpuCholesky(lower)(A)\n\n\n# TODO: add support for float64\nclass GpuMagmaBase(COp):\n \"\"\"Base class for magma related operations. Add the necessary headers,\n libraries and optionally the location of headers and library.\n \"\"\"\n def c_headers(self):\n return ['gpuarray/types.h', 'gpuarray/array.h', 'gpuarray/ext_cuda.h',\n 'gpuarray_helper.h', 'magma.h']\n\n def c_header_dirs(self):\n dirs = [gpuarray_helper_inc_dir(), pygpu.get_include()]\n if config.magma.include_path:\n dirs.append(config.magma.include_path)\n return dirs\n\n def c_libraries(self):\n return ['magma']\n\n def c_lib_dirs(self):\n if config.magma.library_path:\n return [config.magma.library_path]\n return []\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n from skcuda.magma import magma_init\n ctx = node.inputs[0].type.context\n if not getattr(ctx, 'is_magma_initialized', False):\n with ctx:\n magma_init()\n ctx.is_magma_initialized = True\n\n\nclass GpuMagmaSVD(GpuMagmaBase):\n \"\"\"Computes the svd of a matrix :math:`A` using magma library.\n\n .. 
warning::\n\n Because of implementation constraints, this Op returns outputs\n in order ``S, U, VT``. Use :func:`theano.gpuarray.linalg.gpu_svd`\n to get them in expected order ``U, S, VT``.\n\n \"\"\"\n __props__ = ('full_matrices', 'compute_uv')\n _cop_num_inputs = 1\n _cop_num_outputs = 3\n check_input = False\n params_type = ParamsType(full_matrices=bool_t, context=gpu_context_type)\n\n def __init__(self, full_matrices=True, compute_uv=True):\n self.full_matrices = full_matrices\n self.compute_uv = compute_uv\n COp.__init__(self, ['c_code/magma_svd.c'], 'APPLY_SPECIFIC(magma_svd)')\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n if self.compute_uv:\n return theano.Apply(self, [A],\n # return S, U, VT\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)(),\n A.type(),\n A.type()])\n else:\n return theano.Apply(self, [A],\n # return only S\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n super(GpuMagmaSVD, self).prepare_node(node, storage_map, compute_map, impl)\n # Check node to prevent eventual errors with old pickled nodes.\n if self.compute_uv:\n A, B, C = node.outputs\n # We expect order: S (vector), U (matrix), VT (matrix)\n assert A.type.ndim == 1 and B.type.ndim == C.type.ndim == 2, \\\n \"Due to implementation constraints, GpuMagmaSVD interface has changed and now returns (S, U, VT) \" \\\n \"instead of (U, S, VT). Either update your code, or use gpu_svd() to get the expected (U, S, VT) order.\"\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n def infer_shape(self, node, shapes):\n x_shape, = shapes\n M, N = x_shape\n K = tensor.minimum(M, N)\n s_shape = (K, )\n if self.compute_uv:\n u_shape = (M, M) if self.full_matrices else (M, K)\n vt_shape = (N, N) if self.full_matrices else (K, N)\n return [s_shape, u_shape, vt_shape]\n else:\n return [s_shape]\n\n\ndef gpu_svd(a, full_matrices=1, compute_uv=1):\n \"\"\"\n This function performs the SVD on GPU.\n\n Parameters\n ----------\n full_matrices : bool, optional\n If True (default), u and v have the shapes (M, M) and (N, N),\n respectively.\n Otherwise, the shapes are (M, K) and (K, N), respectively,\n where K = min(M, N).\n compute_uv : bool, optional\n Whether or not to compute u and v in addition to s.\n True by default.\n\n Returns\n -------\n U, V, D : matrices\n\n \"\"\"\n out = GpuMagmaSVD(full_matrices, compute_uv)(a)\n if compute_uv:\n S, U, VT = out\n out = [U, S, VT]\n return out\n\n\nclass GpuMagmaMatrixInverse(GpuMagmaBase):\n \"\"\"Computes the inverse of a matrix :math:`A` using magma library.\n \"\"\"\n __props__ = ('inplace', )\n check_input = False\n params_type = ParamsType(inplace=bool_t, context=gpu_context_type)\n\n def __init__(self, inplace=False):\n COp.__init__(self, ['c_code/magma_inv.c'], 'APPLY_SPECIFIC(magma_inv)')\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [0]}\n\n def clone_inplace(self):\n return self.__class__(inplace=True)\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` 
is supported for now\")\n return theano.Apply(self, [A], [A.type()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n def infer_shape(self, node, shapes):\n return shapes\n\n\ndef gpu_matrix_inverse(a):\n \"\"\"\n This function performs the matrix inverse on GPU.\n\n Returns\n -------\n a_inv: matrix\n\n \"\"\"\n return GpuMagmaMatrixInverse()(a)\n\n\nclass GpuMagmaCholesky(GpuMagmaBase, CGpuKernelBase):\n \"\"\"Computes the cholesky decomposition of a matrix :math:`A` using magma\n library.\n\n \"\"\"\n __props__ = ('lower', 'inplace')\n check_input = False\n params_type = ParamsType(lower=bool_t, inplace=bool_t, context=gpu_context_type)\n\n def __init__(self, lower=True, inplace=False):\n self.lower = lower\n COp.__init__(self, ['c_code/magma_cholesky.c'], 'APPLY_SPECIFIC(magma_cholesky)')\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [0]}\n\n def clone_inplace(self):\n return self.__class__(lower=self.lower, inplace=True)\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n return theano.Apply(self, [A], [A.type()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n def infer_shape(self, node, shapes):\n return [shapes[0]]\n\n\nclass GpuMagmaQR(GpuMagmaBase, CGpuKernelBase):\n \"\"\"Computes the qr decomposition of a matrix :math:`A` using magma\n library.\n\n Parameters\n ----------\n complete : If `False`, returns only r.\n\n .. warning::\n\n Because of implementation constraints, this Op returns outputs\n in order ``R, Q``. Use :func:`theano.gpuarray.linalg.gpu_qr`\n to get them in expected order ``Q, R``.\n \"\"\"\n __props__ = ('complete', )\n _cop_num_inputs = 1\n _cop_num_outputs = 2\n check_input = False\n params_type = ParamsType(complete=bool_t, context=gpu_context_type)\n\n def __init__(self, complete=True):\n self.complete = complete\n COp.__init__(self, ['c_code/magma_qr.c'], 'APPLY_SPECIFIC(magma_qr)')\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n if self.complete:\n return theano.Apply(self, [A],\n # return R, Q\n [A.type(), A.type()])\n else:\n return theano.Apply(self, [A],\n # return R\n [A.type()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n\ndef gpu_qr(a, complete=True):\n \"\"\"\n This function performs the QR on GPU.\n\n Parameters\n ----------\n complete : bool, optional\n If `False`, returns only r.\n\n Returns\n -------\n Q, R : matrices\n\n \"\"\"\n out = GpuMagmaQR(complete)(a)\n if complete:\n R, Q = out\n out = [Q, R]\n return out\n\n\nclass GpuMagmaEigh(GpuMagmaBase):\n \"\"\"Computes the eigen decomposition of a symmetric matrix :math:`A` using magma\n library.\n\n Parameters\n ----------\n UPLO : Specifies whether the calculation is done with the lower triangular\n part of matrix (`L`, default) or the upper triangular part (`U`).\n compute_v : If `True`, computes eigenvalues and eigenvectors (`True`,\n default). 
If `False`, computes only eigenvalues of matrix.\n \"\"\"\n __props__ = ('lower', 'compute_v')\n _cop_num_inputs = 1\n _cop_num_outputs = 2\n check_input = False\n params_type = ParamsType(lower=bool_t, compute_v=bool_t,\n context=gpu_context_type)\n\n def __init__(self, UPLO='L', compute_v=True):\n assert UPLO in ['L', 'U']\n self.lower = UPLO == 'L'\n self.compute_v = compute_v\n COp.__init__(self, ['c_code/magma_eigh.c'], 'APPLY_SPECIFIC(magma_eigh)')\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n if self.compute_v:\n return theano.Apply(self, [A],\n # return D, V\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)(),\n A.type()])\n else:\n return theano.Apply(self, [A],\n # return D\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n", "path": "theano/gpuarray/linalg.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport warnings\n\nimport pkg_resources\nimport numpy as np\nfrom numpy.linalg.linalg import LinAlgError\n\nimport theano\nfrom theano import Op, config, tensor\nfrom theano.scalar import bool as bool_t\nfrom theano.gof import COp, ParamsType\nfrom theano.gpuarray import GpuArrayType\n\nfrom .basic_ops import (CGpuKernelBase, as_gpuarray_variable, gpu_contiguous, gpuarray_helper_inc_dir,\n infer_context_name)\nfrom .type import gpu_context_type\n\ntry:\n import pygpu\n from pygpu.basic import triu, tril\n pygpu_available = True\nexcept ImportError:\n pygpu_available = False\n\ncusolver_available = False\ntry:\n import skcuda\n from skcuda import cusolver\n cusolver_available = True\nexcept (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):\n pass\n\ncublas_available = False\ntry:\n from skcuda import cublas\n cublas_available = True\nexcept (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):\n pass\n\nif cusolver_available:\n # Add cusolver call as it is missing in skcuda\n # SPOTRS\n cusolver._libcusolver.cusolverDnSpotrs.restype = int\n cusolver._libcusolver.cusolverDnSpotrs.argtypes = [cusolver.ctypes.c_void_p,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_void_p,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_void_p,\n cusolver.ctypes.c_int,\n cusolver.ctypes.c_void_p]\n\n def cusolverDnSpotrs(handle, uplo, n, nrhs, A, lda,\n B, ldb, devInfo):\n \"\"\"\n Solve real single precision linear system for hermitian matrices.\n References\n ----------\n `cusolverDn<t>potrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-potrs>`_\n \"\"\"\n\n status = cusolver._libcusolver.cusolverDnSpotrs(handle, uplo, n, nrhs,\n int(A), lda, int(B),\n ldb, int(devInfo))\n cusolver.cusolverCheckStatus(status)\n\n\ndef attach_cusolver_handle_to_context(ctx):\n handle = getattr(ctx, 'cusolver_handle', None)\n if handle is None:\n with ctx:\n ctx.cusolver_handle = cusolver.cusolverDnCreate()\n\n\ndef attach_cublas_handle_to_context(ctx):\n handle = getattr(ctx, 'cublas_handle', None)\n if handle is None:\n with ctx:\n ctx.cublas_handle = cublas.cublasCreate()\n\n\n# it is a subset of all cases available in slinalg's MATRIX_STRUCTURE\nMATRIX_STRUCTURES_SOLVE 
= (\n 'general',\n 'symmetric',\n 'lower_triangular',\n 'upper_triangular')\n\n\nclass GpuCusolverSolve(Op):\n \"\"\"\n CUSOLVER GPU solver OP.\n\n Parameters\n ----------\n trans\n Whether to take the transpose of the input matrix or not.\n\n \"\"\"\n\n __props__ = ('A_structure', 'trans', 'inplace')\n\n def __init__(self, A_structure='general', trans='N', inplace=False):\n self.trans = trans\n self.inplace = inplace\n self.A_structure = A_structure\n if self.inplace:\n self.destroy_map = {0: [0]}\n assert A_structure in MATRIX_STRUCTURES_SOLVE\n super(GpuCusolverSolve, self).__init__()\n\n def make_node(self, inp1, inp2):\n if not cusolver_available:\n raise RuntimeError('CUSOLVER is not available and '\n 'GpuCusolverSolve Op can not be constructed.')\n if skcuda.__version__ <= '0.5.1':\n warnings.warn('The GpuSolve op requires scikit-cuda > 0.5.1 to work with CUDA 8')\n context_name = infer_context_name(inp1, inp2)\n\n inp1 = as_gpuarray_variable(inp1, context_name)\n inp2 = as_gpuarray_variable(inp2, context_name)\n\n inp1 = gpu_contiguous(inp1)\n inp2 = gpu_contiguous(inp2)\n\n # this op can only operate on float32 matrices\n assert inp1.ndim == 2\n assert inp2.ndim == 2\n assert inp1.dtype == 'float32'\n assert inp2.dtype == 'float32'\n\n return theano.Apply(\n self, [inp1, inp2],\n [GpuArrayType('float32',\n broadcastable=inp1.broadcastable,\n context_name=context_name)()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n ctx = node.inputs[0].type.context\n attach_cusolver_handle_to_context(ctx)\n\n def check_dev_info(self, dev_info):\n val = np.asarray(dev_info)[0]\n if val > 0:\n raise LinAlgError('A is singular')\n\n def perform(self, node, inputs, outputs):\n context = inputs[0][0].context\n\n # Size of the matrices to invert.\n z = outputs[0]\n\n # Matrix.\n A = inputs[0]\n\n # Solution vectors.\n b = inputs[1]\n\n assert(len(A.shape) == 2)\n assert(len(b.shape) == 2)\n\n if self.trans in ['T', 'C']:\n trans = 1\n l, n = A.shape\n k, m = b.shape\n elif self.trans == 'N':\n trans = 0\n n, l = A.shape\n k, m = b.shape\n else:\n raise ValueError('Invalid value for trans')\n if l != n:\n raise ValueError('A must be a square matrix')\n if n != k:\n raise ValueError('A and b must be aligned.')\n\n lda = max(1, n)\n ldb = max(1, k)\n\n # We copy A and b as cusolver operates inplace\n b = pygpu.array(b, copy=True, order='F')\n if not self.inplace:\n A = pygpu.array(A, copy=True)\n A_ptr = A.gpudata\n b_ptr = b.gpudata\n\n # cusolver expects a F ordered matrix, but A is not explicitly\n # converted between C and F order, instead we switch the\n # \"transpose\" flag.\n if A.flags['C_CONTIGUOUS']:\n trans = 1 - trans\n\n if self.A_structure == 'symmetric':\n with context:\n workspace_size = cusolver.cusolverDnSpotrf_bufferSize(\n context.cusolver_handle, 0, n, A_ptr, lda)\n\n workspace = pygpu.zeros(workspace_size, dtype='float32',\n context=context)\n\n dev_info = pygpu.zeros((1,), dtype='int32', context=context)\n\n workspace_ptr = workspace.gpudata\n dev_info_ptr = dev_info.gpudata\n\n with context:\n cusolver.cusolverDnSpotrf(\n context.cusolver_handle, 0, n, A_ptr, lda, workspace_ptr,\n workspace_size, dev_info_ptr)\n self.check_dev_info(dev_info)\n\n cusolverDnSpotrs(\n context.cusolver_handle, 0, n, m, A_ptr, lda,\n b_ptr, ldb, dev_info_ptr)\n\n else:\n # general case for A\n with context:\n workspace_size = cusolver.cusolverDnSgetrf_bufferSize(\n context.cusolver_handle, n, n, A_ptr, lda)\n\n workspace = pygpu.zeros(workspace_size, dtype='float32',\n 
context=context)\n\n pivots = pygpu.zeros(n, dtype='int32', context=context)\n\n dev_info = pygpu.zeros((1,), dtype='int32', context=context)\n\n workspace_ptr = workspace.gpudata\n pivots_ptr = pivots.gpudata\n dev_info_ptr = dev_info.gpudata\n\n with context:\n cusolver.cusolverDnSgetrf(\n context.cusolver_handle, n, n, A_ptr, lda, workspace_ptr,\n pivots_ptr, dev_info_ptr)\n self.check_dev_info(dev_info)\n\n cusolver.cusolverDnSgetrs(\n context.cusolver_handle, trans, n, m, A_ptr, lda,\n pivots_ptr, b_ptr, ldb, dev_info_ptr)\n\n z[0] = b\n\n\nclass GpuCublasTriangularSolve(Op):\n \"\"\"\n CUBLAS GPU Triangular Solve Op.\n\n Parameters\n ----------\n lower\n Whether system is lower-triangular (True) or upper-triangular (False).\n trans\n Whether to take the transpose of the input matrix or not.\n \"\"\"\n __props__ = ('trans', 'lower')\n\n def __init__(self, lower=True, trans='N'):\n self.trans = trans\n self.lower = lower\n super(GpuCublasTriangularSolve, self).__init__()\n\n def make_node(self, inp1, inp2):\n if not cublas_available:\n raise RuntimeError('CUBLAS is not available and '\n 'GpuCublasTriangularSolve Op can not be constructed.')\n context_name = infer_context_name(inp1, inp2)\n\n inp1 = as_gpuarray_variable(inp1, context_name)\n inp2 = as_gpuarray_variable(inp2, context_name)\n\n inp1 = gpu_contiguous(inp1)\n inp2 = gpu_contiguous(inp2)\n\n # this op can only operate on float32 matrices\n assert inp1.ndim == 2\n assert inp2.ndim in [1, 2]\n assert inp1.dtype == 'float32'\n assert inp2.dtype == 'float32'\n\n return theano.Apply(self, [inp1, inp2],\n [GpuArrayType('float32',\n broadcastable=inp2.broadcastable,\n context_name=context_name)()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n ctx = node.inputs[0].type.context\n attach_cublas_handle_to_context(ctx)\n\n def perform(self, node, inputs, outputs):\n ctx = node.inputs[0].type.context\n\n # Solution set\n x = outputs[0]\n\n # Matrix.\n A = inputs[0]\n\n # right hand side\n b = inputs[1]\n\n assert(len(A.shape) == 2)\n assert(len(b.shape) in [1, 2])\n\n # implicitly deal with the difference between C order\n # and fortran order by flipping the trans and lower flags\n lower = not self.lower\n trans = self.trans\n if trans in ['T', 'C']:\n trans = 'N'\n l, n = A.shape\n elif trans == 'N':\n trans = 'T'\n n, l = A.shape\n else:\n raise ValueError('Invalid value for trans')\n\n if b.ndim == 2:\n k, m = b.shape\n else:\n k, = b.shape\n m = 1\n\n if l != n:\n raise ValueError('A must be a square matrix')\n if n != k:\n raise ValueError('A and b must be aligned.')\n\n lda = max(1, n)\n ldb = max(1, k)\n\n # solution overwrites right hand side on exit\n b = pygpu.array(b, copy=True, order='F')\n\n A_ptr = A.gpudata\n b_ptr = b.gpudata\n\n # unit scalar used for multiplication\n alpha = 1.0\n # indicates matrix A is on left of B\n side = 'l'\n # set whether upper or lower part of matrix A stored\n uplo = 'l' if lower else 'u'\n # indicates elements on diagonal of matrix A may not be unity\n diag = 'n'\n\n with ctx:\n if b.ndim == 1:\n # matrix vector solve\n cublas.cublasStrsv(ctx.cublas_handle, uplo, trans, diag, n,\n A_ptr, lda, b_ptr, 1)\n else:\n cublas.cublasStrsm(ctx.cublas_handle, side, uplo, trans, diag,\n n, m, alpha, A_ptr, lda, b_ptr, ldb)\n\n x[0] = b\n\n\ndef gpu_solve(A, b, A_structure='general', trans='N'):\n if A_structure == 'lower':\n return GpuCublasTriangularSolve(True, trans)(A, b)\n elif A_structure == 'upper':\n return GpuCublasTriangularSolve(False, trans)(A, b)\n\n return 
GpuCusolverSolve(A_structure, trans)(A, b)\n\n\nclass GpuCholesky(Op):\n \"\"\"\n CUSOLVER GPU Cholesky Op.\n\n Given a real positive definite matrix `A` returns either a lower\n triangular matrix `L` such that `A == dot(L, L.T)` if `lower == True`\n else returns an upper triangular matrix `U` such that `A == dot(U.T, U)`\n if `lower == False`.\n\n Parameters\n ----------\n lower\n Whether to return a lower rather than upper triangular decomposition.\n\n \"\"\"\n\n __props__ = ('lower', 'inplace')\n\n def __init__(self, lower=True, inplace=False):\n self.lower = lower\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [0]}\n super(GpuCholesky, self).__init__()\n\n def clone_inplace(self):\n return self.__class__(lower=self.lower, inplace=True)\n\n def make_node(self, inp):\n if not cusolver_available:\n raise RuntimeError('CUSOLVER is not available and '\n 'GpuCholesky Op can not be constructed.')\n if skcuda.__version__ <= '0.5.1':\n warnings.warn('The GpuCholesky op requires scikit-cuda > 0.5.1 to work with CUDA 8')\n if not pygpu_available:\n raise RuntimeError('Missing pygpu or triu/tril functions.'\n 'Install or update libgpuarray.')\n context_name = infer_context_name(inp)\n\n inp = as_gpuarray_variable(inp, context_name)\n\n inp = gpu_contiguous(inp)\n\n # this op can only operate on float32 matrices\n # because of current implementation of triu/tril.\n # TODO: support float64 for triu/tril in GpuArray and for GpuCholesky/GpuCusolverSolve in Theano.\n assert inp.ndim == 2\n assert inp.dtype == 'float32'\n\n return theano.Apply(self, [inp], [inp.type()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n ctx = node.inputs[0].type.context\n attach_cusolver_handle_to_context(ctx)\n\n def perform(self, node, inputs, outputs):\n context = inputs[0][0].context\n\n # Input matrix.\n A = inputs[0]\n\n l, n = A.shape\n if l != n:\n raise ValueError('A must be a square matrix')\n\n lda = max(1, n)\n\n # cusolver operates on F ordered matrices, but A is expected\n # to be symmetric so it does not matter.\n # We copy A if needed\n if self.inplace:\n L = A\n else:\n L = pygpu.array(A, copy=True)\n\n # The output matrix will contain only the upper or lower\n # triangular factorization of A. If L is C ordered (it\n # probably is as it is the default in Theano) we just switch\n # the fill mode parameter of cusolver\n l_parameter = 0 if self.lower else 1\n if L.flags['C_CONTIGUOUS']:\n l_parameter = 1 - l_parameter\n\n L_ptr = L.gpudata\n\n with context:\n workspace_size = cusolver.cusolverDnSpotrf_bufferSize(\n context.cusolver_handle, l_parameter, n, L_ptr, lda)\n\n workspace = pygpu.zeros(workspace_size, dtype='float32',\n context=context)\n\n dev_info = pygpu.zeros((1,), dtype='int32', context=context)\n\n workspace_ptr = workspace.gpudata\n dev_info_ptr = dev_info.gpudata\n\n cusolver.cusolverDnSpotrf(\n context.cusolver_handle, l_parameter, n, L_ptr, lda, workspace_ptr,\n workspace_size, dev_info_ptr)\n\n val_dev_info = np.asarray(dev_info)[0]\n if val_dev_info > 0:\n raise LinAlgError('Cholesky decomposition failed (is A SPD?)')\n\n # cusolver leaves the elements in the matrix outside the considered\n # upper or lower triangle unchanged, so we need to put zeros outside\n # the triangle\n if self.lower:\n tril(L)\n else:\n triu(L)\n\n outputs[0][0] = L\n\n\ndef gpu_cholesky(A, lower=True):\n return GpuCholesky(lower)(A)\n\n\n# TODO: add support for float64\nclass GpuMagmaBase(COp):\n \"\"\"Base class for magma related operations. 
Add the necessary headers,\n libraries and optionally the location of headers and library.\n \"\"\"\n def c_headers(self):\n return ['gpuarray/types.h', 'gpuarray/array.h', 'gpuarray/ext_cuda.h',\n 'gpuarray_helper.h', 'magma.h']\n\n def c_header_dirs(self):\n dirs = [gpuarray_helper_inc_dir(), pygpu.get_include(), config.cuda.include_path]\n if config.magma.include_path:\n dirs.append(config.magma.include_path)\n return dirs\n\n def c_libraries(self):\n return ['magma']\n\n def c_lib_dirs(self):\n if config.magma.library_path:\n return [config.magma.library_path]\n return []\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n from skcuda.magma import magma_init\n ctx = node.inputs[0].type.context\n if not getattr(ctx, 'is_magma_initialized', False):\n with ctx:\n magma_init()\n ctx.is_magma_initialized = True\n\n\nclass GpuMagmaSVD(GpuMagmaBase):\n \"\"\"Computes the svd of a matrix :math:`A` using magma library.\n\n .. warning::\n\n Because of implementation constraints, this Op returns outputs\n in order ``S, U, VT``. Use :func:`theano.gpuarray.linalg.gpu_svd`\n to get them in expected order ``U, S, VT``.\n\n \"\"\"\n __props__ = ('full_matrices', 'compute_uv')\n _cop_num_inputs = 1\n _cop_num_outputs = 3\n check_input = False\n params_type = ParamsType(full_matrices=bool_t, context=gpu_context_type)\n\n def __init__(self, full_matrices=True, compute_uv=True):\n self.full_matrices = full_matrices\n self.compute_uv = compute_uv\n COp.__init__(self, ['c_code/magma_svd.c'], 'APPLY_SPECIFIC(magma_svd)')\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n if self.compute_uv:\n return theano.Apply(self, [A],\n # return S, U, VT\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)(),\n A.type(),\n A.type()])\n else:\n return theano.Apply(self, [A],\n # return only S\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)()])\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n super(GpuMagmaSVD, self).prepare_node(node, storage_map, compute_map, impl)\n # Check node to prevent eventual errors with old pickled nodes.\n if self.compute_uv:\n A, B, C = node.outputs\n # We expect order: S (vector), U (matrix), VT (matrix)\n assert A.type.ndim == 1 and B.type.ndim == C.type.ndim == 2, \\\n \"Due to implementation constraints, GpuMagmaSVD interface has changed and now returns (S, U, VT) \" \\\n \"instead of (U, S, VT). 
Either update your code, or use gpu_svd() to get the expected (U, S, VT) order.\"\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n def infer_shape(self, node, shapes):\n x_shape, = shapes\n M, N = x_shape\n K = tensor.minimum(M, N)\n s_shape = (K, )\n if self.compute_uv:\n u_shape = (M, M) if self.full_matrices else (M, K)\n vt_shape = (N, N) if self.full_matrices else (K, N)\n return [s_shape, u_shape, vt_shape]\n else:\n return [s_shape]\n\n\ndef gpu_svd(a, full_matrices=1, compute_uv=1):\n \"\"\"\n This function performs the SVD on GPU.\n\n Parameters\n ----------\n full_matrices : bool, optional\n If True (default), u and v have the shapes (M, M) and (N, N),\n respectively.\n Otherwise, the shapes are (M, K) and (K, N), respectively,\n where K = min(M, N).\n compute_uv : bool, optional\n Whether or not to compute u and v in addition to s.\n True by default.\n\n Returns\n -------\n U, V, D : matrices\n\n \"\"\"\n out = GpuMagmaSVD(full_matrices, compute_uv)(a)\n if compute_uv:\n S, U, VT = out\n out = [U, S, VT]\n return out\n\n\nclass GpuMagmaMatrixInverse(GpuMagmaBase):\n \"\"\"Computes the inverse of a matrix :math:`A` using magma library.\n \"\"\"\n __props__ = ('inplace', )\n check_input = False\n params_type = ParamsType(inplace=bool_t, context=gpu_context_type)\n\n def __init__(self, inplace=False):\n COp.__init__(self, ['c_code/magma_inv.c'], 'APPLY_SPECIFIC(magma_inv)')\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [0]}\n\n def clone_inplace(self):\n return self.__class__(inplace=True)\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n return theano.Apply(self, [A], [A.type()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n def infer_shape(self, node, shapes):\n return shapes\n\n\ndef gpu_matrix_inverse(a):\n \"\"\"\n This function performs the matrix inverse on GPU.\n\n Returns\n -------\n a_inv: matrix\n\n \"\"\"\n return GpuMagmaMatrixInverse()(a)\n\n\nclass GpuMagmaCholesky(GpuMagmaBase, CGpuKernelBase):\n \"\"\"Computes the cholesky decomposition of a matrix :math:`A` using magma\n library.\n\n \"\"\"\n __props__ = ('lower', 'inplace')\n check_input = False\n params_type = ParamsType(lower=bool_t, inplace=bool_t, context=gpu_context_type)\n\n def __init__(self, lower=True, inplace=False):\n self.lower = lower\n COp.__init__(self, ['c_code/magma_cholesky.c'], 'APPLY_SPECIFIC(magma_cholesky)')\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [0]}\n\n def clone_inplace(self):\n return self.__class__(lower=self.lower, inplace=True)\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n return theano.Apply(self, [A], [A.type()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n def infer_shape(self, node, shapes):\n return [shapes[0]]\n\n\nclass GpuMagmaQR(GpuMagmaBase, CGpuKernelBase):\n \"\"\"Computes the qr decomposition of a matrix :math:`A` using magma\n library.\n\n Parameters\n ----------\n 
complete : If `False`, returns only r.\n\n .. warning::\n\n Because of implementation constraints, this Op returns outputs\n in order ``R, Q``. Use :func:`theano.gpuarray.linalg.gpu_qr`\n to get them in expected order ``Q, R``.\n \"\"\"\n __props__ = ('complete', )\n _cop_num_inputs = 1\n _cop_num_outputs = 2\n check_input = False\n params_type = ParamsType(complete=bool_t, context=gpu_context_type)\n\n def __init__(self, complete=True):\n self.complete = complete\n COp.__init__(self, ['c_code/magma_qr.c'], 'APPLY_SPECIFIC(magma_qr)')\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n if self.complete:\n return theano.Apply(self, [A],\n # return R, Q\n [A.type(), A.type()])\n else:\n return theano.Apply(self, [A],\n # return R\n [A.type()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n\n\ndef gpu_qr(a, complete=True):\n \"\"\"\n This function performs the QR on GPU.\n\n Parameters\n ----------\n complete : bool, optional\n If `False`, returns only r.\n\n Returns\n -------\n Q, R : matrices\n\n \"\"\"\n out = GpuMagmaQR(complete)(a)\n if complete:\n R, Q = out\n out = [Q, R]\n return out\n\n\nclass GpuMagmaEigh(GpuMagmaBase):\n \"\"\"Computes the eigen decomposition of a symmetric matrix :math:`A` using magma\n library.\n\n Parameters\n ----------\n UPLO : Specifies whether the calculation is done with the lower triangular\n part of matrix (`L`, default) or the upper triangular part (`U`).\n compute_v : If `True`, computes eigenvalues and eigenvectors (`True`,\n default). If `False`, computes only eigenvalues of matrix.\n \"\"\"\n __props__ = ('lower', 'compute_v')\n _cop_num_inputs = 1\n _cop_num_outputs = 2\n check_input = False\n params_type = ParamsType(lower=bool_t, compute_v=bool_t,\n context=gpu_context_type)\n\n def __init__(self, UPLO='L', compute_v=True):\n assert UPLO in ['L', 'U']\n self.lower = UPLO == 'L'\n self.compute_v = compute_v\n COp.__init__(self, ['c_code/magma_eigh.c'], 'APPLY_SPECIFIC(magma_eigh)')\n\n def make_node(self, A):\n ctx_name = infer_context_name(A)\n A = as_gpuarray_variable(A, ctx_name)\n A = gpu_contiguous(A)\n if A.ndim != 2:\n raise LinAlgError(\"Matrix rank error\")\n if A.dtype != 'float32':\n raise TypeError(\"only `float32` is supported for now\")\n if self.compute_v:\n return theano.Apply(self, [A],\n # return D, V\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)(),\n A.type()])\n else:\n return theano.Apply(self, [A],\n # return D\n [GpuArrayType(A.dtype, broadcastable=[False],\n context_name=ctx_name)()])\n\n def get_params(self, node):\n return self.params_type.get_params(self, context=node.inputs[0].type.context)\n", "path": "theano/gpuarray/linalg.py"}]} |
gh_patches_debug_1316 | rasdani/github-patches | git_diff | numba__numba-1397 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CUDA local array should be allocated at entry block
https://github.com/numba/numba/issues/1341#issuecomment-134612689
--- END ISSUE ---
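For orientation, the idea the issue asks for — emit local-array allocas in the function's entry block rather than at the current insertion point, so the stack slot is created once per kernel invocation even when the allocation site sits inside a loop — can be sketched with llvmlite's `IRBuilder`. The helper below is only an illustrative sketch of that idea; it is not Numba's own helper, and the module, function, and variable names in the demo are made up for the example.
```python
from llvmlite import ir

def alloca_at_entry(builder, typ, name=""):
    # Remember the current position, hop to the top of the function's first
    # (entry) block, emit the alloca there, then return to where code
    # generation was happening.
    saved_block = builder.block
    entry_block = builder.function.blocks[0]
    builder.position_at_start(entry_block)
    ptr = builder.alloca(typ, name=name)
    builder.position_at_end(saved_block)
    return ptr

# Minimal demo: the builder is positioned in a "body" block, yet the i32
# slot lands in the entry block.
mod = ir.Module(name="demo")
fn = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), name="kernel_stub")
entry = fn.append_basic_block("entry")
body = fn.append_basic_block("body")
builder = ir.IRBuilder(body)
slot = alloca_at_entry(builder, ir.IntType(32), name="local_slot")
builder.ret_void()                 # terminate the body block
builder.position_at_end(entry)
builder.branch(body)               # terminate the entry block
print(mod)                         # the alloca appears under "entry:"
```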
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/cuda/cudaimpl.py`
Content:
```
1 from __future__ import print_function, absolute_import, division
2 from functools import reduce
3 import operator
4 from llvmlite.llvmpy.core import Type
5 import llvmlite.llvmpy.core as lc
6 import llvmlite.llvmpy.ee as le
7 import llvmlite.binding as ll
8 from numba.targets.imputils import implement, Registry
9 from numba import cgutils
10 from numba import types
11 from .cudadrv import nvvm
12 from . import nvvmutils, stubs
13
14 registry = Registry()
15 register = registry.register
16
17
18 @register
19 @implement('ptx.grid.1d', types.intp)
20 def ptx_grid1d(context, builder, sig, args):
21 assert len(args) == 1
22 return nvvmutils.get_global_id(builder, dim=1)
23
24
25 @register
26 @implement('ptx.grid.2d', types.intp)
27 def ptx_grid2d(context, builder, sig, args):
28 assert len(args) == 1
29 r1, r2 = nvvmutils.get_global_id(builder, dim=2)
30 return cgutils.pack_array(builder, [r1, r2])
31
32
33 @register
34 @implement('ptx.grid.3d', types.intp)
35 def ptx_grid3d(context, builder, sig, args):
36 assert len(args) == 1
37 r1, r2, r3 = nvvmutils.get_global_id(builder, dim=3)
38 return cgutils.pack_array(builder, [r1, r2, r3])
39
40
41 @register
42 @implement('ptx.gridsize.1d', types.intp)
43 def ptx_gridsize1d(context, builder, sig, args):
44 assert len(args) == 1
45 ntidx = nvvmutils.call_sreg(builder, "ntid.x")
46 nctaidx = nvvmutils.call_sreg(builder, "nctaid.x")
47
48 res = builder.mul(ntidx, nctaidx)
49 return res
50
51
52 @register
53 @implement('ptx.gridsize.2d', types.intp)
54 def ptx_gridsize2d(context, builder, sig, args):
55 assert len(args) == 1
56 ntidx = nvvmutils.call_sreg(builder, "ntid.x")
57 nctaidx = nvvmutils.call_sreg(builder, "nctaid.x")
58
59 ntidy = nvvmutils.call_sreg(builder, "ntid.y")
60 nctaidy = nvvmutils.call_sreg(builder, "nctaid.y")
61
62 r1 = builder.mul(ntidx, nctaidx)
63 r2 = builder.mul(ntidy, nctaidy)
64 return cgutils.pack_array(builder, [r1, r2])
65
66
67 @register
68 @implement('ptx.gridsize.3d', types.intp)
69 def ptx_gridsize3d(context, builder, sig, args):
70 assert len(args) == 1
71 ntidx = nvvmutils.call_sreg(builder, "ntid.x")
72 nctaidx = nvvmutils.call_sreg(builder, "nctaid.x")
73
74 ntidy = nvvmutils.call_sreg(builder, "ntid.y")
75 nctaidy = nvvmutils.call_sreg(builder, "nctaid.y")
76
77 ntidz = nvvmutils.call_sreg(builder, "ntid.z")
78 nctaidz = nvvmutils.call_sreg(builder, "nctaid.z")
79
80 r1 = builder.mul(ntidx, nctaidx)
81 r2 = builder.mul(ntidy, nctaidy)
82 r3 = builder.mul(ntidz, nctaidz)
83 return cgutils.pack_array(builder, [r1, r2, r3])
84
85
86 # -----------------------------------------------------------------------------
87
88 def ptx_sreg_template(sreg):
89 def ptx_sreg_impl(context, builder, sig, args):
90 assert not args
91 return nvvmutils.call_sreg(builder, sreg)
92
93 return ptx_sreg_impl
94
95
96 # Dynamic create all special register
97 for sreg in nvvmutils.SREG_MAPPING.keys():
98 register(implement(sreg)(ptx_sreg_template(sreg)))
99
100
101 # -----------------------------------------------------------------------------
102
103 @register
104 @implement('ptx.cmem.arylike', types.Kind(types.Array))
105 def ptx_cmem_arylike(context, builder, sig, args):
106 lmod = builder.module
107 [arr] = args
108 flat = arr.flatten(order='A')
109 aryty = sig.return_type
110 dtype = aryty.dtype
111
112 if isinstance(dtype, types.Complex):
113 elemtype = (types.float32
114 if dtype == types.complex64
115 else types.float64)
116 constvals = []
117 for i in range(flat.size):
118 elem = flat[i]
119 real = context.get_constant(elemtype, elem.real)
120 imag = context.get_constant(elemtype, elem.imag)
121 constvals.extend([real, imag])
122
123 elif dtype in types.number_domain:
124 constvals = [context.get_constant(dtype, flat[i])
125 for i in range(flat.size)]
126
127 else:
128 raise TypeError("unsupport type: %s" % dtype)
129
130 constary = lc.Constant.array(constvals[0].type, constvals)
131
132 addrspace = nvvm.ADDRSPACE_CONSTANT
133 gv = lmod.add_global_variable(constary.type, name="_cudapy_cmem",
134 addrspace=addrspace)
135 gv.linkage = lc.LINKAGE_INTERNAL
136 gv.global_constant = True
137 gv.initializer = constary
138
139 # Convert to generic address-space
140 conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)
141 addrspaceptr = gv.bitcast(Type.pointer(Type.int(8), addrspace))
142 genptr = builder.call(conv, [addrspaceptr])
143
144 # Create array object
145 ary = context.make_array(aryty)(context, builder)
146 kshape = [context.get_constant(types.intp, s) for s in arr.shape]
147 kstrides = [context.get_constant(types.intp, s) for s in arr.strides]
148 context.populate_array(ary,
149 data=builder.bitcast(genptr, ary.data.type),
150 shape=cgutils.pack_array(builder, kshape),
151 strides=cgutils.pack_array(builder, kstrides),
152 itemsize=ary.itemsize,
153 parent=ary.parent,
154 meminfo=None)
155
156 return ary._getvalue()
157
158
159 _unique_smem_id = 0
160
161
162 def _get_unique_smem_id(name):
163 """Due to bug with NVVM invalid internalizing of shared memory in the
164 PTX output. We can't mark shared memory to be internal. We have to
165 ensure unique name is generated for shared memory symbol.
166 """
167 global _unique_smem_id
168 _unique_smem_id += 1
169 return "{0}_{1}".format(name, _unique_smem_id)
170
171
172 @register
173 @implement('ptx.smem.alloc', types.intp, types.Any)
174 def ptx_smem_alloc_intp(context, builder, sig, args):
175 length, dtype = args
176 return _generic_array(context, builder, shape=(length,), dtype=dtype,
177 symbol_name=_get_unique_smem_id('_cudapy_smem'),
178 addrspace=nvvm.ADDRSPACE_SHARED,
179 can_dynsized=True)
180
181
182 @register
183 @implement('ptx.smem.alloc', types.Kind(types.UniTuple), types.Any)
184 def ptx_smem_alloc_array(context, builder, sig, args):
185 shape, dtype = args
186 return _generic_array(context, builder, shape=shape, dtype=dtype,
187 symbol_name=_get_unique_smem_id('_cudapy_smem'),
188 addrspace=nvvm.ADDRSPACE_SHARED,
189 can_dynsized=True)
190
191
192 @register
193 @implement('ptx.lmem.alloc', types.intp, types.Any)
194 def ptx_lmem_alloc_intp(context, builder, sig, args):
195 length, dtype = args
196 return _generic_array(context, builder, shape=(length,), dtype=dtype,
197 symbol_name='_cudapy_lmem',
198 addrspace=nvvm.ADDRSPACE_LOCAL,
199 can_dynsized=False)
200
201
202 @register
203 @implement('ptx.lmem.alloc', types.Kind(types.UniTuple), types.Any)
204 def ptx_lmem_alloc_array(context, builder, sig, args):
205 shape, dtype = args
206 return _generic_array(context, builder, shape=shape, dtype=dtype,
207 symbol_name='_cudapy_lmem',
208 addrspace=nvvm.ADDRSPACE_LOCAL,
209 can_dynsized=False)
210
211
212 @register
213 @implement(stubs.syncthreads)
214 def ptx_syncthreads(context, builder, sig, args):
215 assert not args
216 fname = 'llvm.nvvm.barrier0'
217 lmod = builder.module
218 fnty = Type.function(Type.void(), ())
219 sync = lmod.get_or_insert_function(fnty, name=fname)
220 builder.call(sync, ())
221 return context.get_dummy_value()
222
223
224 @register
225 @implement(stubs.atomic.add, types.Kind(types.Array), types.intp, types.Any)
226 def ptx_atomic_add_intp(context, builder, sig, args):
227 aryty, indty, valty = sig.args
228 ary, ind, val = args
229 dtype = aryty.dtype
230
231 if dtype != valty:
232 raise TypeError("expect %s but got %s" % (dtype, valty))
233 if aryty.ndim != 1:
234 raise TypeError("indexing %d-D array with 1-D index" % (aryty.ndim,))
235
236 lary = context.make_array(aryty)(context, builder, ary)
237 ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])
238
239 if aryty.dtype == types.float32:
240 lmod = builder.module
241 return builder.call(nvvmutils.declare_atomic_add_float32(lmod), (ptr, val))
242 elif aryty.dtype == types.float64:
243 lmod = builder.module
244 return builder.call(nvvmutils.declare_atomic_add_float64(lmod), (ptr, val))
245 else:
246 return builder.atomic_rmw('add', ptr, val, 'monotonic')
247
248
249 @register
250 @implement(stubs.atomic.add, types.Kind(types.Array),
251 types.Kind(types.UniTuple), types.Any)
252 @implement(stubs.atomic.add, types.Kind(types.Array),
253 types.Kind(types.Tuple), types.Any)
254 def ptx_atomic_add_tuple(context, builder, sig, args):
255 aryty, indty, valty = sig.args
256 ary, inds, val = args
257 dtype = aryty.dtype
258
259 indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
260 indices = [context.cast(builder, i, t, types.intp)
261 for t, i in zip(indty, indices)]
262
263 if dtype != valty:
264 raise TypeError("expect %s but got %s" % (dtype, valty))
265
266 if aryty.ndim != len(indty):
267 raise TypeError("indexing %d-D array with %d-D index" %
268 (aryty.ndim, len(indty)))
269
270 lary = context.make_array(aryty)(context, builder, ary)
271 ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)
272
273 if aryty.dtype == types.float32:
274 lmod = builder.module
275 return builder.call(nvvmutils.declare_atomic_add_float32(lmod), (ptr, val))
276 elif aryty.dtype == types.float64:
277 lmod = builder.module
278 return builder.call(nvvmutils.declare_atomic_add_float64(lmod), (ptr, val))
279 else:
280 return builder.atomic_rmw('add', ptr, val, 'monotonic')
281
282
283 @register
284 @implement(stubs.atomic.max, types.Kind(types.Array), types.intp, types.Any)
285 def ptx_atomic_max_intp(context, builder, sig, args):
286 aryty, indty, valty = sig.args
287 ary, ind, val = args
288 dtype = aryty.dtype
289
290 if dtype != valty:
291 raise TypeError("expect %s but got %s" % (dtype, valty))
292 if aryty.ndim != 1:
293 raise TypeError("indexing %d-D array with 1-D index" % (aryty.ndim,))
294
295 lary = context.make_array(aryty)(context, builder, ary)
296 ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])
297
298 if dtype == types.float64:
299 lmod = builder.module
300 return builder.call(nvvmutils.declare_atomic_max_float64(lmod), (ptr, val))
301 else:
302 raise TypeError('Unimplemented atomic max with %s array' % dtype)
303
304
305 @register
306 @implement(stubs.atomic.max, types.Kind(types.Array),
307 types.Kind(types.Tuple), types.Any)
308 @implement(stubs.atomic.max, types.Kind(types.Array),
309 types.Kind(types.UniTuple), types.Any)
310 def ptx_atomic_max_tuple(context, builder, sig, args):
311 aryty, indty, valty = sig.args
312 ary, inds, val = args
313 dtype = aryty.dtype
314
315 indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
316 indices = [context.cast(builder, i, t, types.intp)
317 for t, i in zip(indty, indices)]
318
319 if dtype != valty:
320 raise TypeError("expect %s but got %s" % (dtype, valty))
321
322 if aryty.ndim != len(indty):
323 raise TypeError("indexing %d-D array with %d-D index" %
324 (aryty.ndim, len(indty)))
325
326 lary = context.make_array(aryty)(context, builder, ary)
327 ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)
328
329 if aryty.dtype == types.float64:
330 lmod = builder.module
331 return builder.call(nvvmutils.declare_atomic_max_float64(lmod), (ptr, val))
332 else:
333 raise TypeError('Unimplemented atomic max with %s array' % dtype)
334
335
336
337 # -----------------------------------------------------------------------------
338
339
340 def _get_target_data(context):
341 return ll.create_target_data(nvvm.data_layout[context.address_size])
342
343
344 def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
345 can_dynsized=False):
346 elemcount = reduce(operator.mul, shape)
347 lldtype = context.get_data_type(dtype)
348 laryty = Type.array(lldtype, elemcount)
349
350 if addrspace == nvvm.ADDRSPACE_LOCAL:
351 # Special case local addrespace allocation to use alloca
352 # NVVM is smart enough to only use local memory if no register is
353 # available
354 dataptr = builder.alloca(laryty, name=symbol_name)
355 else:
356 lmod = builder.module
357
358 # Create global variable in the requested address-space
359 gvmem = lmod.add_global_variable(laryty, symbol_name, addrspace)
360
361 if elemcount <= 0:
362 if can_dynsized: # dynamic shared memory
363 gvmem.linkage = lc.LINKAGE_EXTERNAL
364 else:
365 raise ValueError("array length <= 0")
366 else:
367 ## Comment out the following line to workaround a NVVM bug
368 ## which generates a invalid symbol name when the linkage
369 ## is internal and in some situation.
370 ## See _get_unique_smem_id()
371 # gvmem.linkage = lc.LINKAGE_INTERNAL
372
373 gvmem.initializer = lc.Constant.undef(laryty)
374
375 if dtype not in types.number_domain:
376 raise TypeError("unsupported type: %s" % dtype)
377
378 # Convert to generic address-space
379 conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)
380 addrspaceptr = gvmem.bitcast(Type.pointer(Type.int(8), addrspace))
381 dataptr = builder.call(conv, [addrspaceptr])
382
383 return _make_array(context, builder, dataptr, dtype, shape)
384
385
386 def _make_array(context, builder, dataptr, dtype, shape, layout='C'):
387 ndim = len(shape)
388 # Create array object
389 aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')
390 ary = context.make_array(aryty)(context, builder)
391
392 targetdata = _get_target_data(context)
393 lldtype = context.get_data_type(dtype)
394 itemsize = lldtype.get_abi_size(targetdata)
395 # Compute strides
396 rstrides = [itemsize]
397 for i, lastsize in enumerate(reversed(shape[1:])):
398 rstrides.append(lastsize * rstrides[-1])
399 strides = [s for s in reversed(rstrides)]
400
401 kshape = [context.get_constant(types.intp, s) for s in shape]
402 kstrides = [context.get_constant(types.intp, s) for s in strides]
403
404 context.populate_array(ary,
405 data=builder.bitcast(dataptr, ary.data.type),
406 shape=cgutils.pack_array(builder, kshape),
407 strides=cgutils.pack_array(builder, kstrides),
408 itemsize=context.get_constant(types.intp, itemsize),
409 meminfo=None)
410 return ary._getvalue()
411
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numba/cuda/cudaimpl.py b/numba/cuda/cudaimpl.py
--- a/numba/cuda/cudaimpl.py
+++ b/numba/cuda/cudaimpl.py
@@ -351,7 +351,7 @@
# Special case local addrespace allocation to use alloca
# NVVM is smart enough to only use local memory if no register is
# available
- dataptr = builder.alloca(laryty, name=symbol_name)
+ dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
else:
lmod = builder.module
| {"golden_diff": "diff --git a/numba/cuda/cudaimpl.py b/numba/cuda/cudaimpl.py\n--- a/numba/cuda/cudaimpl.py\n+++ b/numba/cuda/cudaimpl.py\n@@ -351,7 +351,7 @@\n # Special case local addrespace allocation to use alloca\n # NVVM is smart enough to only use local memory if no register is\n # available\n- dataptr = builder.alloca(laryty, name=symbol_name)\n+ dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)\n else:\n lmod = builder.module\n", "issue": "CUDA local array should be allocated at entry block\nhttps://github.com/numba/numba/issues/1341#issuecomment-134612689\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\nfrom functools import reduce\nimport operator\nfrom llvmlite.llvmpy.core import Type\nimport llvmlite.llvmpy.core as lc\nimport llvmlite.llvmpy.ee as le\nimport llvmlite.binding as ll\nfrom numba.targets.imputils import implement, Registry\nfrom numba import cgutils\nfrom numba import types\nfrom .cudadrv import nvvm\nfrom . import nvvmutils, stubs\n\nregistry = Registry()\nregister = registry.register\n\n\n@register\n@implement('ptx.grid.1d', types.intp)\ndef ptx_grid1d(context, builder, sig, args):\n assert len(args) == 1\n return nvvmutils.get_global_id(builder, dim=1)\n\n\n@register\n@implement('ptx.grid.2d', types.intp)\ndef ptx_grid2d(context, builder, sig, args):\n assert len(args) == 1\n r1, r2 = nvvmutils.get_global_id(builder, dim=2)\n return cgutils.pack_array(builder, [r1, r2])\n\n\n@register\n@implement('ptx.grid.3d', types.intp)\ndef ptx_grid3d(context, builder, sig, args):\n assert len(args) == 1\n r1, r2, r3 = nvvmutils.get_global_id(builder, dim=3)\n return cgutils.pack_array(builder, [r1, r2, r3])\n\n\n@register\n@implement('ptx.gridsize.1d', types.intp)\ndef ptx_gridsize1d(context, builder, sig, args):\n assert len(args) == 1\n ntidx = nvvmutils.call_sreg(builder, \"ntid.x\")\n nctaidx = nvvmutils.call_sreg(builder, \"nctaid.x\")\n\n res = builder.mul(ntidx, nctaidx)\n return res\n\n\n@register\n@implement('ptx.gridsize.2d', types.intp)\ndef ptx_gridsize2d(context, builder, sig, args):\n assert len(args) == 1\n ntidx = nvvmutils.call_sreg(builder, \"ntid.x\")\n nctaidx = nvvmutils.call_sreg(builder, \"nctaid.x\")\n\n ntidy = nvvmutils.call_sreg(builder, \"ntid.y\")\n nctaidy = nvvmutils.call_sreg(builder, \"nctaid.y\")\n\n r1 = builder.mul(ntidx, nctaidx)\n r2 = builder.mul(ntidy, nctaidy)\n return cgutils.pack_array(builder, [r1, r2])\n\n\n@register\n@implement('ptx.gridsize.3d', types.intp)\ndef ptx_gridsize3d(context, builder, sig, args):\n assert len(args) == 1\n ntidx = nvvmutils.call_sreg(builder, \"ntid.x\")\n nctaidx = nvvmutils.call_sreg(builder, \"nctaid.x\")\n\n ntidy = nvvmutils.call_sreg(builder, \"ntid.y\")\n nctaidy = nvvmutils.call_sreg(builder, \"nctaid.y\")\n\n ntidz = nvvmutils.call_sreg(builder, \"ntid.z\")\n nctaidz = nvvmutils.call_sreg(builder, \"nctaid.z\")\n\n r1 = builder.mul(ntidx, nctaidx)\n r2 = builder.mul(ntidy, nctaidy)\n r3 = builder.mul(ntidz, nctaidz)\n return cgutils.pack_array(builder, [r1, r2, r3])\n\n\n# -----------------------------------------------------------------------------\n\ndef ptx_sreg_template(sreg):\n def ptx_sreg_impl(context, builder, sig, args):\n assert not args\n return nvvmutils.call_sreg(builder, sreg)\n\n return ptx_sreg_impl\n\n\n# Dynamic create all special register\nfor sreg in nvvmutils.SREG_MAPPING.keys():\n register(implement(sreg)(ptx_sreg_template(sreg)))\n\n\n# 
-----------------------------------------------------------------------------\n\n@register\n@implement('ptx.cmem.arylike', types.Kind(types.Array))\ndef ptx_cmem_arylike(context, builder, sig, args):\n lmod = builder.module\n [arr] = args\n flat = arr.flatten(order='A')\n aryty = sig.return_type\n dtype = aryty.dtype\n\n if isinstance(dtype, types.Complex):\n elemtype = (types.float32\n if dtype == types.complex64\n else types.float64)\n constvals = []\n for i in range(flat.size):\n elem = flat[i]\n real = context.get_constant(elemtype, elem.real)\n imag = context.get_constant(elemtype, elem.imag)\n constvals.extend([real, imag])\n\n elif dtype in types.number_domain:\n constvals = [context.get_constant(dtype, flat[i])\n for i in range(flat.size)]\n\n else:\n raise TypeError(\"unsupport type: %s\" % dtype)\n\n constary = lc.Constant.array(constvals[0].type, constvals)\n\n addrspace = nvvm.ADDRSPACE_CONSTANT\n gv = lmod.add_global_variable(constary.type, name=\"_cudapy_cmem\",\n addrspace=addrspace)\n gv.linkage = lc.LINKAGE_INTERNAL\n gv.global_constant = True\n gv.initializer = constary\n\n # Convert to generic address-space\n conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)\n addrspaceptr = gv.bitcast(Type.pointer(Type.int(8), addrspace))\n genptr = builder.call(conv, [addrspaceptr])\n\n # Create array object\n ary = context.make_array(aryty)(context, builder)\n kshape = [context.get_constant(types.intp, s) for s in arr.shape]\n kstrides = [context.get_constant(types.intp, s) for s in arr.strides]\n context.populate_array(ary,\n data=builder.bitcast(genptr, ary.data.type),\n shape=cgutils.pack_array(builder, kshape),\n strides=cgutils.pack_array(builder, kstrides),\n itemsize=ary.itemsize,\n parent=ary.parent,\n meminfo=None)\n\n return ary._getvalue()\n\n\n_unique_smem_id = 0\n\n\ndef _get_unique_smem_id(name):\n \"\"\"Due to bug with NVVM invalid internalizing of shared memory in the\n PTX output. We can't mark shared memory to be internal. 
We have to\n ensure unique name is generated for shared memory symbol.\n \"\"\"\n global _unique_smem_id\n _unique_smem_id += 1\n return \"{0}_{1}\".format(name, _unique_smem_id)\n\n\n@register\n@implement('ptx.smem.alloc', types.intp, types.Any)\ndef ptx_smem_alloc_intp(context, builder, sig, args):\n length, dtype = args\n return _generic_array(context, builder, shape=(length,), dtype=dtype,\n symbol_name=_get_unique_smem_id('_cudapy_smem'),\n addrspace=nvvm.ADDRSPACE_SHARED,\n can_dynsized=True)\n\n\n@register\n@implement('ptx.smem.alloc', types.Kind(types.UniTuple), types.Any)\ndef ptx_smem_alloc_array(context, builder, sig, args):\n shape, dtype = args\n return _generic_array(context, builder, shape=shape, dtype=dtype,\n symbol_name=_get_unique_smem_id('_cudapy_smem'),\n addrspace=nvvm.ADDRSPACE_SHARED,\n can_dynsized=True)\n\n\n@register\n@implement('ptx.lmem.alloc', types.intp, types.Any)\ndef ptx_lmem_alloc_intp(context, builder, sig, args):\n length, dtype = args\n return _generic_array(context, builder, shape=(length,), dtype=dtype,\n symbol_name='_cudapy_lmem',\n addrspace=nvvm.ADDRSPACE_LOCAL,\n can_dynsized=False)\n\n\n@register\n@implement('ptx.lmem.alloc', types.Kind(types.UniTuple), types.Any)\ndef ptx_lmem_alloc_array(context, builder, sig, args):\n shape, dtype = args\n return _generic_array(context, builder, shape=shape, dtype=dtype,\n symbol_name='_cudapy_lmem',\n addrspace=nvvm.ADDRSPACE_LOCAL,\n can_dynsized=False)\n\n\n@register\n@implement(stubs.syncthreads)\ndef ptx_syncthreads(context, builder, sig, args):\n assert not args\n fname = 'llvm.nvvm.barrier0'\n lmod = builder.module\n fnty = Type.function(Type.void(), ())\n sync = lmod.get_or_insert_function(fnty, name=fname)\n builder.call(sync, ())\n return context.get_dummy_value()\n\n\n@register\n@implement(stubs.atomic.add, types.Kind(types.Array), types.intp, types.Any)\ndef ptx_atomic_add_intp(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, ind, val = args\n dtype = aryty.dtype\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n if aryty.ndim != 1:\n raise TypeError(\"indexing %d-D array with 1-D index\" % (aryty.ndim,))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])\n\n if aryty.dtype == types.float32:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_add_float32(lmod), (ptr, val))\n elif aryty.dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_add_float64(lmod), (ptr, val))\n else:\n return builder.atomic_rmw('add', ptr, val, 'monotonic')\n\n\n@register\n@implement(stubs.atomic.add, types.Kind(types.Array),\n types.Kind(types.UniTuple), types.Any)\n@implement(stubs.atomic.add, types.Kind(types.Array),\n types.Kind(types.Tuple), types.Any)\ndef ptx_atomic_add_tuple(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, inds, val = args\n dtype = aryty.dtype\n\n indices = cgutils.unpack_tuple(builder, inds, count=len(indty))\n indices = [context.cast(builder, i, t, types.intp)\n for t, i in zip(indty, indices)]\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n\n if aryty.ndim != len(indty):\n raise TypeError(\"indexing %d-D array with %d-D index\" %\n (aryty.ndim, len(indty)))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)\n\n if aryty.dtype == types.float32:\n lmod = builder.module\n return 
builder.call(nvvmutils.declare_atomic_add_float32(lmod), (ptr, val))\n elif aryty.dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_add_float64(lmod), (ptr, val))\n else:\n return builder.atomic_rmw('add', ptr, val, 'monotonic')\n\n\n@register\n@implement(stubs.atomic.max, types.Kind(types.Array), types.intp, types.Any)\ndef ptx_atomic_max_intp(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, ind, val = args\n dtype = aryty.dtype\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n if aryty.ndim != 1:\n raise TypeError(\"indexing %d-D array with 1-D index\" % (aryty.ndim,))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])\n\n if dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_max_float64(lmod), (ptr, val))\n else:\n raise TypeError('Unimplemented atomic max with %s array' % dtype)\n\n\n@register\n@implement(stubs.atomic.max, types.Kind(types.Array),\n types.Kind(types.Tuple), types.Any)\n@implement(stubs.atomic.max, types.Kind(types.Array),\n types.Kind(types.UniTuple), types.Any)\ndef ptx_atomic_max_tuple(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, inds, val = args\n dtype = aryty.dtype\n\n indices = cgutils.unpack_tuple(builder, inds, count=len(indty))\n indices = [context.cast(builder, i, t, types.intp)\n for t, i in zip(indty, indices)]\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n\n if aryty.ndim != len(indty):\n raise TypeError(\"indexing %d-D array with %d-D index\" %\n (aryty.ndim, len(indty)))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)\n\n if aryty.dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_max_float64(lmod), (ptr, val))\n else:\n raise TypeError('Unimplemented atomic max with %s array' % dtype)\n\n\n\n# -----------------------------------------------------------------------------\n\n\ndef _get_target_data(context):\n return ll.create_target_data(nvvm.data_layout[context.address_size])\n\n\ndef _generic_array(context, builder, shape, dtype, symbol_name, addrspace,\n can_dynsized=False):\n elemcount = reduce(operator.mul, shape)\n lldtype = context.get_data_type(dtype)\n laryty = Type.array(lldtype, elemcount)\n\n if addrspace == nvvm.ADDRSPACE_LOCAL:\n # Special case local addrespace allocation to use alloca\n # NVVM is smart enough to only use local memory if no register is\n # available\n dataptr = builder.alloca(laryty, name=symbol_name)\n else:\n lmod = builder.module\n\n # Create global variable in the requested address-space\n gvmem = lmod.add_global_variable(laryty, symbol_name, addrspace)\n\n if elemcount <= 0:\n if can_dynsized: # dynamic shared memory\n gvmem.linkage = lc.LINKAGE_EXTERNAL\n else:\n raise ValueError(\"array length <= 0\")\n else:\n ## Comment out the following line to workaround a NVVM bug\n ## which generates a invalid symbol name when the linkage\n ## is internal and in some situation.\n ## See _get_unique_smem_id()\n # gvmem.linkage = lc.LINKAGE_INTERNAL\n\n gvmem.initializer = lc.Constant.undef(laryty)\n\n if dtype not in types.number_domain:\n raise TypeError(\"unsupported type: %s\" % dtype)\n\n # Convert to generic address-space\n conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)\n addrspaceptr = 
gvmem.bitcast(Type.pointer(Type.int(8), addrspace))\n dataptr = builder.call(conv, [addrspaceptr])\n\n return _make_array(context, builder, dataptr, dtype, shape)\n\n\ndef _make_array(context, builder, dataptr, dtype, shape, layout='C'):\n ndim = len(shape)\n # Create array object\n aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')\n ary = context.make_array(aryty)(context, builder)\n\n targetdata = _get_target_data(context)\n lldtype = context.get_data_type(dtype)\n itemsize = lldtype.get_abi_size(targetdata)\n # Compute strides\n rstrides = [itemsize]\n for i, lastsize in enumerate(reversed(shape[1:])):\n rstrides.append(lastsize * rstrides[-1])\n strides = [s for s in reversed(rstrides)]\n\n kshape = [context.get_constant(types.intp, s) for s in shape]\n kstrides = [context.get_constant(types.intp, s) for s in strides]\n\n context.populate_array(ary,\n data=builder.bitcast(dataptr, ary.data.type),\n shape=cgutils.pack_array(builder, kshape),\n strides=cgutils.pack_array(builder, kstrides),\n itemsize=context.get_constant(types.intp, itemsize),\n meminfo=None)\n return ary._getvalue()\n", "path": "numba/cuda/cudaimpl.py"}], "after_files": [{"content": "from __future__ import print_function, absolute_import, division\nfrom functools import reduce\nimport operator\nfrom llvmlite.llvmpy.core import Type\nimport llvmlite.llvmpy.core as lc\nimport llvmlite.llvmpy.ee as le\nimport llvmlite.binding as ll\nfrom numba.targets.imputils import implement, Registry\nfrom numba import cgutils\nfrom numba import types\nfrom .cudadrv import nvvm\nfrom . import nvvmutils, stubs\n\nregistry = Registry()\nregister = registry.register\n\n\n@register\n@implement('ptx.grid.1d', types.intp)\ndef ptx_grid1d(context, builder, sig, args):\n assert len(args) == 1\n return nvvmutils.get_global_id(builder, dim=1)\n\n\n@register\n@implement('ptx.grid.2d', types.intp)\ndef ptx_grid2d(context, builder, sig, args):\n assert len(args) == 1\n r1, r2 = nvvmutils.get_global_id(builder, dim=2)\n return cgutils.pack_array(builder, [r1, r2])\n\n\n@register\n@implement('ptx.grid.3d', types.intp)\ndef ptx_grid3d(context, builder, sig, args):\n assert len(args) == 1\n r1, r2, r3 = nvvmutils.get_global_id(builder, dim=3)\n return cgutils.pack_array(builder, [r1, r2, r3])\n\n\n@register\n@implement('ptx.gridsize.1d', types.intp)\ndef ptx_gridsize1d(context, builder, sig, args):\n assert len(args) == 1\n ntidx = nvvmutils.call_sreg(builder, \"ntid.x\")\n nctaidx = nvvmutils.call_sreg(builder, \"nctaid.x\")\n\n res = builder.mul(ntidx, nctaidx)\n return res\n\n\n@register\n@implement('ptx.gridsize.2d', types.intp)\ndef ptx_gridsize2d(context, builder, sig, args):\n assert len(args) == 1\n ntidx = nvvmutils.call_sreg(builder, \"ntid.x\")\n nctaidx = nvvmutils.call_sreg(builder, \"nctaid.x\")\n\n ntidy = nvvmutils.call_sreg(builder, \"ntid.y\")\n nctaidy = nvvmutils.call_sreg(builder, \"nctaid.y\")\n\n r1 = builder.mul(ntidx, nctaidx)\n r2 = builder.mul(ntidy, nctaidy)\n return cgutils.pack_array(builder, [r1, r2])\n\n\n@register\n@implement('ptx.gridsize.3d', types.intp)\ndef ptx_gridsize3d(context, builder, sig, args):\n assert len(args) == 1\n ntidx = nvvmutils.call_sreg(builder, \"ntid.x\")\n nctaidx = nvvmutils.call_sreg(builder, \"nctaid.x\")\n\n ntidy = nvvmutils.call_sreg(builder, \"ntid.y\")\n nctaidy = nvvmutils.call_sreg(builder, \"nctaid.y\")\n\n ntidz = nvvmutils.call_sreg(builder, \"ntid.z\")\n nctaidz = nvvmutils.call_sreg(builder, \"nctaid.z\")\n\n r1 = builder.mul(ntidx, nctaidx)\n r2 = builder.mul(ntidy, 
nctaidy)\n r3 = builder.mul(ntidz, nctaidz)\n return cgutils.pack_array(builder, [r1, r2, r3])\n\n\n# -----------------------------------------------------------------------------\n\ndef ptx_sreg_template(sreg):\n def ptx_sreg_impl(context, builder, sig, args):\n assert not args\n return nvvmutils.call_sreg(builder, sreg)\n\n return ptx_sreg_impl\n\n\n# Dynamic create all special register\nfor sreg in nvvmutils.SREG_MAPPING.keys():\n register(implement(sreg)(ptx_sreg_template(sreg)))\n\n\n# -----------------------------------------------------------------------------\n\n@register\n@implement('ptx.cmem.arylike', types.Kind(types.Array))\ndef ptx_cmem_arylike(context, builder, sig, args):\n lmod = builder.module\n [arr] = args\n flat = arr.flatten(order='A')\n aryty = sig.return_type\n dtype = aryty.dtype\n\n if isinstance(dtype, types.Complex):\n elemtype = (types.float32\n if dtype == types.complex64\n else types.float64)\n constvals = []\n for i in range(flat.size):\n elem = flat[i]\n real = context.get_constant(elemtype, elem.real)\n imag = context.get_constant(elemtype, elem.imag)\n constvals.extend([real, imag])\n\n elif dtype in types.number_domain:\n constvals = [context.get_constant(dtype, flat[i])\n for i in range(flat.size)]\n\n else:\n raise TypeError(\"unsupport type: %s\" % dtype)\n\n constary = lc.Constant.array(constvals[0].type, constvals)\n\n addrspace = nvvm.ADDRSPACE_CONSTANT\n gv = lmod.add_global_variable(constary.type, name=\"_cudapy_cmem\",\n addrspace=addrspace)\n gv.linkage = lc.LINKAGE_INTERNAL\n gv.global_constant = True\n gv.initializer = constary\n\n # Convert to generic address-space\n conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)\n addrspaceptr = gv.bitcast(Type.pointer(Type.int(8), addrspace))\n genptr = builder.call(conv, [addrspaceptr])\n\n # Create array object\n ary = context.make_array(aryty)(context, builder)\n kshape = [context.get_constant(types.intp, s) for s in arr.shape]\n kstrides = [context.get_constant(types.intp, s) for s in arr.strides]\n context.populate_array(ary,\n data=builder.bitcast(genptr, ary.data.type),\n shape=cgutils.pack_array(builder, kshape),\n strides=cgutils.pack_array(builder, kstrides),\n itemsize=ary.itemsize,\n parent=ary.parent,\n meminfo=None)\n\n return ary._getvalue()\n\n\n_unique_smem_id = 0\n\n\ndef _get_unique_smem_id(name):\n \"\"\"Due to bug with NVVM invalid internalizing of shared memory in the\n PTX output. We can't mark shared memory to be internal. 
We have to\n ensure unique name is generated for shared memory symbol.\n \"\"\"\n global _unique_smem_id\n _unique_smem_id += 1\n return \"{0}_{1}\".format(name, _unique_smem_id)\n\n\n@register\n@implement('ptx.smem.alloc', types.intp, types.Any)\ndef ptx_smem_alloc_intp(context, builder, sig, args):\n length, dtype = args\n return _generic_array(context, builder, shape=(length,), dtype=dtype,\n symbol_name=_get_unique_smem_id('_cudapy_smem'),\n addrspace=nvvm.ADDRSPACE_SHARED,\n can_dynsized=True)\n\n\n@register\n@implement('ptx.smem.alloc', types.Kind(types.UniTuple), types.Any)\ndef ptx_smem_alloc_array(context, builder, sig, args):\n shape, dtype = args\n return _generic_array(context, builder, shape=shape, dtype=dtype,\n symbol_name=_get_unique_smem_id('_cudapy_smem'),\n addrspace=nvvm.ADDRSPACE_SHARED,\n can_dynsized=True)\n\n\n@register\n@implement('ptx.lmem.alloc', types.intp, types.Any)\ndef ptx_lmem_alloc_intp(context, builder, sig, args):\n length, dtype = args\n return _generic_array(context, builder, shape=(length,), dtype=dtype,\n symbol_name='_cudapy_lmem',\n addrspace=nvvm.ADDRSPACE_LOCAL,\n can_dynsized=False)\n\n\n@register\n@implement('ptx.lmem.alloc', types.Kind(types.UniTuple), types.Any)\ndef ptx_lmem_alloc_array(context, builder, sig, args):\n shape, dtype = args\n return _generic_array(context, builder, shape=shape, dtype=dtype,\n symbol_name='_cudapy_lmem',\n addrspace=nvvm.ADDRSPACE_LOCAL,\n can_dynsized=False)\n\n\n@register\n@implement(stubs.syncthreads)\ndef ptx_syncthreads(context, builder, sig, args):\n assert not args\n fname = 'llvm.nvvm.barrier0'\n lmod = builder.module\n fnty = Type.function(Type.void(), ())\n sync = lmod.get_or_insert_function(fnty, name=fname)\n builder.call(sync, ())\n return context.get_dummy_value()\n\n\n@register\n@implement(stubs.atomic.add, types.Kind(types.Array), types.intp, types.Any)\ndef ptx_atomic_add_intp(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, ind, val = args\n dtype = aryty.dtype\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n if aryty.ndim != 1:\n raise TypeError(\"indexing %d-D array with 1-D index\" % (aryty.ndim,))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])\n\n if aryty.dtype == types.float32:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_add_float32(lmod), (ptr, val))\n elif aryty.dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_add_float64(lmod), (ptr, val))\n else:\n return builder.atomic_rmw('add', ptr, val, 'monotonic')\n\n\n@register\n@implement(stubs.atomic.add, types.Kind(types.Array),\n types.Kind(types.UniTuple), types.Any)\n@implement(stubs.atomic.add, types.Kind(types.Array),\n types.Kind(types.Tuple), types.Any)\ndef ptx_atomic_add_tuple(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, inds, val = args\n dtype = aryty.dtype\n\n indices = cgutils.unpack_tuple(builder, inds, count=len(indty))\n indices = [context.cast(builder, i, t, types.intp)\n for t, i in zip(indty, indices)]\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n\n if aryty.ndim != len(indty):\n raise TypeError(\"indexing %d-D array with %d-D index\" %\n (aryty.ndim, len(indty)))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)\n\n if aryty.dtype == types.float32:\n lmod = builder.module\n return 
builder.call(nvvmutils.declare_atomic_add_float32(lmod), (ptr, val))\n elif aryty.dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_add_float64(lmod), (ptr, val))\n else:\n return builder.atomic_rmw('add', ptr, val, 'monotonic')\n\n\n@register\n@implement(stubs.atomic.max, types.Kind(types.Array), types.intp, types.Any)\ndef ptx_atomic_max_intp(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, ind, val = args\n dtype = aryty.dtype\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n if aryty.ndim != 1:\n raise TypeError(\"indexing %d-D array with 1-D index\" % (aryty.ndim,))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])\n\n if dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_max_float64(lmod), (ptr, val))\n else:\n raise TypeError('Unimplemented atomic max with %s array' % dtype)\n\n\n@register\n@implement(stubs.atomic.max, types.Kind(types.Array),\n types.Kind(types.Tuple), types.Any)\n@implement(stubs.atomic.max, types.Kind(types.Array),\n types.Kind(types.UniTuple), types.Any)\ndef ptx_atomic_max_tuple(context, builder, sig, args):\n aryty, indty, valty = sig.args\n ary, inds, val = args\n dtype = aryty.dtype\n\n indices = cgutils.unpack_tuple(builder, inds, count=len(indty))\n indices = [context.cast(builder, i, t, types.intp)\n for t, i in zip(indty, indices)]\n\n if dtype != valty:\n raise TypeError(\"expect %s but got %s\" % (dtype, valty))\n\n if aryty.ndim != len(indty):\n raise TypeError(\"indexing %d-D array with %d-D index\" %\n (aryty.ndim, len(indty)))\n\n lary = context.make_array(aryty)(context, builder, ary)\n ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)\n\n if aryty.dtype == types.float64:\n lmod = builder.module\n return builder.call(nvvmutils.declare_atomic_max_float64(lmod), (ptr, val))\n else:\n raise TypeError('Unimplemented atomic max with %s array' % dtype)\n\n\n\n# -----------------------------------------------------------------------------\n\n\ndef _get_target_data(context):\n return ll.create_target_data(nvvm.data_layout[context.address_size])\n\n\ndef _generic_array(context, builder, shape, dtype, symbol_name, addrspace,\n can_dynsized=False):\n elemcount = reduce(operator.mul, shape)\n lldtype = context.get_data_type(dtype)\n laryty = Type.array(lldtype, elemcount)\n\n if addrspace == nvvm.ADDRSPACE_LOCAL:\n # Special case local addrespace allocation to use alloca\n # NVVM is smart enough to only use local memory if no register is\n # available\n dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)\n else:\n lmod = builder.module\n\n # Create global variable in the requested address-space\n gvmem = lmod.add_global_variable(laryty, symbol_name, addrspace)\n\n if elemcount <= 0:\n if can_dynsized: # dynamic shared memory\n gvmem.linkage = lc.LINKAGE_EXTERNAL\n else:\n raise ValueError(\"array length <= 0\")\n else:\n ## Comment out the following line to workaround a NVVM bug\n ## which generates a invalid symbol name when the linkage\n ## is internal and in some situation.\n ## See _get_unique_smem_id()\n # gvmem.linkage = lc.LINKAGE_INTERNAL\n\n gvmem.initializer = lc.Constant.undef(laryty)\n\n if dtype not in types.number_domain:\n raise TypeError(\"unsupported type: %s\" % dtype)\n\n # Convert to generic address-space\n conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)\n addrspaceptr = 
gvmem.bitcast(Type.pointer(Type.int(8), addrspace))\n dataptr = builder.call(conv, [addrspaceptr])\n\n return _make_array(context, builder, dataptr, dtype, shape)\n\n\ndef _make_array(context, builder, dataptr, dtype, shape, layout='C'):\n ndim = len(shape)\n # Create array object\n aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')\n ary = context.make_array(aryty)(context, builder)\n\n targetdata = _get_target_data(context)\n lldtype = context.get_data_type(dtype)\n itemsize = lldtype.get_abi_size(targetdata)\n # Compute strides\n rstrides = [itemsize]\n for i, lastsize in enumerate(reversed(shape[1:])):\n rstrides.append(lastsize * rstrides[-1])\n strides = [s for s in reversed(rstrides)]\n\n kshape = [context.get_constant(types.intp, s) for s in shape]\n kstrides = [context.get_constant(types.intp, s) for s in strides]\n\n context.populate_array(ary,\n data=builder.bitcast(dataptr, ary.data.type),\n shape=cgutils.pack_array(builder, kshape),\n strides=cgutils.pack_array(builder, kstrides),\n itemsize=context.get_constant(types.intp, itemsize),\n meminfo=None)\n return ary._getvalue()\n", "path": "numba/cuda/cudaimpl.py"}]} |
gh_patches_debug_1317 | rasdani/github-patches | git_diff | SCons__scons-3556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
**Describe the bug**
From git head, just now.
File: src/engine/SCons/Tool/textfile.py
Line 165:
_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
Line 174:
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
Looks like a cut/paste/edit error. My guess, from the rest of the code, is that
'TEXTFILESUFFIX' should be 'SUBSTFILESUFFIX' on line 174
--- END ISSUE ---
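For clarity, the correction the reporter is suggesting would make line 174 mirror the naming pattern of line 165. A sketch of what that line would then read (an assumption drawn from the issue text, not a verified upstream patch):
```python
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']
```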
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/engine/SCons/Tool/textfile.py`
Content:
```
1 # -*- python -*-
2 #
3 # __COPYRIGHT__
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
17 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
18 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 #
24
25 __doc__ = """
26 Textfile/Substfile builder for SCons.
27
28 Create file 'target' which typically is a textfile. The 'source'
29 may be any combination of strings, Nodes, or lists of same. A
30 'linesep' will be put between any part written and defaults to
31 os.linesep.
32
33 The only difference between the Textfile builder and the Substfile
34 builder is that strings are converted to Value() nodes for the
35 former and File() nodes for the latter. To insert files in the
36 former or strings in the latter, wrap them in a File() or Value(),
37 respectively.
38
39 The values of SUBST_DICT first have any construction variables
40 expanded (its keys are not expanded). If a value of SUBST_DICT is
41 a python callable function, it is called and the result is expanded
42 as the value. Values are substituted in a "random" order; if any
43 substitution could be further expanded by another substitution, it
44 is unpredictable whether the expansion will occur.
45 """
46
47 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
48
49 import SCons
50
51 import os
52 import re
53
54 from SCons.Node import Node
55 from SCons.Node.Python import Value
56 from SCons.Util import is_String, is_Sequence, is_Dict, to_bytes
57
58
59 TEXTFILE_FILE_WRITE_MODE = 'w'
60
61 LINESEP = '\n'
62
63 def _do_subst(node, subs):
64 """
65 Fetch the node contents and replace all instances of the keys with
66 their values. For example, if subs is
67 {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
68 then all instances of %VERSION% in the file will be replaced with
69 1.2345 and so forth.
70 """
71 contents = node.get_text_contents()
72 if subs:
73 for (k, val) in subs:
74 contents = contents.replace(k, val)
75
76 if 'b' in TEXTFILE_FILE_WRITE_MODE:
77 try:
78 contents = bytearray(contents, 'utf-8')
79 except UnicodeDecodeError:
80 # contents is already utf-8 encoded python 2 str i.e. a byte array
81 contents = bytearray(contents)
82
83 return contents
84
85
86 def _action(target, source, env):
87
88 # prepare the line separator
89 linesep = env['LINESEPARATOR']
90 if linesep is None:
91 linesep = LINESEP # os.linesep
92 elif is_String(linesep):
93 pass
94 elif isinstance(linesep, Value):
95 linesep = linesep.get_text_contents()
96 else:
97 raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'
98 % repr(linesep), None)
99
100 if 'b' in TEXTFILE_FILE_WRITE_MODE:
101 linesep = to_bytes(linesep)
102
103 # create a dictionary to use for the substitutions
104 if 'SUBST_DICT' not in env:
105 subs = None # no substitutions
106 else:
107 subst_dict = env['SUBST_DICT']
108 if is_Dict(subst_dict):
109 subst_dict = list(subst_dict.items())
110 elif is_Sequence(subst_dict):
111 pass
112 else:
113 raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
114 subs = []
115 for (k, value) in subst_dict:
116 if callable(value):
117 value = value()
118 if is_String(value):
119 value = env.subst(value)
120 else:
121 value = str(value)
122 subs.append((k, value))
123
124 # write the file
125 try:
126 target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')
127 except (OSError, IOError):
128 raise SCons.Errors.UserError("Can't write target file %s" % target[0])
129
130 # separate lines by 'linesep' only if linesep is not empty
131 lsep = None
132 for line in source:
133 if lsep:
134 target_file.write(lsep)
135
136 target_file.write(_do_subst(line, subs))
137 lsep = linesep
138 target_file.close()
139
140
141 def _strfunc(target, source, env):
142 return "Creating '%s'" % target[0]
143
144
145 def _convert_list_R(newlist, sources):
146 for elem in sources:
147 if is_Sequence(elem):
148 _convert_list_R(newlist, elem)
149 elif isinstance(elem, Node):
150 newlist.append(elem)
151 else:
152 newlist.append(Value(elem))
153
154
155 def _convert_list(target, source, env):
156 if len(target) != 1:
157 raise SCons.Errors.UserError("Only one target file allowed")
158 newlist = []
159 _convert_list_R(newlist, source)
160 return target, newlist
161
162
163 _common_varlist = ['SUBST_DICT', 'LINESEPARATOR']
164
165 _text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
166 _text_builder = SCons.Builder.Builder(
167 action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),
168 source_factory=Value,
169 emitter=_convert_list,
170 prefix='$TEXTFILEPREFIX',
171 suffix='$TEXTFILESUFFIX',
172 )
173
174 _subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
175 _subst_builder = SCons.Builder.Builder(
176 action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),
177 source_factory=SCons.Node.FS.File,
178 emitter=_convert_list,
179 prefix='$SUBSTFILEPREFIX',
180 suffix='$SUBSTFILESUFFIX',
181 src_suffix=['.in'],
182 )
183
184
185 def generate(env):
186 env['LINESEPARATOR'] = LINESEP # os.linesep
187 env['BUILDERS']['Textfile'] = _text_builder
188 env['TEXTFILEPREFIX'] = ''
189 env['TEXTFILESUFFIX'] = '.txt'
190 env['BUILDERS']['Substfile'] = _subst_builder
191 env['SUBSTFILEPREFIX'] = ''
192 env['SUBSTFILESUFFIX'] = ''
193
194
195 def exists(env):
196 return 1
197
198 # Local Variables:
199 # tab-width:4
200 # indent-tabs-mode:nil
201 # End:
202 # vim: set expandtab tabstop=4 shiftwidth=4:
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py
--- a/src/engine/SCons/Tool/textfile.py
+++ b/src/engine/SCons/Tool/textfile.py
@@ -171,7 +171,7 @@
suffix='$TEXTFILESUFFIX',
)
-_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
+_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']
_subst_builder = SCons.Builder.Builder(
action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),
source_factory=SCons.Node.FS.File,
| {"golden_diff": "diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py\n--- a/src/engine/SCons/Tool/textfile.py\n+++ b/src/engine/SCons/Tool/textfile.py\n@@ -171,7 +171,7 @@\n suffix='$TEXTFILESUFFIX',\n )\n \n-_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n+_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']\n _subst_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n", "issue": "['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n\r\n**Describe the bug**\r\nFrom git head, just now. \r\nFile: src/engine/SCons/Tool/textfile.py\r\nLine 165:\r\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\r\n\r\nLine 174:\r\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\r\n\r\nLooks like a cur/paste/edit error. My guess, from the rest of the code, is that\r\n 'TEXTFILESUFFIX' should be 'SUBSTFILESUFFIX' on line 174\r\n\n", "before_files": [{"content": "# -*- python -*-\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__doc__ = \"\"\"\nTextfile/Substfile builder for SCons.\n\n Create file 'target' which typically is a textfile. The 'source'\n may be any combination of strings, Nodes, or lists of same. A\n 'linesep' will be put between any part written and defaults to\n os.linesep.\n\n The only difference between the Textfile builder and the Substfile\n builder is that strings are converted to Value() nodes for the\n former and File() nodes for the latter. To insert files in the\n former or strings in the latter, wrap them in a File() or Value(),\n respectively.\n\n The values of SUBST_DICT first have any construction variables\n expanded (its keys are not expanded). If a value of SUBST_DICT is\n a python callable function, it is called and the result is expanded\n as the value. 
Values are substituted in a \"random\" order; if any\n substitution could be further expanded by another substitution, it\n is unpredictable whether the expansion will occur.\n\"\"\"\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport SCons\n\nimport os\nimport re\n\nfrom SCons.Node import Node\nfrom SCons.Node.Python import Value\nfrom SCons.Util import is_String, is_Sequence, is_Dict, to_bytes\n\n\nTEXTFILE_FILE_WRITE_MODE = 'w'\n\nLINESEP = '\\n'\n\ndef _do_subst(node, subs):\n \"\"\"\n Fetch the node contents and replace all instances of the keys with\n their values. For example, if subs is\n {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},\n then all instances of %VERSION% in the file will be replaced with\n 1.2345 and so forth.\n \"\"\"\n contents = node.get_text_contents()\n if subs:\n for (k, val) in subs:\n contents = contents.replace(k, val)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n try:\n contents = bytearray(contents, 'utf-8')\n except UnicodeDecodeError:\n # contents is already utf-8 encoded python 2 str i.e. a byte array\n contents = bytearray(contents)\n\n return contents\n\n\ndef _action(target, source, env):\n\n # prepare the line separator\n linesep = env['LINESEPARATOR']\n if linesep is None:\n linesep = LINESEP # os.linesep\n elif is_String(linesep):\n pass\n elif isinstance(linesep, Value):\n linesep = linesep.get_text_contents()\n else:\n raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'\n % repr(linesep), None)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n linesep = to_bytes(linesep)\n\n # create a dictionary to use for the substitutions\n if 'SUBST_DICT' not in env:\n subs = None # no substitutions\n else:\n subst_dict = env['SUBST_DICT']\n if is_Dict(subst_dict):\n subst_dict = list(subst_dict.items())\n elif is_Sequence(subst_dict):\n pass\n else:\n raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')\n subs = []\n for (k, value) in subst_dict:\n if callable(value):\n value = value()\n if is_String(value):\n value = env.subst(value)\n else:\n value = str(value)\n subs.append((k, value))\n\n # write the file\n try:\n target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')\n except (OSError, IOError):\n raise SCons.Errors.UserError(\"Can't write target file %s\" % target[0])\n\n # separate lines by 'linesep' only if linesep is not empty\n lsep = None\n for line in source:\n if lsep:\n target_file.write(lsep)\n\n target_file.write(_do_subst(line, subs))\n lsep = linesep\n target_file.close()\n\n\ndef _strfunc(target, source, env):\n return \"Creating '%s'\" % target[0]\n\n\ndef _convert_list_R(newlist, sources):\n for elem in sources:\n if is_Sequence(elem):\n _convert_list_R(newlist, elem)\n elif isinstance(elem, Node):\n newlist.append(elem)\n else:\n newlist.append(Value(elem))\n\n\ndef _convert_list(target, source, env):\n if len(target) != 1:\n raise SCons.Errors.UserError(\"Only one target file allowed\")\n newlist = []\n _convert_list_R(newlist, source)\n return target, newlist\n\n\n_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']\n\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\n_text_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),\n source_factory=Value,\n emitter=_convert_list,\n prefix='$TEXTFILEPREFIX',\n suffix='$TEXTFILESUFFIX',\n)\n\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n_subst_builder = SCons.Builder.Builder(\n 
action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n emitter=_convert_list,\n prefix='$SUBSTFILEPREFIX',\n suffix='$SUBSTFILESUFFIX',\n src_suffix=['.in'],\n)\n\n\ndef generate(env):\n env['LINESEPARATOR'] = LINESEP # os.linesep\n env['BUILDERS']['Textfile'] = _text_builder\n env['TEXTFILEPREFIX'] = ''\n env['TEXTFILESUFFIX'] = '.txt'\n env['BUILDERS']['Substfile'] = _subst_builder\n env['SUBSTFILEPREFIX'] = ''\n env['SUBSTFILESUFFIX'] = ''\n\n\ndef exists(env):\n return 1\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "src/engine/SCons/Tool/textfile.py"}], "after_files": [{"content": "# -*- python -*-\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__doc__ = \"\"\"\nTextfile/Substfile builder for SCons.\n\n Create file 'target' which typically is a textfile. The 'source'\n may be any combination of strings, Nodes, or lists of same. A\n 'linesep' will be put between any part written and defaults to\n os.linesep.\n\n The only difference between the Textfile builder and the Substfile\n builder is that strings are converted to Value() nodes for the\n former and File() nodes for the latter. To insert files in the\n former or strings in the latter, wrap them in a File() or Value(),\n respectively.\n\n The values of SUBST_DICT first have any construction variables\n expanded (its keys are not expanded). If a value of SUBST_DICT is\n a python callable function, it is called and the result is expanded\n as the value. Values are substituted in a \"random\" order; if any\n substitution could be further expanded by another substitution, it\n is unpredictable whether the expansion will occur.\n\"\"\"\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport SCons\n\nimport os\nimport re\n\nfrom SCons.Node import Node\nfrom SCons.Node.Python import Value\nfrom SCons.Util import is_String, is_Sequence, is_Dict, to_bytes\n\n\nTEXTFILE_FILE_WRITE_MODE = 'w'\n\nLINESEP = '\\n'\n\ndef _do_subst(node, subs):\n \"\"\"\n Fetch the node contents and replace all instances of the keys with\n their values. 
For example, if subs is\n {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},\n then all instances of %VERSION% in the file will be replaced with\n 1.2345 and so forth.\n \"\"\"\n contents = node.get_text_contents()\n if subs:\n for (k, val) in subs:\n contents = contents.replace(k, val)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n try:\n contents = bytearray(contents, 'utf-8')\n except UnicodeDecodeError:\n # contents is already utf-8 encoded python 2 str i.e. a byte array\n contents = bytearray(contents)\n\n return contents\n\n\ndef _action(target, source, env):\n\n # prepare the line separator\n linesep = env['LINESEPARATOR']\n if linesep is None:\n linesep = LINESEP # os.linesep\n elif is_String(linesep):\n pass\n elif isinstance(linesep, Value):\n linesep = linesep.get_text_contents()\n else:\n raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'\n % repr(linesep), None)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n linesep = to_bytes(linesep)\n\n # create a dictionary to use for the substitutions\n if 'SUBST_DICT' not in env:\n subs = None # no substitutions\n else:\n subst_dict = env['SUBST_DICT']\n if is_Dict(subst_dict):\n subst_dict = list(subst_dict.items())\n elif is_Sequence(subst_dict):\n pass\n else:\n raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')\n subs = []\n for (k, value) in subst_dict:\n if callable(value):\n value = value()\n if is_String(value):\n value = env.subst(value)\n else:\n value = str(value)\n subs.append((k, value))\n\n # write the file\n try:\n target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')\n except (OSError, IOError):\n raise SCons.Errors.UserError(\"Can't write target file %s\" % target[0])\n\n # separate lines by 'linesep' only if linesep is not empty\n lsep = None\n for line in source:\n if lsep:\n target_file.write(lsep)\n\n target_file.write(_do_subst(line, subs))\n lsep = linesep\n target_file.close()\n\n\ndef _strfunc(target, source, env):\n return \"Creating '%s'\" % target[0]\n\n\ndef _convert_list_R(newlist, sources):\n for elem in sources:\n if is_Sequence(elem):\n _convert_list_R(newlist, elem)\n elif isinstance(elem, Node):\n newlist.append(elem)\n else:\n newlist.append(Value(elem))\n\n\ndef _convert_list(target, source, env):\n if len(target) != 1:\n raise SCons.Errors.UserError(\"Only one target file allowed\")\n newlist = []\n _convert_list_R(newlist, source)\n return target, newlist\n\n\n_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']\n\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\n_text_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),\n source_factory=Value,\n emitter=_convert_list,\n prefix='$TEXTFILEPREFIX',\n suffix='$TEXTFILESUFFIX',\n)\n\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']\n_subst_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n emitter=_convert_list,\n prefix='$SUBSTFILEPREFIX',\n suffix='$SUBSTFILESUFFIX',\n src_suffix=['.in'],\n)\n\n\ndef generate(env):\n env['LINESEPARATOR'] = LINESEP # os.linesep\n env['BUILDERS']['Textfile'] = _text_builder\n env['TEXTFILEPREFIX'] = ''\n env['TEXTFILESUFFIX'] = '.txt'\n env['BUILDERS']['Substfile'] = _subst_builder\n env['SUBSTFILEPREFIX'] = ''\n env['SUBSTFILESUFFIX'] = ''\n\n\ndef exists(env):\n return 1\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab 
tabstop=4 shiftwidth=4:\n", "path": "src/engine/SCons/Tool/textfile.py"}]} |
gh_patches_debug_1318 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1845 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Selecting from files is not work
selecting from files like this:
```
select * from file.file_name
```
is not working
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/api/mysql/mysql_proxy/classes/sql_query.py`
Content:
```
1 """
2 *******************************************************
3 * Copyright (C) 2017 MindsDB Inc. <[email protected]>
4 *
5 * This file is part of MindsDB Server.
6 *
7 * MindsDB Server can not be copied and/or distributed without the express
8 * permission of MindsDB Inc
9 *******************************************************
10 """
11
12 import re
13 import pandas as pd
14 import datetime
15 import time
16
17 import duckdb
18 from lightwood.api import dtype
19 from mindsdb_sql import parse_sql
20 from mindsdb_sql.planner import plan_query
21 from mindsdb_sql.parser.dialects.mindsdb.latest import Latest
22 from mindsdb_sql.parser.ast import (
23 BinaryOperation,
24 UnaryOperation,
25 Identifier,
26 Operation,
27 Constant,
28 OrderBy,
29 Select,
30 Union,
31 Join,
32 Star
33 )
34 from mindsdb_sql.planner.steps import (
35 ApplyTimeseriesPredictorStep,
36 ApplyPredictorRowStep,
37 GetPredictorColumns,
38 FetchDataframeStep,
39 ApplyPredictorStep,
40 MapReduceStep,
41 MultipleSteps,
42 ProjectStep,
43 FilterStep,
44 UnionStep,
45 JoinStep
46 )
47
48 from mindsdb.api.mysql.mysql_proxy.classes.com_operators import operator_map
49 from mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import TYPES, ERR
50 from mindsdb.api.mysql.mysql_proxy.utilities import log
51 from mindsdb.interfaces.ai_table.ai_table import AITableStore
52 import mindsdb.interfaces.storage.db as db
53 from mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df
54
55
56 superset_subquery = re.compile(r'from[\s\n]*(\(.*\))[\s\n]*as[\s\n]*virtual_table', flags=re.IGNORECASE | re.MULTILINE | re.S)
57
58
59 class NotImplementedError(Exception):
60 pass
61
62
63 class SqlError(Exception):
64 pass
65
66
67 def get_preditor_alias(step, mindsdb_database):
68 predictor_name = '.'.join(step.predictor.parts)
69 predictor_alias = '.'.join(step.predictor.alias.parts) if step.predictor.alias is not None else predictor_name
70 return (mindsdb_database, predictor_name, predictor_alias)
71
72
73 def get_table_alias(table_obj, default_db_name):
74 # (database, table, alias)
75 if len(table_obj.parts) > 2:
76 raise Exception(f'Table name must contain no more than 2 parts. Got name: {table_obj.parts}')
77 elif len(table_obj.parts) == 1:
78 name = (default_db_name, table_obj.parts[0])
79 else:
80 name = tuple(table_obj.parts)
81 if table_obj.alias is not None:
82 name = name + ('.'.join(table_obj.alias.parts),)
83 else:
84 name = name + (None,)
85 return name
86
87
88 def get_all_tables(stmt):
89 if isinstance(stmt, Union):
90 left = get_all_tables(stmt.left)
91 right = get_all_tables(stmt.right)
92 return left + right
93
94 if isinstance(stmt, Select):
95 from_stmt = stmt.from_table
96 elif isinstance(stmt, (Identifier, Join)):
97 from_stmt = stmt
98 else:
99 raise Exception(f'Unknown type of identifier: {stmt}')
100
101 result = []
102 if isinstance(from_stmt, Identifier):
103 result.append(from_stmt.parts[-1])
104 elif isinstance(from_stmt, Join):
105 result.extend(get_all_tables(from_stmt.left))
106 result.extend(get_all_tables(from_stmt.right))
107 return result
108
109
110 def markQueryVar(where):
111 if isinstance(where, BinaryOperation):
112 markQueryVar(where.args[0])
113 markQueryVar(where.args[1])
114 elif isinstance(where, UnaryOperation):
115 markQueryVar(where.args[0])
116 elif isinstance(where, Constant):
117 if where.value == '$var':
118 where.is_var = True
119
120
121 def replaceQueryVar(where, val):
122 if isinstance(where, BinaryOperation):
123 replaceQueryVar(where.args[0], val)
124 replaceQueryVar(where.args[1], val)
125 elif isinstance(where, UnaryOperation):
126 replaceQueryVar(where.args[0], val)
127 elif isinstance(where, Constant):
128 if hasattr(where, 'is_var') and where.is_var is True:
129 where.value = val
130
131
132 def join_query_data(target, source):
133 target['values'].extend(source['values'])
134 target['tables'].extend(source['tables'])
135 target['tables'] = list(set(target['tables']))
136 for table_name in source['columns']:
137 if table_name not in target['columns']:
138 target['columns'][table_name] = source['columns'][table_name]
139 else:
140 target['columns'][table_name].extend(source['columns'][table_name])
141 target['columns'][table_name] = list(set(target['columns'][table_name]))
142
143
144 class SQLQuery():
145 def __init__(self, sql, session):
146 self.session = session
147 self.integration = session.integration
148 self.database = None if session.database == '' else session.database.lower()
149 self.datahub = session.datahub
150 self.ai_table = None
151 self.outer_query = None
152 self.row_id = 0
153 self.columns_list = None
154
155 self.mindsdb_database_name = 'mindsdb'
156
157 # +++ workaround for subqueries in superset
158 if 'as virtual_table' in sql.lower():
159 subquery = re.findall(superset_subquery, sql)
160 if isinstance(subquery, list) and len(subquery) == 1:
161 subquery = subquery[0]
162 self.outer_query = sql.replace(subquery, 'dataframe')
163 sql = subquery.strip('()')
164 # ---
165
166 self.raw = sql
167 self.model_types = {}
168 self._parse_query(sql)
169
170 def fetch(self, datahub, view='list'):
171 data = self.fetched_data
172
173 if view == 'list':
174 self.result = self._make_list_result_view(data)
175 elif view == 'dict':
176 self.result = self._make_dict_result_view(data)
177 else:
178 raise Exception('Only "list" and "dict" views supported atm')
179
180 return {
181 'success': True,
182 'result': self.result
183 }
184
185 def _fetch_dataframe_step(self, step):
186 dn = self.datahub.get(step.integration)
187 query = step.query
188
189 table_alias = get_table_alias(step.query.from_table, self.database)
190 # TODO for information_schema we have 'database' = 'mindsdb'
191
192 data, column_names = dn.select(
193 query=query
194 )
195
196 columns = [(column_name, column_name) for column_name in column_names]
197 columns.append(('__mindsdb_row_id', '__mindsdb_row_id'))
198
199 for i, row in enumerate(data):
200 row['__mindsdb_row_id'] = self.row_id + i
201 self.row_id = self.row_id + len(data)
202
203 data = [{(key, key): value for key, value in row.items()} for row in data]
204 data = [{table_alias: x} for x in data]
205
206 data = {
207 'values': data,
208 'columns': {table_alias: columns},
209 'tables': [table_alias]
210 }
211 return data
212
213 def _multiple_steps(self, step):
214 data = {
215 'values': [],
216 'columns': {},
217 'tables': []
218 }
219 for substep in step.steps:
220 sub_data = self._fetch_dataframe_step(substep)
221 join_query_data(data, sub_data)
222 return data
223
224 def _multiple_steps_reduce(self, step, values):
225 if step.reduce != 'union':
226 raise Exception(f'Unknown MultipleSteps type: {step.reduce}')
227
228 data = {
229 'values': [],
230 'columns': {},
231 'tables': []
232 }
233
234 for substep in step.steps:
235 if isinstance(substep, FetchDataframeStep) is False:
236 raise Exception(f'Wrong step type for MultipleSteps: {step}')
237 markQueryVar(substep.query.where)
238
239 for v in values:
240 for substep in step.steps:
241 replaceQueryVar(substep.query.where, v)
242 sub_data = self._multiple_steps(step)
243 join_query_data(data, sub_data)
244
245 return data
246
247 def _parse_query(self, sql):
248 mindsdb_sql_struct = parse_sql(sql, dialect='mindsdb')
249
250 # is it query to 'predictors'?
251 if (
252 isinstance(mindsdb_sql_struct.from_table, Identifier)
253 and mindsdb_sql_struct.from_table.parts[-1].lower() == 'predictors'
254 and (
255 self.database == 'mindsdb'
256 or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'
257 )
258 ):
259 dn = self.datahub.get(self.mindsdb_database_name)
260 data, columns = dn.get_predictors(mindsdb_sql_struct)
261 table_name = ('mindsdb', 'predictors', 'predictors')
262 data = [{(key, key): value for key, value in row.items()} for row in data]
263 data = [{table_name: x} for x in data]
264 self.columns_list = [
265 (table_name + (column_name, column_name))
266 for column_name in columns
267 ]
268
269 columns = [(column_name, column_name) for column_name in columns]
270
271 self.fetched_data = {
272 'values': data,
273 'columns': {table_name: columns},
274 'tables': [table_name]
275 }
276 return
277
278 # is it query to 'commands'?
279 if (
280 isinstance(mindsdb_sql_struct.from_table, Identifier)
281 and mindsdb_sql_struct.from_table.parts[-1].lower() == 'commands'
282 and (
283 self.database == 'mindsdb'
284 or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'
285 )
286 ):
287 self.fetched_data = {
288 'values': [],
289 'columns': {('mindsdb', 'commands', 'commands'): [('command', 'command')]},
290 'tables': [('mindsdb', 'commands', 'commands')]
291 }
292 self.columns_list = [('mindsdb', 'commands', 'commands', 'command', 'command')]
293 return
294
295 # is it query to 'datasources'?
296 if (
297 isinstance(mindsdb_sql_struct.from_table, Identifier)
298 and mindsdb_sql_struct.from_table.parts[-1].lower() == 'datasources'
299 and (
300 self.database == 'mindsdb'
301 or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'
302 )
303 ):
304 dn = self.datahub.get(self.mindsdb_database_name)
305 data, columns = dn.get_datasources(mindsdb_sql_struct)
306 table_name = ('mindsdb', 'datasources', 'datasources')
307 data = [{(key, key): value for key, value in row.items()} for row in data]
308 data = [{table_name: x} for x in data]
309
310 self.columns_list = [
311 (table_name + (column_name, column_name))
312 for column_name in columns
313 ]
314
315 columns = [(column_name, column_name) for column_name in columns]
316
317 self.fetched_data = {
318 'values': data,
319 'columns': {table_name: columns},
320 'tables': [table_name]
321 }
322 return
323
324 integrations_names = self.datahub.get_datasources_names()
325 integrations_names.append('INFORMATION_SCHEMA')
326 integrations_names.append('information_schema')
327
328 all_tables = get_all_tables(mindsdb_sql_struct)
329
330 predictor_metadata = {}
331 predictors = db.session.query(db.Predictor).filter_by(company_id=self.session.company_id)
332 for model_name in set(all_tables):
333 for p in predictors:
334 if p.name == model_name:
335 if isinstance(p.data, dict) and 'error' not in p.data:
336 ts_settings = p.learn_args.get('timeseries_settings', {})
337 if ts_settings.get('is_timeseries') is True:
338 window = ts_settings.get('window')
339 order_by = ts_settings.get('order_by')[0]
340 group_by = ts_settings.get('group_by')
341 if isinstance(group_by, list):
342 group_by = ts_settings.get('group_by')[0]
343 predictor_metadata[model_name] = {
344 'timeseries': True,
345 'window': window,
346 'order_by_column': order_by,
347 'group_by_column': group_by
348 }
349 else:
350 predictor_metadata[model_name] = {
351 'timeseries': False
352 }
353 self.model_types.update(p.data.get('dtypes', {}))
354
355 plan = plan_query(
356 mindsdb_sql_struct,
357 integrations=integrations_names,
358 predictor_namespace=self.mindsdb_database_name,
359 predictor_metadata=predictor_metadata,
360 default_namespace=self.database
361 )
362
363 steps_data = []
364 for step in plan.steps:
365 data = []
366 if type(step) == GetPredictorColumns:
367 predictor_name = step.predictor.parts[-1]
368 dn = self.datahub.get(self.mindsdb_database_name)
369 columns = dn.get_table_columns(predictor_name)
370 columns = [
371 (column_name, column_name) for column_name in columns
372 ]
373 data = {
374 'values': [],
375 'columns': {
376 (self.mindsdb_database_name, predictor_name, predictor_name): columns
377 },
378 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)]
379 }
380 elif type(step) == FetchDataframeStep:
381 data = self._fetch_dataframe_step(step)
382 elif type(step) == UnionStep:
383 raise Exception('Union step is not implemented')
384 # TODO add union support
385 # left_data = steps_data[step.left.step_num]
386 # right_data = steps_data[step.right.step_num]
387 # data = left_data + right_data
388 elif type(step) == MapReduceStep:
389 if step.reduce != 'union':
390 raise Exception(f'Unknown MapReduceStep type: {step.reduce}')
391
392 step_data = steps_data[step.values.step_num]
393 values = []
394 step_data_values = step_data['values']
395 for row in step_data_values:
396 for row_data in row.values():
397 for name, value in row_data.items():
398 if name[0] != '__mindsdb_row_id':
399 values.append(value)
400
401 data = {
402 'values': [],
403 'columns': {},
404 'tables': []
405 }
406 substep = step.step
407 if type(substep) == FetchDataframeStep:
408 query = substep.query
409 markQueryVar(query.where)
410 for value in values:
411 replaceQueryVar(query.where, value)
412 sub_data = self._fetch_dataframe_step(substep)
413 if len(data['columns']) == 0:
414 data['columns'] = sub_data['columns']
415 if len(data['tables']) == 0:
416 data['tables'] = sub_data['tables']
417 data['values'].extend(sub_data['values'])
418 elif type(substep) == MultipleSteps:
419 data = self._multiple_steps_reduce(substep, values)
420 else:
421 raise Exception(f'Unknown step type: {step.step}')
422 elif type(step) == ApplyPredictorRowStep:
423 predictor = '.'.join(step.predictor.parts)
424 dn = self.datahub.get(self.mindsdb_database_name)
425 where_data = step.row_dict
426
427 data = dn.select(
428 table=predictor,
429 columns=None,
430 where_data=where_data,
431 integration_name=self.session.integration,
432 integration_type=self.session.integration_type
433 )
434
435 data = [{(key, key): value for key, value in row.items()} for row in data]
436
437 table_name = get_preditor_alias(step, self.database)
438 values = [{table_name: x} for x in data]
439 columns = {table_name: []}
440 if len(data) > 0:
441 row = data[0]
442 columns[table_name] = list(row.keys())
443 # TODO else
444
445 data = {
446 'values': values,
447 'columns': columns,
448 'tables': [table_name]
449 }
450 elif type(step) == ApplyPredictorStep or type(step) == ApplyTimeseriesPredictorStep:
451 dn = self.datahub.get(self.mindsdb_database_name)
452 predictor = '.'.join(step.predictor.parts)
453 where_data = []
454 for row in steps_data[step.dataframe.step_num]['values']:
455 new_row = {}
456 for table_name in row:
457 keys_intersection = set(new_row) & set(row[table_name])
458 if len(keys_intersection) > 0:
459 raise Exception(
460 f'The predictor got two identical keys from different datasources: {keys_intersection}'
461 )
462 new_row.update(row[table_name])
463 where_data.append(new_row)
464
465 where_data = [{key[1]: value for key, value in row.items()} for row in where_data]
466
467 is_timeseries = predictor_metadata[predictor]['timeseries']
468 _mdb_make_predictions = None
469 if is_timeseries:
470 if 'LATEST' in self.raw:
471 _mdb_make_predictions = False
472 else:
473 _mdb_make_predictions = True
474 for row in where_data:
475 if '__mdb_make_predictions' not in row:
476 row['__mdb_make_predictions'] = _mdb_make_predictions
477
478 for row in where_data:
479 for key in row:
480 if isinstance(row[key], datetime.date):
481 row[key] = str(row[key])
482
483 data = dn.select(
484 table=predictor,
485 columns=None,
486 where_data=where_data,
487 integration_name=self.session.integration,
488 integration_type=self.session.integration_type
489 )
490
491 data = [{(key, key): value for key, value in row.items()} for row in data]
492
493 table_name = get_preditor_alias(step, self.database)
494 values = [{table_name: x} for x in data]
495 columns = {table_name: []}
496 if len(data) > 0:
497 row = data[0]
498 columns[table_name] = list(row.keys())
499 # TODO else
500
501 data = {
502 'values': values,
503 'columns': columns,
504 'tables': [table_name]
505 }
506 elif type(step) == JoinStep:
507 left_data = steps_data[step.left.step_num]
508 right_data = steps_data[step.right.step_num]
509
510 # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136
511 if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]:
512 right_data = steps_data[step.left.step_num]
513 left_data = steps_data[step.right.step_num]
514
515 if step.query.condition is not None:
516 raise Exception('At this moment supported only JOIN without condition')
517 if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'):
518 raise Exception('At this moment supported only JOIN and LEFT JOIN')
519 if (
520 len(left_data['tables']) != 1 or len(right_data['tables']) != 1
521 or left_data['tables'][0] == right_data['tables'][0]
522 ):
523 raise Exception('At this moment supported only JOIN of two different tables')
524
525 data = {
526 'values': [],
527 'columns': {},
528 'tables': list(set(left_data['tables'] + right_data['tables']))
529 }
530
531 for data_part in [left_data, right_data]:
532 for table_name in data_part['columns']:
533 if table_name not in data['columns']:
534 data['columns'][table_name] = data_part['columns'][table_name]
535 else:
536 data['columns'][table_name].extend(data_part['columns'][table_name])
537 for table_name in data['columns']:
538 data['columns'][table_name] = list(set(data['columns'][table_name]))
539
540 left_key = left_data['tables'][0]
541 right_key = right_data['tables'][0]
542
543 left_columns_map = {}
544 left_columns_map_reverse = {}
545 for i, column_name in enumerate(left_data['columns'][left_key]):
546 left_columns_map[f'a{i}'] = column_name
547 left_columns_map_reverse[column_name] = f'a{i}'
548
549 right_columns_map = {}
550 right_columns_map_reverse = {}
551 for i, column_name in enumerate(right_data['columns'][right_key]):
552 right_columns_map[f'b{i}'] = column_name
553 right_columns_map_reverse[column_name] = f'b{i}'
554
555 left_df_data = []
556 for row in left_data['values']:
557 row = row[left_key]
558 left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()})
559
560 right_df_data = []
561 for row in right_data['values']:
562 row = row[right_key]
563 right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()})
564
565 df_a = pd.DataFrame(left_df_data)
566 df_b = pd.DataFrame(right_df_data)
567
568 a_name = f'a{round(time.time()*1000)}'
569 b_name = f'b{round(time.time()*1000)}'
570 con = duckdb.connect(database=':memory:')
571 con.register(a_name, df_a)
572 con.register(b_name, df_b)
573 resp_df = con.execute(f"""
574 SELECT * FROM {a_name} as ta full join {b_name} as tb
575 ON ta.{left_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}
576 = tb.{right_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}
577 """).fetchdf()
578 con.unregister(a_name)
579 con.unregister(b_name)
580 con.close()
581 resp_df = resp_df.where(pd.notnull(resp_df), None)
582 resp_dict = resp_df.to_dict(orient='records')
583
584 for row in resp_dict:
585 new_row = {left_key: {}, right_key: {}}
586 for key, value in row.items():
587 if key.startswith('a'):
588 new_row[left_key][left_columns_map[key]] = value
589 else:
590 new_row[right_key][right_columns_map[key]] = value
591 data['values'].append(new_row)
592 elif type(step) == FilterStep:
593 raise Exception('FilterStep is not implemented')
594 # elif type(step) == ApplyTimeseriesPredictorStep:
595 # raise Exception('ApplyTimeseriesPredictorStep is not implemented')
596 elif type(step) == ProjectStep:
597 step_data = steps_data[step.dataframe.step_num]
598 columns_list = []
599 for column_full_name in step.columns:
600 table_name = None
601 if type(column_full_name) == Star:
602 for table_name, table_columns_list in step_data['columns'].items():
603 for column in table_columns_list:
604 columns_list.append(table_name + column)
605 elif type(column_full_name) == Identifier:
606 column_name_parts = column_full_name.parts
607 column_alias = None if column_full_name.alias is None else '.'.join(column_full_name.alias.parts)
608 if len(column_name_parts) > 2:
609 raise Exception(f'Column name must contain no more than 2 parts. Got name: {".".join(column_full_name)}')
610 elif len(column_name_parts) == 1:
611 column_name = column_name_parts[0]
612
613 appropriate_table = None
614 if len(step_data['tables']) == 1:
615 appropriate_table = step_data['tables'][0]
616 else:
617 for table_name, table_columns in step_data['columns'].items():
618 if (column_name, column_name) in table_columns:
619 if appropriate_table is not None:
620 raise Exception('Found multiple appropriate tables for column {column_name}')
621 else:
622 appropriate_table = table_name
623 if appropriate_table is None:
624 # it is probably constaint
625 # FIXME https://github.com/mindsdb/mindsdb_sql/issues/133
626 # column_name = column_name.strip("'")
627 # name_or_alias = column_alias or column_name
628 # column_alias = name_or_alias
629 # for row in step_data['values']:
630 # for table in row:
631 # row[table][(column_name, name_or_alias)] = row[table][(column_name, column_name)]
632 # appropriate_table = step_data['tables'][0]
633 columns_list.append(appropriate_table + (column_alias, column_alias))
634 else:
635 columns_list.append(appropriate_table + (column_name, column_alias or column_name)) # column_name
636 elif len(column_name_parts) == 2:
637 table_name_or_alias = column_name_parts[0]
638 column_name = column_name_parts[1]
639
640 appropriate_table = None
641 for table_name, table_columns in step_data['columns'].items():
642 checkig_table_name_or_alias = table_name[2] or table_name[1]
643 if table_name_or_alias == checkig_table_name_or_alias:
644 for table_column_name in table_columns:
645 if (
646 table_column_name[1] == column_name
647 or table_column_name[1] is None and table_column_name[0] == column_name
648 ):
649 break
650 else:
651 raise Exception(f'Can not find column "{column_name}" in table "{table_name}"')
652 appropriate_table = table_name
653 break
654 if appropriate_table is None:
655 raise Exception(f'Can not find approproate table for column {column_name}')
656
657 columns_to_copy = None
658 for column in step_data['columns'][appropriate_table]:
659 if column[0] == column_name and (column[1] is None or column[1] == column_name):
660 columns_to_copy = column
661 break
662 else:
663 raise Exception(f'Can not find approproate column in data: {(column_name, column_alias)}')
664
665 for row in step_data['values']:
666 row[appropriate_table][(column_name, column_alias)] = row[appropriate_table][columns_to_copy]
667
668 columns_list.append(appropriate_table + (column_name, column_alias))
669 else:
670 raise Exception('Undefined column name')
671 else:
672 raise Exception(f'Unexpected column name type: {column_full_name}')
673
674 self.columns_list = columns_list
675 data = step_data
676 else:
677 raise Exception(F'Unknown planner step: {step}')
678 steps_data.append(data)
679
680 if self.outer_query is not None:
681 data = []
682 # +++
683 result = []
684 for row in steps_data[-1]:
685 data_row = {}
686 for column_record in self.columns_list:
687 table_name = column_record[:3]
688 column_name = column_record[3]
689 data_row[column_record[4] or column_record[3]] = row[table_name][column_name]
690 result.append(data_row)
691 # ---
692 data = self._make_list_result_view(result)
693 df = pd.DataFrame(data)
694 result = query_df(df, self.outer_query)
695
696 try:
697 self.columns_list = [
698 ('', '', '', x, x) for x in result.columns
699 ]
700 except Exception:
701 self.columns_list = [
702 ('', '', '', result.name, result.name)
703 ]
704
705 # +++ make list result view
706 new_result = []
707 for row in result.to_dict(orient='records'):
708 data_row = []
709 for column_record in self.columns_list:
710 column_name = column_record[4] or column_record[3]
711 data_row.append(row.get(column_name))
712 new_result.append(data_row)
713 result = new_result
714 # ---
715
716 self.fetched_data = result
717 else:
718 self.fetched_data = steps_data[-1]
719
720 if hasattr(self, 'columns_list') is False:
721 self.columns_list = []
722 for row in self.fetched_data:
723 for table_key in row:
724 for column_name in row[table_key]:
725 if (table_key + (column_name, column_name)) not in self.columns_list:
726 self.columns_list.append((table_key + (column_name, column_name)))
727
728 # if there was no 'ProjectStep', then get columns list from last step:
729 if self.columns_list is None:
730 self.columns_list = []
731 for table_name in self.fetched_data['columns']:
732 self.columns_list.extend([
733 table_name + column for column in self.fetched_data['columns'][table_name]
734 ])
735
736 self.columns_list = [x for x in self.columns_list if x[3] != '__mindsdb_row_id']
737
738 def _apply_where_filter(self, row, where):
739 if isinstance(where, Identifier):
740 return row[where.value]
741 elif isinstance(where, Constant):
742 return where.value
743 elif not isinstance(where, (UnaryOperation, BinaryOperation)):
744 Exception(f'Unknown operation type: {where}')
745
746 op_fn = operator_map.get(where.op)
747 if op_fn is None:
748 raise Exception(f'unknown operator {where.op}')
749
750 args = [self._apply_where_filter(row, arg) for arg in where.args]
751 result = op_fn(*args)
752 return result
753
754 def _make_list_result_view(self, data):
755 if self.outer_query is not None:
756 return data['values']
757 result = []
758 for row in data['values']:
759 data_row = []
760 for column_record in self.columns_list:
761 table_name = column_record[:3]
762 column_name = column_record[3:]
763 data_row.append(row[table_name][column_name])
764 result.append(data_row)
765 return result
766
767 def _make_dict_result_view(self, data):
768 result = []
769 for row in data:
770 data_row = {}
771 for table_name in row:
772 data_row.update(row[table_name])
773 result.append(data_row)
774 return result
775
776 @property
777 def columns(self):
778 result = []
779 for column_record in self.columns_list:
780 try:
781 field_type = self.model_types.get(column_record[3])
782 except Exception:
783 field_type = None
784
785 column_type = TYPES.MYSQL_TYPE_VAR_STRING
786 if field_type == dtype.date:
787 column_type = TYPES.MYSQL_TYPE_DATE
788 elif field_type == dtype.datetime:
789 column_type = TYPES.MYSQL_TYPE_DATETIME
790
791 result.append({
792 'database': column_record[0] or self.database,
793 # TODO add 'original_table'
794 'table_name': column_record[1],
795 'name': column_record[3],
796 'alias': column_record[4] or column_record[3],
797 # NOTE all work with text-type, but if/when wanted change types to real,
798 # it will need to check all types casts in BinaryResultsetRowPacket
799 'type': column_type
800 })
801 return result
802
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -322,8 +322,8 @@
return
integrations_names = self.datahub.get_datasources_names()
- integrations_names.append('INFORMATION_SCHEMA')
integrations_names.append('information_schema')
+ integrations_names.append('file')
all_tables = get_all_tables(mindsdb_sql_struct)
| {"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py\n--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py\n+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py\n@@ -322,8 +322,8 @@\n return\n \n integrations_names = self.datahub.get_datasources_names()\n- integrations_names.append('INFORMATION_SCHEMA')\n integrations_names.append('information_schema')\n+ integrations_names.append('file')\n \n all_tables = get_all_tables(mindsdb_sql_struct)\n", "issue": "[BUG] Selecting from files is not work\nselecting from files like this:\r\n```\r\nselect * from file.file_name\r\n```\r\nis not working \n", "before_files": [{"content": "\"\"\"\n*******************************************************\n * Copyright (C) 2017 MindsDB Inc. <[email protected]>\n *\n * This file is part of MindsDB Server.\n *\n * MindsDB Server can not be copied and/or distributed without the express\n * permission of MindsDB Inc\n *******************************************************\n\"\"\"\n\nimport re\nimport pandas as pd\nimport datetime\nimport time\n\nimport duckdb\nfrom lightwood.api import dtype\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.planner import plan_query\nfrom mindsdb_sql.parser.dialects.mindsdb.latest import Latest\nfrom mindsdb_sql.parser.ast import (\n BinaryOperation,\n UnaryOperation,\n Identifier,\n Operation,\n Constant,\n OrderBy,\n Select,\n Union,\n Join,\n Star\n)\nfrom mindsdb_sql.planner.steps import (\n ApplyTimeseriesPredictorStep,\n ApplyPredictorRowStep,\n GetPredictorColumns,\n FetchDataframeStep,\n ApplyPredictorStep,\n MapReduceStep,\n MultipleSteps,\n ProjectStep,\n FilterStep,\n UnionStep,\n JoinStep\n)\n\nfrom mindsdb.api.mysql.mysql_proxy.classes.com_operators import operator_map\nfrom mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import TYPES, ERR\nfrom mindsdb.api.mysql.mysql_proxy.utilities import log\nfrom mindsdb.interfaces.ai_table.ai_table import AITableStore\nimport mindsdb.interfaces.storage.db as db\nfrom mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df\n\n\nsuperset_subquery = re.compile(r'from[\\s\\n]*(\\(.*\\))[\\s\\n]*as[\\s\\n]*virtual_table', flags=re.IGNORECASE | re.MULTILINE | re.S)\n\n\nclass NotImplementedError(Exception):\n pass\n\n\nclass SqlError(Exception):\n pass\n\n\ndef get_preditor_alias(step, mindsdb_database):\n predictor_name = '.'.join(step.predictor.parts)\n predictor_alias = '.'.join(step.predictor.alias.parts) if step.predictor.alias is not None else predictor_name\n return (mindsdb_database, predictor_name, predictor_alias)\n\n\ndef get_table_alias(table_obj, default_db_name):\n # (database, table, alias)\n if len(table_obj.parts) > 2:\n raise Exception(f'Table name must contain no more than 2 parts. 
Got name: {table_obj.parts}')\n elif len(table_obj.parts) == 1:\n name = (default_db_name, table_obj.parts[0])\n else:\n name = tuple(table_obj.parts)\n if table_obj.alias is not None:\n name = name + ('.'.join(table_obj.alias.parts),)\n else:\n name = name + (None,)\n return name\n\n\ndef get_all_tables(stmt):\n if isinstance(stmt, Union):\n left = get_all_tables(stmt.left)\n right = get_all_tables(stmt.right)\n return left + right\n\n if isinstance(stmt, Select):\n from_stmt = stmt.from_table\n elif isinstance(stmt, (Identifier, Join)):\n from_stmt = stmt\n else:\n raise Exception(f'Unknown type of identifier: {stmt}')\n\n result = []\n if isinstance(from_stmt, Identifier):\n result.append(from_stmt.parts[-1])\n elif isinstance(from_stmt, Join):\n result.extend(get_all_tables(from_stmt.left))\n result.extend(get_all_tables(from_stmt.right))\n return result\n\n\ndef markQueryVar(where):\n if isinstance(where, BinaryOperation):\n markQueryVar(where.args[0])\n markQueryVar(where.args[1])\n elif isinstance(where, UnaryOperation):\n markQueryVar(where.args[0])\n elif isinstance(where, Constant):\n if where.value == '$var':\n where.is_var = True\n\n\ndef replaceQueryVar(where, val):\n if isinstance(where, BinaryOperation):\n replaceQueryVar(where.args[0], val)\n replaceQueryVar(where.args[1], val)\n elif isinstance(where, UnaryOperation):\n replaceQueryVar(where.args[0], val)\n elif isinstance(where, Constant):\n if hasattr(where, 'is_var') and where.is_var is True:\n where.value = val\n\n\ndef join_query_data(target, source):\n target['values'].extend(source['values'])\n target['tables'].extend(source['tables'])\n target['tables'] = list(set(target['tables']))\n for table_name in source['columns']:\n if table_name not in target['columns']:\n target['columns'][table_name] = source['columns'][table_name]\n else:\n target['columns'][table_name].extend(source['columns'][table_name])\n target['columns'][table_name] = list(set(target['columns'][table_name]))\n\n\nclass SQLQuery():\n def __init__(self, sql, session):\n self.session = session\n self.integration = session.integration\n self.database = None if session.database == '' else session.database.lower()\n self.datahub = session.datahub\n self.ai_table = None\n self.outer_query = None\n self.row_id = 0\n self.columns_list = None\n\n self.mindsdb_database_name = 'mindsdb'\n\n # +++ workaround for subqueries in superset\n if 'as virtual_table' in sql.lower():\n subquery = re.findall(superset_subquery, sql)\n if isinstance(subquery, list) and len(subquery) == 1:\n subquery = subquery[0]\n self.outer_query = sql.replace(subquery, 'dataframe')\n sql = subquery.strip('()')\n # ---\n\n self.raw = sql\n self.model_types = {}\n self._parse_query(sql)\n\n def fetch(self, datahub, view='list'):\n data = self.fetched_data\n\n if view == 'list':\n self.result = self._make_list_result_view(data)\n elif view == 'dict':\n self.result = self._make_dict_result_view(data)\n else:\n raise Exception('Only \"list\" and \"dict\" views supported atm')\n\n return {\n 'success': True,\n 'result': self.result\n }\n\n def _fetch_dataframe_step(self, step):\n dn = self.datahub.get(step.integration)\n query = step.query\n\n table_alias = get_table_alias(step.query.from_table, self.database)\n # TODO for information_schema we have 'database' = 'mindsdb'\n\n data, column_names = dn.select(\n query=query\n )\n\n columns = [(column_name, column_name) for column_name in column_names]\n columns.append(('__mindsdb_row_id', '__mindsdb_row_id'))\n\n for i, row in enumerate(data):\n 
row['__mindsdb_row_id'] = self.row_id + i\n self.row_id = self.row_id + len(data)\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n data = [{table_alias: x} for x in data]\n\n data = {\n 'values': data,\n 'columns': {table_alias: columns},\n 'tables': [table_alias]\n }\n return data\n\n def _multiple_steps(self, step):\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n for substep in step.steps:\n sub_data = self._fetch_dataframe_step(substep)\n join_query_data(data, sub_data)\n return data\n\n def _multiple_steps_reduce(self, step, values):\n if step.reduce != 'union':\n raise Exception(f'Unknown MultipleSteps type: {step.reduce}')\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n\n for substep in step.steps:\n if isinstance(substep, FetchDataframeStep) is False:\n raise Exception(f'Wrong step type for MultipleSteps: {step}')\n markQueryVar(substep.query.where)\n\n for v in values:\n for substep in step.steps:\n replaceQueryVar(substep.query.where, v)\n sub_data = self._multiple_steps(step)\n join_query_data(data, sub_data)\n\n return data\n\n def _parse_query(self, sql):\n mindsdb_sql_struct = parse_sql(sql, dialect='mindsdb')\n\n # is it query to 'predictors'?\n if (\n isinstance(mindsdb_sql_struct.from_table, Identifier)\n and mindsdb_sql_struct.from_table.parts[-1].lower() == 'predictors'\n and (\n self.database == 'mindsdb'\n or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'\n )\n ):\n dn = self.datahub.get(self.mindsdb_database_name)\n data, columns = dn.get_predictors(mindsdb_sql_struct)\n table_name = ('mindsdb', 'predictors', 'predictors')\n data = [{(key, key): value for key, value in row.items()} for row in data]\n data = [{table_name: x} for x in data]\n self.columns_list = [\n (table_name + (column_name, column_name))\n for column_name in columns\n ]\n\n columns = [(column_name, column_name) for column_name in columns]\n\n self.fetched_data = {\n 'values': data,\n 'columns': {table_name: columns},\n 'tables': [table_name]\n }\n return\n\n # is it query to 'commands'?\n if (\n isinstance(mindsdb_sql_struct.from_table, Identifier)\n and mindsdb_sql_struct.from_table.parts[-1].lower() == 'commands'\n and (\n self.database == 'mindsdb'\n or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'\n )\n ):\n self.fetched_data = {\n 'values': [],\n 'columns': {('mindsdb', 'commands', 'commands'): [('command', 'command')]},\n 'tables': [('mindsdb', 'commands', 'commands')]\n }\n self.columns_list = [('mindsdb', 'commands', 'commands', 'command', 'command')]\n return\n\n # is it query to 'datasources'?\n if (\n isinstance(mindsdb_sql_struct.from_table, Identifier)\n and mindsdb_sql_struct.from_table.parts[-1].lower() == 'datasources'\n and (\n self.database == 'mindsdb'\n or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'\n )\n ):\n dn = self.datahub.get(self.mindsdb_database_name)\n data, columns = dn.get_datasources(mindsdb_sql_struct)\n table_name = ('mindsdb', 'datasources', 'datasources')\n data = [{(key, key): value for key, value in row.items()} for row in data]\n data = [{table_name: x} for x in data]\n\n self.columns_list = [\n (table_name + (column_name, column_name))\n for column_name in columns\n ]\n\n columns = [(column_name, column_name) for column_name in columns]\n\n self.fetched_data = {\n 'values': data,\n 'columns': {table_name: columns},\n 'tables': [table_name]\n }\n return\n\n integrations_names = self.datahub.get_datasources_names()\n 
integrations_names.append('INFORMATION_SCHEMA')\n integrations_names.append('information_schema')\n\n all_tables = get_all_tables(mindsdb_sql_struct)\n\n predictor_metadata = {}\n predictors = db.session.query(db.Predictor).filter_by(company_id=self.session.company_id)\n for model_name in set(all_tables):\n for p in predictors:\n if p.name == model_name:\n if isinstance(p.data, dict) and 'error' not in p.data:\n ts_settings = p.learn_args.get('timeseries_settings', {})\n if ts_settings.get('is_timeseries') is True:\n window = ts_settings.get('window')\n order_by = ts_settings.get('order_by')[0]\n group_by = ts_settings.get('group_by')\n if isinstance(group_by, list):\n group_by = ts_settings.get('group_by')[0]\n predictor_metadata[model_name] = {\n 'timeseries': True,\n 'window': window,\n 'order_by_column': order_by,\n 'group_by_column': group_by\n }\n else:\n predictor_metadata[model_name] = {\n 'timeseries': False\n }\n self.model_types.update(p.data.get('dtypes', {}))\n\n plan = plan_query(\n mindsdb_sql_struct,\n integrations=integrations_names,\n predictor_namespace=self.mindsdb_database_name,\n predictor_metadata=predictor_metadata,\n default_namespace=self.database\n )\n\n steps_data = []\n for step in plan.steps:\n data = []\n if type(step) == GetPredictorColumns:\n predictor_name = step.predictor.parts[-1]\n dn = self.datahub.get(self.mindsdb_database_name)\n columns = dn.get_table_columns(predictor_name)\n columns = [\n (column_name, column_name) for column_name in columns\n ]\n data = {\n 'values': [],\n 'columns': {\n (self.mindsdb_database_name, predictor_name, predictor_name): columns\n },\n 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)]\n }\n elif type(step) == FetchDataframeStep:\n data = self._fetch_dataframe_step(step)\n elif type(step) == UnionStep:\n raise Exception('Union step is not implemented')\n # TODO add union support\n # left_data = steps_data[step.left.step_num]\n # right_data = steps_data[step.right.step_num]\n # data = left_data + right_data\n elif type(step) == MapReduceStep:\n if step.reduce != 'union':\n raise Exception(f'Unknown MapReduceStep type: {step.reduce}')\n\n step_data = steps_data[step.values.step_num]\n values = []\n step_data_values = step_data['values']\n for row in step_data_values:\n for row_data in row.values():\n for name, value in row_data.items():\n if name[0] != '__mindsdb_row_id':\n values.append(value)\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n substep = step.step\n if type(substep) == FetchDataframeStep:\n query = substep.query\n markQueryVar(query.where)\n for value in values:\n replaceQueryVar(query.where, value)\n sub_data = self._fetch_dataframe_step(substep)\n if len(data['columns']) == 0:\n data['columns'] = sub_data['columns']\n if len(data['tables']) == 0:\n data['tables'] = sub_data['tables']\n data['values'].extend(sub_data['values'])\n elif type(substep) == MultipleSteps:\n data = self._multiple_steps_reduce(substep, values)\n else:\n raise Exception(f'Unknown step type: {step.step}')\n elif type(step) == ApplyPredictorRowStep:\n predictor = '.'.join(step.predictor.parts)\n dn = self.datahub.get(self.mindsdb_database_name)\n where_data = step.row_dict\n\n data = dn.select(\n table=predictor,\n columns=None,\n where_data=where_data,\n integration_name=self.session.integration,\n integration_type=self.session.integration_type\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n table_name = get_preditor_alias(step, self.database)\n values = 
[{table_name: x} for x in data]\n columns = {table_name: []}\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name]\n }\n elif type(step) == ApplyPredictorStep or type(step) == ApplyTimeseriesPredictorStep:\n dn = self.datahub.get(self.mindsdb_database_name)\n predictor = '.'.join(step.predictor.parts)\n where_data = []\n for row in steps_data[step.dataframe.step_num]['values']:\n new_row = {}\n for table_name in row:\n keys_intersection = set(new_row) & set(row[table_name])\n if len(keys_intersection) > 0:\n raise Exception(\n f'The predictor got two identical keys from different datasources: {keys_intersection}'\n )\n new_row.update(row[table_name])\n where_data.append(new_row)\n\n where_data = [{key[1]: value for key, value in row.items()} for row in where_data]\n\n is_timeseries = predictor_metadata[predictor]['timeseries']\n _mdb_make_predictions = None\n if is_timeseries:\n if 'LATEST' in self.raw:\n _mdb_make_predictions = False\n else:\n _mdb_make_predictions = True\n for row in where_data:\n if '__mdb_make_predictions' not in row:\n row['__mdb_make_predictions'] = _mdb_make_predictions\n\n for row in where_data:\n for key in row:\n if isinstance(row[key], datetime.date):\n row[key] = str(row[key])\n\n data = dn.select(\n table=predictor,\n columns=None,\n where_data=where_data,\n integration_name=self.session.integration,\n integration_type=self.session.integration_type\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n table_name = get_preditor_alias(step, self.database)\n values = [{table_name: x} for x in data]\n columns = {table_name: []}\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name]\n }\n elif type(step) == JoinStep:\n left_data = steps_data[step.left.step_num]\n right_data = steps_data[step.right.step_num]\n\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136\n if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]:\n right_data = steps_data[step.left.step_num]\n left_data = steps_data[step.right.step_num]\n\n if step.query.condition is not None:\n raise Exception('At this moment supported only JOIN without condition')\n if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'):\n raise Exception('At this moment supported only JOIN and LEFT JOIN')\n if (\n len(left_data['tables']) != 1 or len(right_data['tables']) != 1\n or left_data['tables'][0] == right_data['tables'][0]\n ):\n raise Exception('At this moment supported only JOIN of two different tables')\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': list(set(left_data['tables'] + right_data['tables']))\n }\n\n for data_part in [left_data, right_data]:\n for table_name in data_part['columns']:\n if table_name not in data['columns']:\n data['columns'][table_name] = data_part['columns'][table_name]\n else:\n data['columns'][table_name].extend(data_part['columns'][table_name])\n for table_name in data['columns']:\n data['columns'][table_name] = list(set(data['columns'][table_name]))\n\n left_key = left_data['tables'][0]\n right_key = right_data['tables'][0]\n\n left_columns_map = {}\n left_columns_map_reverse = {}\n for i, column_name in enumerate(left_data['columns'][left_key]):\n left_columns_map[f'a{i}'] = column_name\n left_columns_map_reverse[column_name] = f'a{i}'\n\n right_columns_map = {}\n 
right_columns_map_reverse = {}\n for i, column_name in enumerate(right_data['columns'][right_key]):\n right_columns_map[f'b{i}'] = column_name\n right_columns_map_reverse[column_name] = f'b{i}'\n\n left_df_data = []\n for row in left_data['values']:\n row = row[left_key]\n left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()})\n\n right_df_data = []\n for row in right_data['values']:\n row = row[right_key]\n right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()})\n\n df_a = pd.DataFrame(left_df_data)\n df_b = pd.DataFrame(right_df_data)\n\n a_name = f'a{round(time.time()*1000)}'\n b_name = f'b{round(time.time()*1000)}'\n con = duckdb.connect(database=':memory:')\n con.register(a_name, df_a)\n con.register(b_name, df_b)\n resp_df = con.execute(f\"\"\"\n SELECT * FROM {a_name} as ta full join {b_name} as tb\n ON ta.{left_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}\n = tb.{right_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}\n \"\"\").fetchdf()\n con.unregister(a_name)\n con.unregister(b_name)\n con.close()\n resp_df = resp_df.where(pd.notnull(resp_df), None)\n resp_dict = resp_df.to_dict(orient='records')\n\n for row in resp_dict:\n new_row = {left_key: {}, right_key: {}}\n for key, value in row.items():\n if key.startswith('a'):\n new_row[left_key][left_columns_map[key]] = value\n else:\n new_row[right_key][right_columns_map[key]] = value\n data['values'].append(new_row)\n elif type(step) == FilterStep:\n raise Exception('FilterStep is not implemented')\n # elif type(step) == ApplyTimeseriesPredictorStep:\n # raise Exception('ApplyTimeseriesPredictorStep is not implemented')\n elif type(step) == ProjectStep:\n step_data = steps_data[step.dataframe.step_num]\n columns_list = []\n for column_full_name in step.columns:\n table_name = None\n if type(column_full_name) == Star:\n for table_name, table_columns_list in step_data['columns'].items():\n for column in table_columns_list:\n columns_list.append(table_name + column)\n elif type(column_full_name) == Identifier:\n column_name_parts = column_full_name.parts\n column_alias = None if column_full_name.alias is None else '.'.join(column_full_name.alias.parts)\n if len(column_name_parts) > 2:\n raise Exception(f'Column name must contain no more than 2 parts. 
Got name: {\".\".join(column_full_name)}')\n elif len(column_name_parts) == 1:\n column_name = column_name_parts[0]\n\n appropriate_table = None\n if len(step_data['tables']) == 1:\n appropriate_table = step_data['tables'][0]\n else:\n for table_name, table_columns in step_data['columns'].items():\n if (column_name, column_name) in table_columns:\n if appropriate_table is not None:\n raise Exception('Found multiple appropriate tables for column {column_name}')\n else:\n appropriate_table = table_name\n if appropriate_table is None:\n # it is probably constaint\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/133\n # column_name = column_name.strip(\"'\")\n # name_or_alias = column_alias or column_name\n # column_alias = name_or_alias\n # for row in step_data['values']:\n # for table in row:\n # row[table][(column_name, name_or_alias)] = row[table][(column_name, column_name)]\n # appropriate_table = step_data['tables'][0]\n columns_list.append(appropriate_table + (column_alias, column_alias))\n else:\n columns_list.append(appropriate_table + (column_name, column_alias or column_name)) # column_name\n elif len(column_name_parts) == 2:\n table_name_or_alias = column_name_parts[0]\n column_name = column_name_parts[1]\n\n appropriate_table = None\n for table_name, table_columns in step_data['columns'].items():\n checkig_table_name_or_alias = table_name[2] or table_name[1]\n if table_name_or_alias == checkig_table_name_or_alias:\n for table_column_name in table_columns:\n if (\n table_column_name[1] == column_name\n or table_column_name[1] is None and table_column_name[0] == column_name\n ):\n break\n else:\n raise Exception(f'Can not find column \"{column_name}\" in table \"{table_name}\"')\n appropriate_table = table_name\n break\n if appropriate_table is None:\n raise Exception(f'Can not find approproate table for column {column_name}')\n\n columns_to_copy = None\n for column in step_data['columns'][appropriate_table]:\n if column[0] == column_name and (column[1] is None or column[1] == column_name):\n columns_to_copy = column\n break\n else:\n raise Exception(f'Can not find approproate column in data: {(column_name, column_alias)}')\n\n for row in step_data['values']:\n row[appropriate_table][(column_name, column_alias)] = row[appropriate_table][columns_to_copy]\n\n columns_list.append(appropriate_table + (column_name, column_alias))\n else:\n raise Exception('Undefined column name')\n else:\n raise Exception(f'Unexpected column name type: {column_full_name}')\n\n self.columns_list = columns_list\n data = step_data\n else:\n raise Exception(F'Unknown planner step: {step}')\n steps_data.append(data)\n\n if self.outer_query is not None:\n data = []\n # +++\n result = []\n for row in steps_data[-1]:\n data_row = {}\n for column_record in self.columns_list:\n table_name = column_record[:3]\n column_name = column_record[3]\n data_row[column_record[4] or column_record[3]] = row[table_name][column_name]\n result.append(data_row)\n # ---\n data = self._make_list_result_view(result)\n df = pd.DataFrame(data)\n result = query_df(df, self.outer_query)\n\n try:\n self.columns_list = [\n ('', '', '', x, x) for x in result.columns\n ]\n except Exception:\n self.columns_list = [\n ('', '', '', result.name, result.name)\n ]\n\n # +++ make list result view\n new_result = []\n for row in result.to_dict(orient='records'):\n data_row = []\n for column_record in self.columns_list:\n column_name = column_record[4] or column_record[3]\n data_row.append(row.get(column_name))\n new_result.append(data_row)\n 
result = new_result\n # ---\n\n self.fetched_data = result\n else:\n self.fetched_data = steps_data[-1]\n\n if hasattr(self, 'columns_list') is False:\n self.columns_list = []\n for row in self.fetched_data:\n for table_key in row:\n for column_name in row[table_key]:\n if (table_key + (column_name, column_name)) not in self.columns_list:\n self.columns_list.append((table_key + (column_name, column_name)))\n\n # if there was no 'ProjectStep', then get columns list from last step:\n if self.columns_list is None:\n self.columns_list = []\n for table_name in self.fetched_data['columns']:\n self.columns_list.extend([\n table_name + column for column in self.fetched_data['columns'][table_name]\n ])\n\n self.columns_list = [x for x in self.columns_list if x[3] != '__mindsdb_row_id']\n\n def _apply_where_filter(self, row, where):\n if isinstance(where, Identifier):\n return row[where.value]\n elif isinstance(where, Constant):\n return where.value\n elif not isinstance(where, (UnaryOperation, BinaryOperation)):\n Exception(f'Unknown operation type: {where}')\n\n op_fn = operator_map.get(where.op)\n if op_fn is None:\n raise Exception(f'unknown operator {where.op}')\n\n args = [self._apply_where_filter(row, arg) for arg in where.args]\n result = op_fn(*args)\n return result\n\n def _make_list_result_view(self, data):\n if self.outer_query is not None:\n return data['values']\n result = []\n for row in data['values']:\n data_row = []\n for column_record in self.columns_list:\n table_name = column_record[:3]\n column_name = column_record[3:]\n data_row.append(row[table_name][column_name])\n result.append(data_row)\n return result\n\n def _make_dict_result_view(self, data):\n result = []\n for row in data:\n data_row = {}\n for table_name in row:\n data_row.update(row[table_name])\n result.append(data_row)\n return result\n\n @property\n def columns(self):\n result = []\n for column_record in self.columns_list:\n try:\n field_type = self.model_types.get(column_record[3])\n except Exception:\n field_type = None\n\n column_type = TYPES.MYSQL_TYPE_VAR_STRING\n if field_type == dtype.date:\n column_type = TYPES.MYSQL_TYPE_DATE\n elif field_type == dtype.datetime:\n column_type = TYPES.MYSQL_TYPE_DATETIME\n\n result.append({\n 'database': column_record[0] or self.database,\n # TODO add 'original_table'\n 'table_name': column_record[1],\n 'name': column_record[3],\n 'alias': column_record[4] or column_record[3],\n # NOTE all work with text-type, but if/when wanted change types to real,\n # it will need to check all types casts in BinaryResultsetRowPacket\n 'type': column_type\n })\n return result\n", "path": "mindsdb/api/mysql/mysql_proxy/classes/sql_query.py"}], "after_files": [{"content": "\"\"\"\n*******************************************************\n * Copyright (C) 2017 MindsDB Inc. 
<[email protected]>\n *\n * This file is part of MindsDB Server.\n *\n * MindsDB Server can not be copied and/or distributed without the express\n * permission of MindsDB Inc\n *******************************************************\n\"\"\"\n\nimport re\nimport pandas as pd\nimport datetime\nimport time\n\nimport duckdb\nfrom lightwood.api import dtype\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.planner import plan_query\nfrom mindsdb_sql.parser.dialects.mindsdb.latest import Latest\nfrom mindsdb_sql.parser.ast import (\n BinaryOperation,\n UnaryOperation,\n Identifier,\n Operation,\n Constant,\n OrderBy,\n Select,\n Union,\n Join,\n Star\n)\nfrom mindsdb_sql.planner.steps import (\n ApplyTimeseriesPredictorStep,\n ApplyPredictorRowStep,\n GetPredictorColumns,\n FetchDataframeStep,\n ApplyPredictorStep,\n MapReduceStep,\n MultipleSteps,\n ProjectStep,\n FilterStep,\n UnionStep,\n JoinStep\n)\n\nfrom mindsdb.api.mysql.mysql_proxy.classes.com_operators import operator_map\nfrom mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import TYPES, ERR\nfrom mindsdb.api.mysql.mysql_proxy.utilities import log\nfrom mindsdb.interfaces.ai_table.ai_table import AITableStore\nimport mindsdb.interfaces.storage.db as db\nfrom mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df\n\n\nsuperset_subquery = re.compile(r'from[\\s\\n]*(\\(.*\\))[\\s\\n]*as[\\s\\n]*virtual_table', flags=re.IGNORECASE | re.MULTILINE | re.S)\n\n\nclass NotImplementedError(Exception):\n pass\n\n\nclass SqlError(Exception):\n pass\n\n\ndef get_preditor_alias(step, mindsdb_database):\n predictor_name = '.'.join(step.predictor.parts)\n predictor_alias = '.'.join(step.predictor.alias.parts) if step.predictor.alias is not None else predictor_name\n return (mindsdb_database, predictor_name, predictor_alias)\n\n\ndef get_table_alias(table_obj, default_db_name):\n # (database, table, alias)\n if len(table_obj.parts) > 2:\n raise Exception(f'Table name must contain no more than 2 parts. 
Got name: {table_obj.parts}')\n elif len(table_obj.parts) == 1:\n name = (default_db_name, table_obj.parts[0])\n else:\n name = tuple(table_obj.parts)\n if table_obj.alias is not None:\n name = name + ('.'.join(table_obj.alias.parts),)\n else:\n name = name + (None,)\n return name\n\n\ndef get_all_tables(stmt):\n if isinstance(stmt, Union):\n left = get_all_tables(stmt.left)\n right = get_all_tables(stmt.right)\n return left + right\n\n if isinstance(stmt, Select):\n from_stmt = stmt.from_table\n elif isinstance(stmt, (Identifier, Join)):\n from_stmt = stmt\n else:\n raise Exception(f'Unknown type of identifier: {stmt}')\n\n result = []\n if isinstance(from_stmt, Identifier):\n result.append(from_stmt.parts[-1])\n elif isinstance(from_stmt, Join):\n result.extend(get_all_tables(from_stmt.left))\n result.extend(get_all_tables(from_stmt.right))\n return result\n\n\ndef markQueryVar(where):\n if isinstance(where, BinaryOperation):\n markQueryVar(where.args[0])\n markQueryVar(where.args[1])\n elif isinstance(where, UnaryOperation):\n markQueryVar(where.args[0])\n elif isinstance(where, Constant):\n if where.value == '$var':\n where.is_var = True\n\n\ndef replaceQueryVar(where, val):\n if isinstance(where, BinaryOperation):\n replaceQueryVar(where.args[0], val)\n replaceQueryVar(where.args[1], val)\n elif isinstance(where, UnaryOperation):\n replaceQueryVar(where.args[0], val)\n elif isinstance(where, Constant):\n if hasattr(where, 'is_var') and where.is_var is True:\n where.value = val\n\n\ndef join_query_data(target, source):\n target['values'].extend(source['values'])\n target['tables'].extend(source['tables'])\n target['tables'] = list(set(target['tables']))\n for table_name in source['columns']:\n if table_name not in target['columns']:\n target['columns'][table_name] = source['columns'][table_name]\n else:\n target['columns'][table_name].extend(source['columns'][table_name])\n target['columns'][table_name] = list(set(target['columns'][table_name]))\n\n\nclass SQLQuery():\n def __init__(self, sql, session):\n self.session = session\n self.integration = session.integration\n self.database = None if session.database == '' else session.database.lower()\n self.datahub = session.datahub\n self.ai_table = None\n self.outer_query = None\n self.row_id = 0\n self.columns_list = None\n\n self.mindsdb_database_name = 'mindsdb'\n\n # +++ workaround for subqueries in superset\n if 'as virtual_table' in sql.lower():\n subquery = re.findall(superset_subquery, sql)\n if isinstance(subquery, list) and len(subquery) == 1:\n subquery = subquery[0]\n self.outer_query = sql.replace(subquery, 'dataframe')\n sql = subquery.strip('()')\n # ---\n\n self.raw = sql\n self.model_types = {}\n self._parse_query(sql)\n\n def fetch(self, datahub, view='list'):\n data = self.fetched_data\n\n if view == 'list':\n self.result = self._make_list_result_view(data)\n elif view == 'dict':\n self.result = self._make_dict_result_view(data)\n else:\n raise Exception('Only \"list\" and \"dict\" views supported atm')\n\n return {\n 'success': True,\n 'result': self.result\n }\n\n def _fetch_dataframe_step(self, step):\n dn = self.datahub.get(step.integration)\n query = step.query\n\n table_alias = get_table_alias(step.query.from_table, self.database)\n # TODO for information_schema we have 'database' = 'mindsdb'\n\n data, column_names = dn.select(\n query=query\n )\n\n columns = [(column_name, column_name) for column_name in column_names]\n columns.append(('__mindsdb_row_id', '__mindsdb_row_id'))\n\n for i, row in enumerate(data):\n 
row['__mindsdb_row_id'] = self.row_id + i\n self.row_id = self.row_id + len(data)\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n data = [{table_alias: x} for x in data]\n\n data = {\n 'values': data,\n 'columns': {table_alias: columns},\n 'tables': [table_alias]\n }\n return data\n\n def _multiple_steps(self, step):\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n for substep in step.steps:\n sub_data = self._fetch_dataframe_step(substep)\n join_query_data(data, sub_data)\n return data\n\n def _multiple_steps_reduce(self, step, values):\n if step.reduce != 'union':\n raise Exception(f'Unknown MultipleSteps type: {step.reduce}')\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n\n for substep in step.steps:\n if isinstance(substep, FetchDataframeStep) is False:\n raise Exception(f'Wrong step type for MultipleSteps: {step}')\n markQueryVar(substep.query.where)\n\n for v in values:\n for substep in step.steps:\n replaceQueryVar(substep.query.where, v)\n sub_data = self._multiple_steps(step)\n join_query_data(data, sub_data)\n\n return data\n\n def _parse_query(self, sql):\n mindsdb_sql_struct = parse_sql(sql, dialect='mindsdb')\n\n # is it query to 'predictors'?\n if (\n isinstance(mindsdb_sql_struct.from_table, Identifier)\n and mindsdb_sql_struct.from_table.parts[-1].lower() == 'predictors'\n and (\n self.database == 'mindsdb'\n or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'\n )\n ):\n dn = self.datahub.get(self.mindsdb_database_name)\n data, columns = dn.get_predictors(mindsdb_sql_struct)\n table_name = ('mindsdb', 'predictors', 'predictors')\n data = [{(key, key): value for key, value in row.items()} for row in data]\n data = [{table_name: x} for x in data]\n self.columns_list = [\n (table_name + (column_name, column_name))\n for column_name in columns\n ]\n\n columns = [(column_name, column_name) for column_name in columns]\n\n self.fetched_data = {\n 'values': data,\n 'columns': {table_name: columns},\n 'tables': [table_name]\n }\n return\n\n # is it query to 'commands'?\n if (\n isinstance(mindsdb_sql_struct.from_table, Identifier)\n and mindsdb_sql_struct.from_table.parts[-1].lower() == 'commands'\n and (\n self.database == 'mindsdb'\n or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'\n )\n ):\n self.fetched_data = {\n 'values': [],\n 'columns': {('mindsdb', 'commands', 'commands'): [('command', 'command')]},\n 'tables': [('mindsdb', 'commands', 'commands')]\n }\n self.columns_list = [('mindsdb', 'commands', 'commands', 'command', 'command')]\n return\n\n # is it query to 'datasources'?\n if (\n isinstance(mindsdb_sql_struct.from_table, Identifier)\n and mindsdb_sql_struct.from_table.parts[-1].lower() == 'datasources'\n and (\n self.database == 'mindsdb'\n or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb'\n )\n ):\n dn = self.datahub.get(self.mindsdb_database_name)\n data, columns = dn.get_datasources(mindsdb_sql_struct)\n table_name = ('mindsdb', 'datasources', 'datasources')\n data = [{(key, key): value for key, value in row.items()} for row in data]\n data = [{table_name: x} for x in data]\n\n self.columns_list = [\n (table_name + (column_name, column_name))\n for column_name in columns\n ]\n\n columns = [(column_name, column_name) for column_name in columns]\n\n self.fetched_data = {\n 'values': data,\n 'columns': {table_name: columns},\n 'tables': [table_name]\n }\n return\n\n integrations_names = self.datahub.get_datasources_names()\n 
integrations_names.append('information_schema')\n integrations_names.append('file')\n\n all_tables = get_all_tables(mindsdb_sql_struct)\n\n predictor_metadata = {}\n predictors = db.session.query(db.Predictor).filter_by(company_id=self.session.company_id)\n for model_name in set(all_tables):\n for p in predictors:\n if p.name == model_name:\n if isinstance(p.data, dict) and 'error' not in p.data:\n ts_settings = p.learn_args.get('timeseries_settings', {})\n if ts_settings.get('is_timeseries') is True:\n window = ts_settings.get('window')\n order_by = ts_settings.get('order_by')[0]\n group_by = ts_settings.get('group_by')\n if isinstance(group_by, list):\n group_by = ts_settings.get('group_by')[0]\n predictor_metadata[model_name] = {\n 'timeseries': True,\n 'window': window,\n 'order_by_column': order_by,\n 'group_by_column': group_by\n }\n else:\n predictor_metadata[model_name] = {\n 'timeseries': False\n }\n self.model_types.update(p.data.get('dtypes', {}))\n\n plan = plan_query(\n mindsdb_sql_struct,\n integrations=integrations_names,\n predictor_namespace=self.mindsdb_database_name,\n predictor_metadata=predictor_metadata,\n default_namespace=self.database\n )\n\n steps_data = []\n for step in plan.steps:\n data = []\n if type(step) == GetPredictorColumns:\n predictor_name = step.predictor.parts[-1]\n dn = self.datahub.get(self.mindsdb_database_name)\n columns = dn.get_table_columns(predictor_name)\n columns = [\n (column_name, column_name) for column_name in columns\n ]\n data = {\n 'values': [],\n 'columns': {\n (self.mindsdb_database_name, predictor_name, predictor_name): columns\n },\n 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)]\n }\n elif type(step) == FetchDataframeStep:\n data = self._fetch_dataframe_step(step)\n elif type(step) == UnionStep:\n raise Exception('Union step is not implemented')\n # TODO add union support\n # left_data = steps_data[step.left.step_num]\n # right_data = steps_data[step.right.step_num]\n # data = left_data + right_data\n elif type(step) == MapReduceStep:\n if step.reduce != 'union':\n raise Exception(f'Unknown MapReduceStep type: {step.reduce}')\n\n step_data = steps_data[step.values.step_num]\n values = []\n step_data_values = step_data['values']\n for row in step_data_values:\n for row_data in row.values():\n for name, value in row_data.items():\n if name[0] != '__mindsdb_row_id':\n values.append(value)\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n substep = step.step\n if type(substep) == FetchDataframeStep:\n query = substep.query\n markQueryVar(query.where)\n for value in values:\n replaceQueryVar(query.where, value)\n sub_data = self._fetch_dataframe_step(substep)\n if len(data['columns']) == 0:\n data['columns'] = sub_data['columns']\n if len(data['tables']) == 0:\n data['tables'] = sub_data['tables']\n data['values'].extend(sub_data['values'])\n elif type(substep) == MultipleSteps:\n data = self._multiple_steps_reduce(substep, values)\n else:\n raise Exception(f'Unknown step type: {step.step}')\n elif type(step) == ApplyPredictorRowStep:\n predictor = '.'.join(step.predictor.parts)\n dn = self.datahub.get(self.mindsdb_database_name)\n where_data = step.row_dict\n\n data = dn.select(\n table=predictor,\n columns=None,\n where_data=where_data,\n integration_name=self.session.integration,\n integration_type=self.session.integration_type\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n table_name = get_preditor_alias(step, self.database)\n values = [{table_name: 
x} for x in data]\n columns = {table_name: []}\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name]\n }\n elif type(step) == ApplyPredictorStep or type(step) == ApplyTimeseriesPredictorStep:\n dn = self.datahub.get(self.mindsdb_database_name)\n predictor = '.'.join(step.predictor.parts)\n where_data = []\n for row in steps_data[step.dataframe.step_num]['values']:\n new_row = {}\n for table_name in row:\n keys_intersection = set(new_row) & set(row[table_name])\n if len(keys_intersection) > 0:\n raise Exception(\n f'The predictor got two identical keys from different datasources: {keys_intersection}'\n )\n new_row.update(row[table_name])\n where_data.append(new_row)\n\n where_data = [{key[1]: value for key, value in row.items()} for row in where_data]\n\n is_timeseries = predictor_metadata[predictor]['timeseries']\n _mdb_make_predictions = None\n if is_timeseries:\n if 'LATEST' in self.raw:\n _mdb_make_predictions = False\n else:\n _mdb_make_predictions = True\n for row in where_data:\n if '__mdb_make_predictions' not in row:\n row['__mdb_make_predictions'] = _mdb_make_predictions\n\n for row in where_data:\n for key in row:\n if isinstance(row[key], datetime.date):\n row[key] = str(row[key])\n\n data = dn.select(\n table=predictor,\n columns=None,\n where_data=where_data,\n integration_name=self.session.integration,\n integration_type=self.session.integration_type\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n table_name = get_preditor_alias(step, self.database)\n values = [{table_name: x} for x in data]\n columns = {table_name: []}\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name]\n }\n elif type(step) == JoinStep:\n left_data = steps_data[step.left.step_num]\n right_data = steps_data[step.right.step_num]\n\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136\n if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]:\n right_data = steps_data[step.left.step_num]\n left_data = steps_data[step.right.step_num]\n\n if step.query.condition is not None:\n raise Exception('At this moment supported only JOIN without condition')\n if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'):\n raise Exception('At this moment supported only JOIN and LEFT JOIN')\n if (\n len(left_data['tables']) != 1 or len(right_data['tables']) != 1\n or left_data['tables'][0] == right_data['tables'][0]\n ):\n raise Exception('At this moment supported only JOIN of two different tables')\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': list(set(left_data['tables'] + right_data['tables']))\n }\n\n for data_part in [left_data, right_data]:\n for table_name in data_part['columns']:\n if table_name not in data['columns']:\n data['columns'][table_name] = data_part['columns'][table_name]\n else:\n data['columns'][table_name].extend(data_part['columns'][table_name])\n for table_name in data['columns']:\n data['columns'][table_name] = list(set(data['columns'][table_name]))\n\n left_key = left_data['tables'][0]\n right_key = right_data['tables'][0]\n\n left_columns_map = {}\n left_columns_map_reverse = {}\n for i, column_name in enumerate(left_data['columns'][left_key]):\n left_columns_map[f'a{i}'] = column_name\n left_columns_map_reverse[column_name] = f'a{i}'\n\n right_columns_map = {}\n 
right_columns_map_reverse = {}\n for i, column_name in enumerate(right_data['columns'][right_key]):\n right_columns_map[f'b{i}'] = column_name\n right_columns_map_reverse[column_name] = f'b{i}'\n\n left_df_data = []\n for row in left_data['values']:\n row = row[left_key]\n left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()})\n\n right_df_data = []\n for row in right_data['values']:\n row = row[right_key]\n right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()})\n\n df_a = pd.DataFrame(left_df_data)\n df_b = pd.DataFrame(right_df_data)\n\n a_name = f'a{round(time.time()*1000)}'\n b_name = f'b{round(time.time()*1000)}'\n con = duckdb.connect(database=':memory:')\n con.register(a_name, df_a)\n con.register(b_name, df_b)\n resp_df = con.execute(f\"\"\"\n SELECT * FROM {a_name} as ta full join {b_name} as tb\n ON ta.{left_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}\n = tb.{right_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}\n \"\"\").fetchdf()\n con.unregister(a_name)\n con.unregister(b_name)\n con.close()\n resp_df = resp_df.where(pd.notnull(resp_df), None)\n resp_dict = resp_df.to_dict(orient='records')\n\n for row in resp_dict:\n new_row = {left_key: {}, right_key: {}}\n for key, value in row.items():\n if key.startswith('a'):\n new_row[left_key][left_columns_map[key]] = value\n else:\n new_row[right_key][right_columns_map[key]] = value\n data['values'].append(new_row)\n elif type(step) == FilterStep:\n raise Exception('FilterStep is not implemented')\n # elif type(step) == ApplyTimeseriesPredictorStep:\n # raise Exception('ApplyTimeseriesPredictorStep is not implemented')\n elif type(step) == ProjectStep:\n step_data = steps_data[step.dataframe.step_num]\n columns_list = []\n for column_full_name in step.columns:\n table_name = None\n if type(column_full_name) == Star:\n for table_name, table_columns_list in step_data['columns'].items():\n for column in table_columns_list:\n columns_list.append(table_name + column)\n elif type(column_full_name) == Identifier:\n column_name_parts = column_full_name.parts\n column_alias = None if column_full_name.alias is None else '.'.join(column_full_name.alias.parts)\n if len(column_name_parts) > 2:\n raise Exception(f'Column name must contain no more than 2 parts. 
Got name: {\".\".join(column_full_name)}')\n elif len(column_name_parts) == 1:\n column_name = column_name_parts[0]\n\n appropriate_table = None\n if len(step_data['tables']) == 1:\n appropriate_table = step_data['tables'][0]\n else:\n for table_name, table_columns in step_data['columns'].items():\n if (column_name, column_name) in table_columns:\n if appropriate_table is not None:\n raise Exception('Found multiple appropriate tables for column {column_name}')\n else:\n appropriate_table = table_name\n if appropriate_table is None:\n # it is probably constaint\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/133\n # column_name = column_name.strip(\"'\")\n # name_or_alias = column_alias or column_name\n # column_alias = name_or_alias\n # for row in step_data['values']:\n # for table in row:\n # row[table][(column_name, name_or_alias)] = row[table][(column_name, column_name)]\n # appropriate_table = step_data['tables'][0]\n columns_list.append(appropriate_table + (column_alias, column_alias))\n else:\n columns_list.append(appropriate_table + (column_name, column_alias or column_name)) # column_name\n elif len(column_name_parts) == 2:\n table_name_or_alias = column_name_parts[0]\n column_name = column_name_parts[1]\n\n appropriate_table = None\n for table_name, table_columns in step_data['columns'].items():\n checkig_table_name_or_alias = table_name[2] or table_name[1]\n if table_name_or_alias == checkig_table_name_or_alias:\n for table_column_name in table_columns:\n if (\n table_column_name[1] == column_name\n or table_column_name[1] is None and table_column_name[0] == column_name\n ):\n break\n else:\n raise Exception(f'Can not find column \"{column_name}\" in table \"{table_name}\"')\n appropriate_table = table_name\n break\n if appropriate_table is None:\n raise Exception(f'Can not find approproate table for column {column_name}')\n\n columns_to_copy = None\n for column in step_data['columns'][appropriate_table]:\n if column[0] == column_name and (column[1] is None or column[1] == column_name):\n columns_to_copy = column\n break\n else:\n raise Exception(f'Can not find approproate column in data: {(column_name, column_alias)}')\n\n for row in step_data['values']:\n row[appropriate_table][(column_name, column_alias)] = row[appropriate_table][columns_to_copy]\n\n columns_list.append(appropriate_table + (column_name, column_alias))\n else:\n raise Exception('Undefined column name')\n else:\n raise Exception(f'Unexpected column name type: {column_full_name}')\n\n self.columns_list = columns_list\n data = step_data\n else:\n raise Exception(F'Unknown planner step: {step}')\n steps_data.append(data)\n\n if self.outer_query is not None:\n data = []\n # +++\n result = []\n for row in steps_data[-1]:\n data_row = {}\n for column_record in self.columns_list:\n table_name = column_record[:3]\n column_name = column_record[3]\n data_row[column_record[4] or column_record[3]] = row[table_name][column_name]\n result.append(data_row)\n # ---\n data = self._make_list_result_view(result)\n df = pd.DataFrame(data)\n result = query_df(df, self.outer_query)\n\n try:\n self.columns_list = [\n ('', '', '', x, x) for x in result.columns\n ]\n except Exception:\n self.columns_list = [\n ('', '', '', result.name, result.name)\n ]\n\n # +++ make list result view\n new_result = []\n for row in result.to_dict(orient='records'):\n data_row = []\n for column_record in self.columns_list:\n column_name = column_record[4] or column_record[3]\n data_row.append(row.get(column_name))\n new_result.append(data_row)\n 
result = new_result\n # ---\n\n self.fetched_data = result\n else:\n self.fetched_data = steps_data[-1]\n\n if hasattr(self, 'columns_list') is False:\n self.columns_list = []\n for row in self.fetched_data:\n for table_key in row:\n for column_name in row[table_key]:\n if (table_key + (column_name, column_name)) not in self.columns_list:\n self.columns_list.append((table_key + (column_name, column_name)))\n\n # if there was no 'ProjectStep', then get columns list from last step:\n if self.columns_list is None:\n self.columns_list = []\n for table_name in self.fetched_data['columns']:\n self.columns_list.extend([\n table_name + column for column in self.fetched_data['columns'][table_name]\n ])\n\n self.columns_list = [x for x in self.columns_list if x[3] != '__mindsdb_row_id']\n\n def _apply_where_filter(self, row, where):\n if isinstance(where, Identifier):\n return row[where.value]\n elif isinstance(where, Constant):\n return where.value\n elif not isinstance(where, (UnaryOperation, BinaryOperation)):\n Exception(f'Unknown operation type: {where}')\n\n op_fn = operator_map.get(where.op)\n if op_fn is None:\n raise Exception(f'unknown operator {where.op}')\n\n args = [self._apply_where_filter(row, arg) for arg in where.args]\n result = op_fn(*args)\n return result\n\n def _make_list_result_view(self, data):\n if self.outer_query is not None:\n return data['values']\n result = []\n for row in data['values']:\n data_row = []\n for column_record in self.columns_list:\n table_name = column_record[:3]\n column_name = column_record[3:]\n data_row.append(row[table_name][column_name])\n result.append(data_row)\n return result\n\n def _make_dict_result_view(self, data):\n result = []\n for row in data:\n data_row = {}\n for table_name in row:\n data_row.update(row[table_name])\n result.append(data_row)\n return result\n\n @property\n def columns(self):\n result = []\n for column_record in self.columns_list:\n try:\n field_type = self.model_types.get(column_record[3])\n except Exception:\n field_type = None\n\n column_type = TYPES.MYSQL_TYPE_VAR_STRING\n if field_type == dtype.date:\n column_type = TYPES.MYSQL_TYPE_DATE\n elif field_type == dtype.datetime:\n column_type = TYPES.MYSQL_TYPE_DATETIME\n\n result.append({\n 'database': column_record[0] or self.database,\n # TODO add 'original_table'\n 'table_name': column_record[1],\n 'name': column_record[3],\n 'alias': column_record[4] or column_record[3],\n # NOTE all work with text-type, but if/when wanted change types to real,\n # it will need to check all types casts in BinaryResultsetRowPacket\n 'type': column_type\n })\n return result\n", "path": "mindsdb/api/mysql/mysql_proxy/classes/sql_query.py"}]} |
gh_patches_debug_1319 | rasdani/github-patches | git_diff | plotly__dash-333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The README is in markdown and doesn't render properly on pypi.io
See: https://pypi.org/project/dash/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used
6
7 setup(
8 name='dash',
9 version=main_ns['__version__'],
10 author='chris p',
11 author_email='[email protected]',
12 packages=find_packages(exclude=['tests*']),
13 license='MIT',
14 description=('A Python framework for building reactive web-apps. '
15 'Developed by Plotly.'),
16 long_description=io.open('README.md', encoding='utf-8').read(),
17 install_requires=[
18 'Flask>=0.12',
19 'flask-compress',
20 'plotly',
21 'dash_renderer',
22 ],
23 url='https://plot.ly/dash',
24 classifiers=[
25 'Development Status :: 5 - Production/Stable',
26 'Environment :: Web Environment',
27 'Framework :: Flask',
28 'Intended Audience :: Developers',
29 'Intended Audience :: Education',
30 'Intended Audience :: Financial and Insurance Industry',
31 'Intended Audience :: Healthcare Industry',
32 'Intended Audience :: Manufacturing',
33 'Intended Audience :: Science/Research',
34 'License :: OSI Approved :: MIT License',
35 'Programming Language :: Python :: 2.7',
36 'Programming Language :: Python :: 3.3',
37 'Programming Language :: Python :: 3.4',
38 'Programming Language :: Python :: 3.5',
39 'Programming Language :: Python :: 3.6',
40 'Topic :: Database :: Front-Ends',
41 'Topic :: Office/Business :: Financial :: Spreadsheet',
42 'Topic :: Scientific/Engineering :: Visualization',
43 'Topic :: Software Development :: Libraries :: Application Frameworks',
44 'Topic :: Software Development :: Widget Sets'
45 ]
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,7 @@
description=('A Python framework for building reactive web-apps. '
'Developed by Plotly.'),
long_description=io.open('README.md', encoding='utf-8').read(),
+ long_description_content_type='text/markdown',
install_requires=[
'Flask>=0.12',
'flask-compress',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n+ long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n", "issue": "The README is in markdown and doesn't render properly on pypi.io\nSee: https://pypi.org/project/dash/\r\n\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. 
'\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1320 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-6772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: New SHA256 hash takes extremely long time up to a point of model load being unusable
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
The newly added SHA-256 hash takes an extremely long time to calculate on model load, to the point where loading appears to hang (I've restarted the server twice before I even let it run to completion).
Previously, switching to a new model took under 10 sec; now switching to a new model (one that does not already have its hash stored) takes 100-150 sec (and this is a high-end system)!
And to make it worse, messages about hash calculation are only printed **after** the hash has been calculated; there is no progress info or anything to indicate the system is actually doing anything for 2 min!
### Steps to reproduce the problem
1. Switch to a new model and wait for completion - it takes forever
### What should have happened?
Model load should **never** take over 2 minutes to complete.
### Commit where the problem happens
f8c512478568293155539f616dce26c5e4495055
### What platforms do you use to access UI ?
Windows, Linux
### What browsers do you use to access the UI ?
Google Chrome, Microsoft Edge
### Command Line Arguments
```Shell
--api --xformers
```
### Additional information, context and logs
Console log showing model load taking 142 seconds!
```text
Calculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef
Loading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt
Couldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead
Applying xformers cross attention optimization.
Weights loaded in 142.6s.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/hashes.py`
Content:
```
1 import hashlib
2 import json
3 import os.path
4
5 import filelock
6
7
8 cache_filename = "cache.json"
9 cache_data = None
10
11
12 def dump_cache():
13 with filelock.FileLock(cache_filename+".lock"):
14 with open(cache_filename, "w", encoding="utf8") as file:
15 json.dump(cache_data, file, indent=4)
16
17
18 def cache(subsection):
19 global cache_data
20
21 if cache_data is None:
22 with filelock.FileLock(cache_filename+".lock"):
23 if not os.path.isfile(cache_filename):
24 cache_data = {}
25 else:
26 with open(cache_filename, "r", encoding="utf8") as file:
27 cache_data = json.load(file)
28
29 s = cache_data.get(subsection, {})
30 cache_data[subsection] = s
31
32 return s
33
34
35 def calculate_sha256(filename):
36 hash_sha256 = hashlib.sha256()
37
38 with open(filename, "rb") as f:
39 for chunk in iter(lambda: f.read(4096), b""):
40 hash_sha256.update(chunk)
41
42 return hash_sha256.hexdigest()
43
44
45 def sha256_from_cache(filename, title):
46 hashes = cache("hashes")
47 ondisk_mtime = os.path.getmtime(filename)
48
49 if title not in hashes:
50 return None
51
52 cached_sha256 = hashes[title].get("sha256", None)
53 cached_mtime = hashes[title].get("mtime", 0)
54
55 if ondisk_mtime > cached_mtime or cached_sha256 is None:
56 return None
57
58 return cached_sha256
59
60
61 def sha256(filename, title):
62 hashes = cache("hashes")
63
64 sha256_value = sha256_from_cache(filename, title)
65 if sha256_value is not None:
66 return sha256_value
67
68 print(f"Calculating sha256 for {filename}: ", end='')
69 sha256_value = calculate_sha256(filename)
70 print(f"{sha256_value}")
71
72 hashes[title] = {
73 "mtime": os.path.getmtime(filename),
74 "sha256": sha256_value,
75 }
76
77 dump_cache()
78
79 return sha256_value
80
81
82
83
84
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/hashes.py b/modules/hashes.py
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -34,9 +34,10 @@
def calculate_sha256(filename):
hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
with open(filename, "rb") as f:
- for chunk in iter(lambda: f.read(4096), b""):
+ for chunk in iter(lambda: f.read(blksize), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
| {"golden_diff": "diff --git a/modules/hashes.py b/modules/hashes.py\n--- a/modules/hashes.py\n+++ b/modules/hashes.py\n@@ -34,9 +34,10 @@\n \r\n def calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n+ blksize = 1024 * 1024\r\n \r\n with open(filename, \"rb\") as f:\r\n- for chunk in iter(lambda: f.read(4096), b\"\"):\r\n+ for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n \r\n return hash_sha256.hexdigest()\n", "issue": "[Bug]: New SHA256 hash takes extremely long time up to a point of of model load being unusable\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues and checked the recent builds/commits\r\n\r\n### What happened?\r\n\r\nNewly added sha-256 hash takes extremely long time to calculate on model load up to a point where loading appears to hang (i've restarted server twice before i even let it run until completion) \r\n\r\nPreviously switching to a new model was sub 10 sec, now switching to a new model (that does not have hash stored already) takes 100-150 sec (and this is a high end system)!\r\n\r\nAnd to make it worse, messages about hash calculation are only printed **after** it has been calculated, there is no progress info or anything to indicate system is actually doing anything for 2 min!\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Switch to a new model and wait for completion - it takes forever\r\n\r\n\r\n### What should have happened?\r\n\r\nModel load should **never** take over 2 minutes to complete.\r\n\r\n### Commit where the problem happens\r\n\r\nf8c512478568293155539f616dce26c5e4495055\r\n\r\n### What platforms do you use to access UI ?\r\n\r\nWindows, Linux\r\n\r\n### What browsers do you use to access the UI ?\r\n\r\nGoogle Chrome, Microsoft Edge\r\n\r\n### Command Line Arguments\r\n\r\n```Shell\r\n--api --xformers\r\n```\r\n\r\n\r\n### Additional information, context and logs\r\n\r\nConsole log showing model load taking 142 seconds!\r\n\r\n```text\r\nCalculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef\r\nLoading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt\r\nCouldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead\r\nApplying xformers cross attention optimization.\r\nWeights loaded in 142.6s.\r\n```\r\n\n", "before_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n 
ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}], "after_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n blksize = 1024 * 1024\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}]} |
gh_patches_debug_1321 | rasdani/github-patches | git_diff | kivy__kivy-7301 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
postproc does not invert on_touch_up events and therefore does not properly dispatch on_release
**Software Versions**
* Python: 3.7
* OS: Raspberry Pi OS
* Kivy: v2.0.0rc3, git-Unknown, 20200723
* Kivy installation method: Master
**Describe the bug**
on_touch_up is not inverted by post processing, and therefore on_release is not dispatched on a button release unless you move your finger to the inverted position on the screen before releasing.
**Expected behavior**
Having applied post processing to touch input events to invert the y axis, on_release should be dispatched on release just as on_press is on press; instead, on_press works but on_release does not.
**To Reproduce**
Invert your input events in config.ini:
```
[input]
mtdev_%(name)s = probesysfs,provider=mtdev
[postproc:calibration]
(mtdev) = xoffset=1,yoffset=1,xratio=-1,yratio=-1
```
Run this code. Assuming your touch input normally works and you don't need any postproc, the behaviour will be slightly different for you than for me: touch the top of the screen, which should press the button at the bottom of the screen due to inversion, then release; no on_release is dispatched. Now touch the top of the screen, move your finger to the bottom half and release, and on_release is dispatched. You can try horizontal mode too by toggling the comments.
```
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
class MyPage(BoxLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.orientation = 'vertical'
#self.orientation = 'horizontal'
# Message
self.message = Label(text='Top section, no button')
self.add_widget(self.message)
# Button
self.btn = Button(text='Bottom section, button')
self.btn.bind(on_press=self.my_press)
self.btn.bind(on_release=self.my_release)
#self.btn.bind(on_touch_up=self.my_touch_up)
self.add_widget(self.btn)
def my_press(self, touch):
print('on press', self.btn.collide_point(*touch.pos))
def my_release(self, touch):
print('on release', self.btn.collide_point(*touch.pos))
def my_touch_up(self, instance, touch):
print('on_touch_up', self.btn.collide_point(*touch.pos))
class TouchApp(App):
def build(self):
page = MyPage()
return page
if __name__ == "__main__":
app = TouchApp()
app.run()
```
If you remove the post processing, you will likely see that everything is normal; this shows that it is an error with post processing (or at least the way it is applied to on_touch_up), as on_press reverts to behaving the same as on_release.
I hope this is all clear, but please let me know if any more info is required. I have not posted the trace, as this is not an issue that crashes and therefore I don't think it is useful, but let me know if that's wrong and I will post it.
I also have an issue that is likely related to this when using a horizontal carousel within a vertical carousel: with the same layout as above (i.e. a button on the bottom half of the screen), tapping the top half presses and releases the button, but holding the bottom half makes the button press and release only when the touch is released. I am still trying to simplify an example for that issue, but I am referencing it here in case it affects the above.
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/input/providers/mtdev.py`
Content:
```
1 '''
2 Native support for Multitouch devices on Linux, using libmtdev.
3 ===============================================================
4
5 The Mtdev project is a part of the Ubuntu Maverick multitouch architecture.
6 You can read more on http://wiki.ubuntu.com/Multitouch
7
8 To configure MTDev, it's preferable to use probesysfs providers.
9 Check :py:class:`~kivy.input.providers.probesysfs` for more information.
10
11 Otherwise, add this to your configuration::
12
13 [input]
14 # devicename = hidinput,/dev/input/eventXX
15 acert230h = mtdev,/dev/input/event2
16
17 .. note::
18 You must have read access to the input event.
19
20 You can use a custom range for the X, Y and pressure values.
21 On some drivers, the range reported is invalid.
22 To fix that, you can add these options to the argument line:
23
24 * invert_x : 1 to invert X axis
25 * invert_y : 1 to invert Y axis
26 * min_position_x : X minimum
27 * max_position_x : X maximum
28 * min_position_y : Y minimum
29 * max_position_y : Y maximum
30 * min_pressure : pressure minimum
31 * max_pressure : pressure maximum
32 * min_touch_major : width shape minimum
33 * max_touch_major : width shape maximum
34 * min_touch_minor : width shape minimum
35 * max_touch_minor : height shape maximum
36 * rotation : 0,90,180 or 270 to rotate
37 '''
38
39 __all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')
40
41 import os
42 import os.path
43 import time
44 from kivy.input.motionevent import MotionEvent
45 from kivy.input.shape import ShapeRect
46
47
48 class MTDMotionEvent(MotionEvent):
49
50 def depack(self, args):
51 self.is_touch = True
52 if 'x' in args:
53 self.sx = args['x']
54 else:
55 self.sx = -1
56 if 'y' in args:
57 self.sy = args['y']
58 else:
59 self.sy = -1
60 self.profile = ['pos']
61 if 'size_w' in args and 'size_h' in args:
62 self.shape = ShapeRect()
63 self.shape.width = args['size_w']
64 self.shape.height = args['size_h']
65 self.profile.append('shape')
66 if 'pressure' in args:
67 self.pressure = args['pressure']
68 self.profile.append('pressure')
69 super(MTDMotionEvent, self).depack(args)
70
71 def __str__(self):
72 i, sx, sy, d = (self.id, self.sx, self.sy, self.device)
73 return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d)
74
75
76 if 'KIVY_DOC' in os.environ:
77
78 # documentation hack
79 MTDMotionEventProvider = None
80
81 else:
82 import threading
83 import collections
84 from kivy.lib.mtdev import Device, \
85 MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \
86 MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \
87 MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \
88 MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \
89 MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \
90 MTDEV_ABS_TOUCH_MAJOR
91 from kivy.input.provider import MotionEventProvider
92 from kivy.input.factory import MotionEventFactory
93 from kivy.logger import Logger
94
95 class MTDMotionEventProvider(MotionEventProvider):
96
97 options = ('min_position_x', 'max_position_x',
98 'min_position_y', 'max_position_y',
99 'min_pressure', 'max_pressure',
100 'min_touch_major', 'max_touch_major',
101 'min_touch_minor', 'max_touch_minor',
102 'invert_x', 'invert_y',
103 'rotation')
104
105 def __init__(self, device, args):
106 super(MTDMotionEventProvider, self).__init__(device, args)
107 self._device = None
108 self.input_fn = None
109 self.default_ranges = dict()
110
111 # split arguments
112 args = args.split(',')
113 if not args:
114 Logger.error('MTD: No filename pass to MTD configuration')
115 Logger.error('MTD: Use /dev/input/event0 for example')
116 return
117
118 # read filename
119 self.input_fn = args[0]
120 Logger.info('MTD: Read event from <%s>' % self.input_fn)
121
122 # read parameters
123 for arg in args[1:]:
124 if arg == '':
125 continue
126 arg = arg.split('=')
127
128 # ensure it's a key = value
129 if len(arg) != 2:
130 err = 'MTD: Bad parameter %s: Not in key=value format' %\
131 arg
132 Logger.error(err)
133 continue
134
135 # ensure the key exist
136 key, value = arg
137 if key not in MTDMotionEventProvider.options:
138 Logger.error('MTD: unknown %s option' % key)
139 continue
140
141 # ensure the value
142 try:
143 self.default_ranges[key] = int(value)
144 except ValueError:
145 err = 'MTD: invalid value %s for option %s' % (key, value)
146 Logger.error(err)
147 continue
148
149 # all good!
150 Logger.info('MTD: Set custom %s to %d' % (key, int(value)))
151
152 if 'rotation' not in self.default_ranges:
153 self.default_ranges['rotation'] = 0
154 elif self.default_ranges['rotation'] not in (0, 90, 180, 270):
155 Logger.error('HIDInput: invalid rotation value ({})'.format(
156 self.default_ranges['rotation']))
157 self.default_ranges['rotation'] = 0
158
159 def start(self):
160 if self.input_fn is None:
161 return
162 self.uid = 0
163 self.queue = collections.deque()
164 self.thread = threading.Thread(
165 name=self.__class__.__name__,
166 target=self._thread_run,
167 kwargs=dict(
168 queue=self.queue,
169 input_fn=self.input_fn,
170 device=self.device,
171 default_ranges=self.default_ranges))
172 self.thread.daemon = True
173 self.thread.start()
174
175 def _thread_run(self, **kwargs):
176 input_fn = kwargs.get('input_fn')
177 queue = kwargs.get('queue')
178 device = kwargs.get('device')
179 drs = kwargs.get('default_ranges').get
180 touches = {}
181 touches_sent = []
182 point = {}
183 l_points = {}
184
185 def assign_coord(point, value, invert, coords):
186 cx, cy = coords
187 if invert:
188 value = 1. - value
189 if rotation == 0:
190 point[cx] = value
191 elif rotation == 90:
192 point[cy] = value
193 elif rotation == 180:
194 point[cx] = 1. - value
195 elif rotation == 270:
196 point[cy] = 1. - value
197
198 def process(points):
199 for args in points:
200 # this can happen if we have a touch going on already at
201 # the start of the app
202 if 'id' not in args:
203 continue
204 tid = args['id']
205 try:
206 touch = touches[tid]
207 except KeyError:
208 touch = MTDMotionEvent(device, tid, args)
209 touches[touch.id] = touch
210 touch.move(args)
211 action = 'update'
212 if tid not in touches_sent:
213 action = 'begin'
214 touches_sent.append(tid)
215 if 'delete' in args:
216 action = 'end'
217 del args['delete']
218 del touches[touch.id]
219 touches_sent.remove(tid)
220 touch.update_time_end()
221 queue.append((action, touch))
222
223 def normalize(value, vmin, vmax):
224 try:
225 return (value - vmin) / float(vmax - vmin)
226 except ZeroDivisionError: # it's both in py2 and py3
227 return (value - vmin)
228
229 # open mtdev device
230 _fn = input_fn
231 _slot = 0
232 try:
233 _device = Device(_fn)
234 except OSError as e:
235 if e.errno == 13: # Permission denied
236 Logger.warn(
237 'MTD: Unable to open device "{0}". Please ensure you'
238 ' have the appropriate permissions.'.format(_fn))
239 return
240 else:
241 raise
242 _changes = set()
243
244 # prepare some vars to get limit of some component
245 ab = _device.get_abs(MTDEV_ABS_POSITION_X)
246 range_min_position_x = drs('min_position_x', ab.minimum)
247 range_max_position_x = drs('max_position_x', ab.maximum)
248 Logger.info('MTD: <%s> range position X is %d - %d' %
249 (_fn, range_min_position_x, range_max_position_x))
250
251 ab = _device.get_abs(MTDEV_ABS_POSITION_Y)
252 range_min_position_y = drs('min_position_y', ab.minimum)
253 range_max_position_y = drs('max_position_y', ab.maximum)
254 Logger.info('MTD: <%s> range position Y is %d - %d' %
255 (_fn, range_min_position_y, range_max_position_y))
256
257 ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR)
258 range_min_major = drs('min_touch_major', ab.minimum)
259 range_max_major = drs('max_touch_major', ab.maximum)
260 Logger.info('MTD: <%s> range touch major is %d - %d' %
261 (_fn, range_min_major, range_max_major))
262
263 ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR)
264 range_min_minor = drs('min_touch_minor', ab.minimum)
265 range_max_minor = drs('max_touch_minor', ab.maximum)
266 Logger.info('MTD: <%s> range touch minor is %d - %d' %
267 (_fn, range_min_minor, range_max_minor))
268
269 range_min_pressure = drs('min_pressure', 0)
270 range_max_pressure = drs('max_pressure', 255)
271 Logger.info('MTD: <%s> range pressure is %d - %d' %
272 (_fn, range_min_pressure, range_max_pressure))
273
274 invert_x = int(bool(drs('invert_x', 0)))
275 invert_y = int(bool(drs('invert_y', 0)))
276 Logger.info('MTD: <%s> axes invertion: X is %d, Y is %d' %
277 (_fn, invert_x, invert_y))
278
279 rotation = drs('rotation', 0)
280 Logger.info('MTD: <%s> rotation set to %d' %
281 (_fn, rotation))
282 failures = 0
283 while _device:
284 # if device have disconnected lets try to connect
285 if failures > 1000:
286 Logger.info('MTD: <%s> input device disconnected' % _fn)
287 while not os.path.exists(_fn):
288 time.sleep(0.05)
289 # input device is back online let's recreate device
290 _device.close()
291 _device = Device(_fn)
292 Logger.info('MTD: <%s> input device reconnected' % _fn)
293 failures = 0
294 continue
295
296 # idle as much as we can.
297 while _device.idle(1000):
298 continue
299
300 # got data, read all without redoing idle
301 while True:
302 data = _device.get()
303 if data is None:
304 failures += 1
305 break
306
307 failures = 0
308
309 # set the working slot
310 if data.type == MTDEV_TYPE_EV_ABS and \
311 data.code == MTDEV_CODE_SLOT:
312 _slot = data.value
313 continue
314
315 # fill the slot
316 if not (_slot in l_points):
317 l_points[_slot] = dict()
318 point = l_points[_slot]
319 ev_value = data.value
320 ev_code = data.code
321 if ev_code == MTDEV_CODE_POSITION_X:
322 val = normalize(ev_value,
323 range_min_position_x,
324 range_max_position_x)
325 assign_coord(point, val, invert_x, 'xy')
326 elif ev_code == MTDEV_CODE_POSITION_Y:
327 val = 1. - normalize(ev_value,
328 range_min_position_y,
329 range_max_position_y)
330 assign_coord(point, val, invert_y, 'yx')
331 elif ev_code == MTDEV_CODE_PRESSURE:
332 point['pressure'] = normalize(ev_value,
333 range_min_pressure,
334 range_max_pressure)
335 elif ev_code == MTDEV_CODE_TOUCH_MAJOR:
336 point['size_w'] = normalize(ev_value,
337 range_min_major,
338 range_max_major)
339 elif ev_code == MTDEV_CODE_TOUCH_MINOR:
340 point['size_h'] = normalize(ev_value,
341 range_min_minor,
342 range_max_minor)
343 elif ev_code == MTDEV_CODE_TRACKING_ID:
344 if ev_value == -1:
345 point['delete'] = True
346 # force process of changes here, as the slot can be
347 # reused.
348 _changes.add(_slot)
349 process([l_points[x] for x in _changes])
350 _changes.clear()
351 continue
352 else:
353 point['id'] = ev_value
354 else:
355 # unrecognized command, ignore.
356 continue
357 _changes.add(_slot)
358
359 # push all changes
360 if _changes:
361 process([l_points[x] for x in _changes])
362 _changes.clear()
363
364 def update(self, dispatch_fn):
365 # dispatch all event from threads
366 try:
367 while True:
368 event_type, touch = self.queue.popleft()
369 dispatch_fn(event_type, touch)
370 except:
371 pass
372
373 MotionEventFactory.register('mtdev', MTDMotionEventProvider)
374
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/input/providers/mtdev.py b/kivy/input/providers/mtdev.py
--- a/kivy/input/providers/mtdev.py
+++ b/kivy/input/providers/mtdev.py
@@ -34,6 +34,12 @@
* min_touch_minor : width shape minimum
* max_touch_minor : height shape maximum
* rotation : 0,90,180 or 270 to rotate
+
+An inverted display configuration will look like this::
+
+ [input]
+ # example for inverting touch events
+ display = mtdev,/dev/input/event0,invert_x=1,invert_y=1
'''
__all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')
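For completeness, here is a hedged sketch of how the same provider-level inversion could be wired up when the device is discovered through probesysfs, as in the reporter's config above. The `param=` passthrough syntax is an assumption based on Kivy's probesysfs documentation, not something verified against this exact device:

```
[input]
# hypothetical: pass the invert options through probesysfs to the mtdev provider
mtdev_%(name)s = probesysfs,provider=mtdev,param=invert_x=1,param=invert_y=1
```

Keeping the inversion inside the provider means the begin, update and end events of a touch all carry the same corrected coordinates, rather than depending on a postproc calibration stage.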
| {"golden_diff": "diff --git a/kivy/input/providers/mtdev.py b/kivy/input/providers/mtdev.py\n--- a/kivy/input/providers/mtdev.py\n+++ b/kivy/input/providers/mtdev.py\n@@ -34,6 +34,12 @@\n * min_touch_minor : width shape minimum\n * max_touch_minor : height shape maximum\n * rotation : 0,90,180 or 270 to rotate\n+\n+An inverted display configuration will look like this::\n+\n+ [input]\n+ # example for inverting touch events\n+ display = mtdev,/dev/input/event0,invert_x=1,invert_y=1\n '''\n \n __all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')\n", "issue": "postproc does not invert on_touch_up events and therefore does not properly dispatch on_release\n**Software Versions**\r\n* Python: 3.7\r\n* OS: Raspberry Pi OS\r\n* Kivy: v2.0.0rc3, git-Unknown, 20200723\r\n* Kivy installation method: Master\r\n\r\n**Describe the bug**\r\non_touch_up is not inverted by post processing and therefore on_release is not dispatched on a button release unless moving your finger to the inverted position on the screen before releasing.\r\n\r\n**Expected behavior**\r\nHaving applied post processing to touch input events to invert y axis, on_press works but on_release does not\r\n\r\n**To Reproduce**\r\nInvert your input events in config.ini:\r\n```\r\n[input]\r\nmtdev_%(name)s = probesysfs,provider=mtdev\r\n\r\n[postproc:calibration]\r\n(mtdev) = xoffset=1,yoffset=1,xratio=-1,yratio=-1\r\n```\r\n\r\nRun this code. Assuming your touch is usually ok and you don't need any postproc, this will be slightly different for you than me: Touch the top of the screen which should press the button at the bottom of the screen due to inversion; and release, no on_release is dispatched. Now, touch the top of the screen, move your finger to the bottom half, release and on_release is dispatched. You can try in horizontal mode too by toggling comments.\r\n\r\n```\r\nfrom kivy.app import App\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.button import Button\r\n\r\n\r\nclass MyPage(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n self.orientation = 'vertical'\r\n #self.orientation = 'horizontal'\r\n\r\n # Message\r\n self.message = Label(text='Top section, no button')\r\n self.add_widget(self.message)\r\n\r\n # Button\r\n self.btn = Button(text='Bottom section, button')\r\n self.btn.bind(on_press=self.my_press)\r\n self.btn.bind(on_release=self.my_release)\r\n #self.btn.bind(on_touch_up=self.my_touch_up)\r\n self.add_widget(self.btn)\r\n\r\n def my_press(self, touch):\r\n print('on press', self.btn.collide_point(*touch.pos))\r\n\r\n def my_release(self, touch):\r\n print('on release', self.btn.collide_point(*touch.pos))\r\n\r\n def my_touch_up(self, instance, touch):\r\n print('on_touch_up', self.btn.collide_point(*touch.pos))\r\n\r\n\r\nclass TouchApp(App):\r\n def build(self):\r\n page = MyPage()\r\n return page\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = TouchApp()\r\n app.run()\r\n```\r\n\r\n\r\nIf you remove the post processing, you will likely see that everything is normal, this shows that it is an error with post processing (or at lease the way it is applied to on_touch_up) as the on_press reverts to the same as on_release.\r\n\r\nI hope this is all clear but please let me know any more info required. 
I have not posted the trace as this is not an issue that crashes and therefore I don't think it is useful but let me know if that's wrong and I will post.\r\n\r\nI also have an issue that is likely related to this when using a horizontal carousel within a vertical carousel: with the same layout as above (i.e. button on the bottom half of the screen) when tapping the top half, the button presses and releases but when holding the bottom half, the button presses and releases when released. I am still trying to simplify an example for this issue but referencing here in case it effects above.\r\n\r\nThanks!\n", "before_files": [{"content": "'''\nNative support for Multitouch devices on Linux, using libmtdev.\n===============================================================\n\nThe Mtdev project is a part of the Ubuntu Maverick multitouch architecture.\nYou can read more on http://wiki.ubuntu.com/Multitouch\n\nTo configure MTDev, it's preferable to use probesysfs providers.\nCheck :py:class:`~kivy.input.providers.probesysfs` for more information.\n\nOtherwise, add this to your configuration::\n\n [input]\n # devicename = hidinput,/dev/input/eventXX\n acert230h = mtdev,/dev/input/event2\n\n.. note::\n You must have read access to the input event.\n\nYou can use a custom range for the X, Y and pressure values.\nOn some drivers, the range reported is invalid.\nTo fix that, you can add these options to the argument line:\n\n* invert_x : 1 to invert X axis\n* invert_y : 1 to invert Y axis\n* min_position_x : X minimum\n* max_position_x : X maximum\n* min_position_y : Y minimum\n* max_position_y : Y maximum\n* min_pressure : pressure minimum\n* max_pressure : pressure maximum\n* min_touch_major : width shape minimum\n* max_touch_major : width shape maximum\n* min_touch_minor : width shape minimum\n* max_touch_minor : height shape maximum\n* rotation : 0,90,180 or 270 to rotate\n'''\n\n__all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')\n\nimport os\nimport os.path\nimport time\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.input.shape import ShapeRect\n\n\nclass MTDMotionEvent(MotionEvent):\n\n def depack(self, args):\n self.is_touch = True\n if 'x' in args:\n self.sx = args['x']\n else:\n self.sx = -1\n if 'y' in args:\n self.sy = args['y']\n else:\n self.sy = -1\n self.profile = ['pos']\n if 'size_w' in args and 'size_h' in args:\n self.shape = ShapeRect()\n self.shape.width = args['size_w']\n self.shape.height = args['size_h']\n self.profile.append('shape')\n if 'pressure' in args:\n self.pressure = args['pressure']\n self.profile.append('pressure')\n super(MTDMotionEvent, self).depack(args)\n\n def __str__(self):\n i, sx, sy, d = (self.id, self.sx, self.sy, self.device)\n return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d)\n\n\nif 'KIVY_DOC' in os.environ:\n\n # documentation hack\n MTDMotionEventProvider = None\n\nelse:\n import threading\n import collections\n from kivy.lib.mtdev import Device, \\\n MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \\\n MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \\\n MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \\\n MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \\\n MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \\\n MTDEV_ABS_TOUCH_MAJOR\n from kivy.input.provider import MotionEventProvider\n from kivy.input.factory import MotionEventFactory\n from kivy.logger import Logger\n\n class MTDMotionEventProvider(MotionEventProvider):\n\n options = ('min_position_x', 'max_position_x',\n 'min_position_y', 'max_position_y',\n 
'min_pressure', 'max_pressure',\n 'min_touch_major', 'max_touch_major',\n 'min_touch_minor', 'max_touch_minor',\n 'invert_x', 'invert_y',\n 'rotation')\n\n def __init__(self, device, args):\n super(MTDMotionEventProvider, self).__init__(device, args)\n self._device = None\n self.input_fn = None\n self.default_ranges = dict()\n\n # split arguments\n args = args.split(',')\n if not args:\n Logger.error('MTD: No filename pass to MTD configuration')\n Logger.error('MTD: Use /dev/input/event0 for example')\n return\n\n # read filename\n self.input_fn = args[0]\n Logger.info('MTD: Read event from <%s>' % self.input_fn)\n\n # read parameters\n for arg in args[1:]:\n if arg == '':\n continue\n arg = arg.split('=')\n\n # ensure it's a key = value\n if len(arg) != 2:\n err = 'MTD: Bad parameter %s: Not in key=value format' %\\\n arg\n Logger.error(err)\n continue\n\n # ensure the key exist\n key, value = arg\n if key not in MTDMotionEventProvider.options:\n Logger.error('MTD: unknown %s option' % key)\n continue\n\n # ensure the value\n try:\n self.default_ranges[key] = int(value)\n except ValueError:\n err = 'MTD: invalid value %s for option %s' % (key, value)\n Logger.error(err)\n continue\n\n # all good!\n Logger.info('MTD: Set custom %s to %d' % (key, int(value)))\n\n if 'rotation' not in self.default_ranges:\n self.default_ranges['rotation'] = 0\n elif self.default_ranges['rotation'] not in (0, 90, 180, 270):\n Logger.error('HIDInput: invalid rotation value ({})'.format(\n self.default_ranges['rotation']))\n self.default_ranges['rotation'] = 0\n\n def start(self):\n if self.input_fn is None:\n return\n self.uid = 0\n self.queue = collections.deque()\n self.thread = threading.Thread(\n name=self.__class__.__name__,\n target=self._thread_run,\n kwargs=dict(\n queue=self.queue,\n input_fn=self.input_fn,\n device=self.device,\n default_ranges=self.default_ranges))\n self.thread.daemon = True\n self.thread.start()\n\n def _thread_run(self, **kwargs):\n input_fn = kwargs.get('input_fn')\n queue = kwargs.get('queue')\n device = kwargs.get('device')\n drs = kwargs.get('default_ranges').get\n touches = {}\n touches_sent = []\n point = {}\n l_points = {}\n\n def assign_coord(point, value, invert, coords):\n cx, cy = coords\n if invert:\n value = 1. - value\n if rotation == 0:\n point[cx] = value\n elif rotation == 90:\n point[cy] = value\n elif rotation == 180:\n point[cx] = 1. - value\n elif rotation == 270:\n point[cy] = 1. - value\n\n def process(points):\n for args in points:\n # this can happen if we have a touch going on already at\n # the start of the app\n if 'id' not in args:\n continue\n tid = args['id']\n try:\n touch = touches[tid]\n except KeyError:\n touch = MTDMotionEvent(device, tid, args)\n touches[touch.id] = touch\n touch.move(args)\n action = 'update'\n if tid not in touches_sent:\n action = 'begin'\n touches_sent.append(tid)\n if 'delete' in args:\n action = 'end'\n del args['delete']\n del touches[touch.id]\n touches_sent.remove(tid)\n touch.update_time_end()\n queue.append((action, touch))\n\n def normalize(value, vmin, vmax):\n try:\n return (value - vmin) / float(vmax - vmin)\n except ZeroDivisionError: # it's both in py2 and py3\n return (value - vmin)\n\n # open mtdev device\n _fn = input_fn\n _slot = 0\n try:\n _device = Device(_fn)\n except OSError as e:\n if e.errno == 13: # Permission denied\n Logger.warn(\n 'MTD: Unable to open device \"{0}\". 
Please ensure you'\n ' have the appropriate permissions.'.format(_fn))\n return\n else:\n raise\n _changes = set()\n\n # prepare some vars to get limit of some component\n ab = _device.get_abs(MTDEV_ABS_POSITION_X)\n range_min_position_x = drs('min_position_x', ab.minimum)\n range_max_position_x = drs('max_position_x', ab.maximum)\n Logger.info('MTD: <%s> range position X is %d - %d' %\n (_fn, range_min_position_x, range_max_position_x))\n\n ab = _device.get_abs(MTDEV_ABS_POSITION_Y)\n range_min_position_y = drs('min_position_y', ab.minimum)\n range_max_position_y = drs('max_position_y', ab.maximum)\n Logger.info('MTD: <%s> range position Y is %d - %d' %\n (_fn, range_min_position_y, range_max_position_y))\n\n ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR)\n range_min_major = drs('min_touch_major', ab.minimum)\n range_max_major = drs('max_touch_major', ab.maximum)\n Logger.info('MTD: <%s> range touch major is %d - %d' %\n (_fn, range_min_major, range_max_major))\n\n ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR)\n range_min_minor = drs('min_touch_minor', ab.minimum)\n range_max_minor = drs('max_touch_minor', ab.maximum)\n Logger.info('MTD: <%s> range touch minor is %d - %d' %\n (_fn, range_min_minor, range_max_minor))\n\n range_min_pressure = drs('min_pressure', 0)\n range_max_pressure = drs('max_pressure', 255)\n Logger.info('MTD: <%s> range pressure is %d - %d' %\n (_fn, range_min_pressure, range_max_pressure))\n\n invert_x = int(bool(drs('invert_x', 0)))\n invert_y = int(bool(drs('invert_y', 0)))\n Logger.info('MTD: <%s> axes invertion: X is %d, Y is %d' %\n (_fn, invert_x, invert_y))\n\n rotation = drs('rotation', 0)\n Logger.info('MTD: <%s> rotation set to %d' %\n (_fn, rotation))\n failures = 0\n while _device:\n # if device have disconnected lets try to connect\n if failures > 1000:\n Logger.info('MTD: <%s> input device disconnected' % _fn)\n while not os.path.exists(_fn):\n time.sleep(0.05)\n # input device is back online let's recreate device\n _device.close()\n _device = Device(_fn)\n Logger.info('MTD: <%s> input device reconnected' % _fn)\n failures = 0\n continue\n\n # idle as much as we can.\n while _device.idle(1000):\n continue\n\n # got data, read all without redoing idle\n while True:\n data = _device.get()\n if data is None:\n failures += 1\n break\n\n failures = 0\n\n # set the working slot\n if data.type == MTDEV_TYPE_EV_ABS and \\\n data.code == MTDEV_CODE_SLOT:\n _slot = data.value\n continue\n\n # fill the slot\n if not (_slot in l_points):\n l_points[_slot] = dict()\n point = l_points[_slot]\n ev_value = data.value\n ev_code = data.code\n if ev_code == MTDEV_CODE_POSITION_X:\n val = normalize(ev_value,\n range_min_position_x,\n range_max_position_x)\n assign_coord(point, val, invert_x, 'xy')\n elif ev_code == MTDEV_CODE_POSITION_Y:\n val = 1. 
- normalize(ev_value,\n range_min_position_y,\n range_max_position_y)\n assign_coord(point, val, invert_y, 'yx')\n elif ev_code == MTDEV_CODE_PRESSURE:\n point['pressure'] = normalize(ev_value,\n range_min_pressure,\n range_max_pressure)\n elif ev_code == MTDEV_CODE_TOUCH_MAJOR:\n point['size_w'] = normalize(ev_value,\n range_min_major,\n range_max_major)\n elif ev_code == MTDEV_CODE_TOUCH_MINOR:\n point['size_h'] = normalize(ev_value,\n range_min_minor,\n range_max_minor)\n elif ev_code == MTDEV_CODE_TRACKING_ID:\n if ev_value == -1:\n point['delete'] = True\n # force process of changes here, as the slot can be\n # reused.\n _changes.add(_slot)\n process([l_points[x] for x in _changes])\n _changes.clear()\n continue\n else:\n point['id'] = ev_value\n else:\n # unrecognized command, ignore.\n continue\n _changes.add(_slot)\n\n # push all changes\n if _changes:\n process([l_points[x] for x in _changes])\n _changes.clear()\n\n def update(self, dispatch_fn):\n # dispatch all event from threads\n try:\n while True:\n event_type, touch = self.queue.popleft()\n dispatch_fn(event_type, touch)\n except:\n pass\n\n MotionEventFactory.register('mtdev', MTDMotionEventProvider)\n", "path": "kivy/input/providers/mtdev.py"}], "after_files": [{"content": "'''\nNative support for Multitouch devices on Linux, using libmtdev.\n===============================================================\n\nThe Mtdev project is a part of the Ubuntu Maverick multitouch architecture.\nYou can read more on http://wiki.ubuntu.com/Multitouch\n\nTo configure MTDev, it's preferable to use probesysfs providers.\nCheck :py:class:`~kivy.input.providers.probesysfs` for more information.\n\nOtherwise, add this to your configuration::\n\n [input]\n # devicename = hidinput,/dev/input/eventXX\n acert230h = mtdev,/dev/input/event2\n\n.. 
note::\n You must have read access to the input event.\n\nYou can use a custom range for the X, Y and pressure values.\nOn some drivers, the range reported is invalid.\nTo fix that, you can add these options to the argument line:\n\n* invert_x : 1 to invert X axis\n* invert_y : 1 to invert Y axis\n* min_position_x : X minimum\n* max_position_x : X maximum\n* min_position_y : Y minimum\n* max_position_y : Y maximum\n* min_pressure : pressure minimum\n* max_pressure : pressure maximum\n* min_touch_major : width shape minimum\n* max_touch_major : width shape maximum\n* min_touch_minor : width shape minimum\n* max_touch_minor : height shape maximum\n* rotation : 0,90,180 or 270 to rotate\n\nAn inverted display configuration will look like this::\n\n [input]\n # example for inverting touch events\n display = mtdev,/dev/input/event0,invert_x=1,invert_y=1\n'''\n\n__all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')\n\nimport os\nimport os.path\nimport time\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.input.shape import ShapeRect\n\n\nclass MTDMotionEvent(MotionEvent):\n\n def depack(self, args):\n self.is_touch = True\n if 'x' in args:\n self.sx = args['x']\n else:\n self.sx = -1\n if 'y' in args:\n self.sy = args['y']\n else:\n self.sy = -1\n self.profile = ['pos']\n if 'size_w' in args and 'size_h' in args:\n self.shape = ShapeRect()\n self.shape.width = args['size_w']\n self.shape.height = args['size_h']\n self.profile.append('shape')\n if 'pressure' in args:\n self.pressure = args['pressure']\n self.profile.append('pressure')\n super(MTDMotionEvent, self).depack(args)\n\n def __str__(self):\n i, sx, sy, d = (self.id, self.sx, self.sy, self.device)\n return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d)\n\n\nif 'KIVY_DOC' in os.environ:\n\n # documentation hack\n MTDMotionEventProvider = None\n\nelse:\n import threading\n import collections\n from kivy.lib.mtdev import Device, \\\n MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \\\n MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \\\n MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \\\n MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \\\n MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \\\n MTDEV_ABS_TOUCH_MAJOR\n from kivy.input.provider import MotionEventProvider\n from kivy.input.factory import MotionEventFactory\n from kivy.logger import Logger\n\n class MTDMotionEventProvider(MotionEventProvider):\n\n options = ('min_position_x', 'max_position_x',\n 'min_position_y', 'max_position_y',\n 'min_pressure', 'max_pressure',\n 'min_touch_major', 'max_touch_major',\n 'min_touch_minor', 'max_touch_minor',\n 'invert_x', 'invert_y',\n 'rotation')\n\n def __init__(self, device, args):\n super(MTDMotionEventProvider, self).__init__(device, args)\n self._device = None\n self.input_fn = None\n self.default_ranges = dict()\n\n # split arguments\n args = args.split(',')\n if not args:\n Logger.error('MTD: No filename pass to MTD configuration')\n Logger.error('MTD: Use /dev/input/event0 for example')\n return\n\n # read filename\n self.input_fn = args[0]\n Logger.info('MTD: Read event from <%s>' % self.input_fn)\n\n # read parameters\n for arg in args[1:]:\n if arg == '':\n continue\n arg = arg.split('=')\n\n # ensure it's a key = value\n if len(arg) != 2:\n err = 'MTD: Bad parameter %s: Not in key=value format' %\\\n arg\n Logger.error(err)\n continue\n\n # ensure the key exist\n key, value = arg\n if key not in MTDMotionEventProvider.options:\n Logger.error('MTD: unknown %s option' % key)\n continue\n\n # ensure the 
value\n try:\n self.default_ranges[key] = int(value)\n except ValueError:\n err = 'MTD: invalid value %s for option %s' % (key, value)\n Logger.error(err)\n continue\n\n # all good!\n Logger.info('MTD: Set custom %s to %d' % (key, int(value)))\n\n if 'rotation' not in self.default_ranges:\n self.default_ranges['rotation'] = 0\n elif self.default_ranges['rotation'] not in (0, 90, 180, 270):\n Logger.error('HIDInput: invalid rotation value ({})'.format(\n self.default_ranges['rotation']))\n self.default_ranges['rotation'] = 0\n\n def start(self):\n if self.input_fn is None:\n return\n self.uid = 0\n self.queue = collections.deque()\n self.thread = threading.Thread(\n name=self.__class__.__name__,\n target=self._thread_run,\n kwargs=dict(\n queue=self.queue,\n input_fn=self.input_fn,\n device=self.device,\n default_ranges=self.default_ranges))\n self.thread.daemon = True\n self.thread.start()\n\n def _thread_run(self, **kwargs):\n input_fn = kwargs.get('input_fn')\n queue = kwargs.get('queue')\n device = kwargs.get('device')\n drs = kwargs.get('default_ranges').get\n touches = {}\n touches_sent = []\n point = {}\n l_points = {}\n\n def assign_coord(point, value, invert, coords):\n cx, cy = coords\n if invert:\n value = 1. - value\n if rotation == 0:\n point[cx] = value\n elif rotation == 90:\n point[cy] = value\n elif rotation == 180:\n point[cx] = 1. - value\n elif rotation == 270:\n point[cy] = 1. - value\n\n def process(points):\n for args in points:\n # this can happen if we have a touch going on already at\n # the start of the app\n if 'id' not in args:\n continue\n tid = args['id']\n try:\n touch = touches[tid]\n except KeyError:\n touch = MTDMotionEvent(device, tid, args)\n touches[touch.id] = touch\n touch.move(args)\n action = 'update'\n if tid not in touches_sent:\n action = 'begin'\n touches_sent.append(tid)\n if 'delete' in args:\n action = 'end'\n del args['delete']\n del touches[touch.id]\n touches_sent.remove(tid)\n touch.update_time_end()\n queue.append((action, touch))\n\n def normalize(value, vmin, vmax):\n try:\n return (value - vmin) / float(vmax - vmin)\n except ZeroDivisionError: # it's both in py2 and py3\n return (value - vmin)\n\n # open mtdev device\n _fn = input_fn\n _slot = 0\n try:\n _device = Device(_fn)\n except OSError as e:\n if e.errno == 13: # Permission denied\n Logger.warn(\n 'MTD: Unable to open device \"{0}\". 
Please ensure you'\n ' have the appropriate permissions.'.format(_fn))\n return\n else:\n raise\n _changes = set()\n\n # prepare some vars to get limit of some component\n ab = _device.get_abs(MTDEV_ABS_POSITION_X)\n range_min_position_x = drs('min_position_x', ab.minimum)\n range_max_position_x = drs('max_position_x', ab.maximum)\n Logger.info('MTD: <%s> range position X is %d - %d' %\n (_fn, range_min_position_x, range_max_position_x))\n\n ab = _device.get_abs(MTDEV_ABS_POSITION_Y)\n range_min_position_y = drs('min_position_y', ab.minimum)\n range_max_position_y = drs('max_position_y', ab.maximum)\n Logger.info('MTD: <%s> range position Y is %d - %d' %\n (_fn, range_min_position_y, range_max_position_y))\n\n ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR)\n range_min_major = drs('min_touch_major', ab.minimum)\n range_max_major = drs('max_touch_major', ab.maximum)\n Logger.info('MTD: <%s> range touch major is %d - %d' %\n (_fn, range_min_major, range_max_major))\n\n ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR)\n range_min_minor = drs('min_touch_minor', ab.minimum)\n range_max_minor = drs('max_touch_minor', ab.maximum)\n Logger.info('MTD: <%s> range touch minor is %d - %d' %\n (_fn, range_min_minor, range_max_minor))\n\n range_min_pressure = drs('min_pressure', 0)\n range_max_pressure = drs('max_pressure', 255)\n Logger.info('MTD: <%s> range pressure is %d - %d' %\n (_fn, range_min_pressure, range_max_pressure))\n\n invert_x = int(bool(drs('invert_x', 0)))\n invert_y = int(bool(drs('invert_y', 0)))\n Logger.info('MTD: <%s> axes invertion: X is %d, Y is %d' %\n (_fn, invert_x, invert_y))\n\n rotation = drs('rotation', 0)\n Logger.info('MTD: <%s> rotation set to %d' %\n (_fn, rotation))\n failures = 0\n while _device:\n # if device have disconnected lets try to connect\n if failures > 1000:\n Logger.info('MTD: <%s> input device disconnected' % _fn)\n while not os.path.exists(_fn):\n time.sleep(0.05)\n # input device is back online let's recreate device\n _device.close()\n _device = Device(_fn)\n Logger.info('MTD: <%s> input device reconnected' % _fn)\n failures = 0\n continue\n\n # idle as much as we can.\n while _device.idle(1000):\n continue\n\n # got data, read all without redoing idle\n while True:\n data = _device.get()\n if data is None:\n failures += 1\n break\n\n failures = 0\n\n # set the working slot\n if data.type == MTDEV_TYPE_EV_ABS and \\\n data.code == MTDEV_CODE_SLOT:\n _slot = data.value\n continue\n\n # fill the slot\n if not (_slot in l_points):\n l_points[_slot] = dict()\n point = l_points[_slot]\n ev_value = data.value\n ev_code = data.code\n if ev_code == MTDEV_CODE_POSITION_X:\n val = normalize(ev_value,\n range_min_position_x,\n range_max_position_x)\n assign_coord(point, val, invert_x, 'xy')\n elif ev_code == MTDEV_CODE_POSITION_Y:\n val = 1. 
- normalize(ev_value,\n range_min_position_y,\n range_max_position_y)\n assign_coord(point, val, invert_y, 'yx')\n elif ev_code == MTDEV_CODE_PRESSURE:\n point['pressure'] = normalize(ev_value,\n range_min_pressure,\n range_max_pressure)\n elif ev_code == MTDEV_CODE_TOUCH_MAJOR:\n point['size_w'] = normalize(ev_value,\n range_min_major,\n range_max_major)\n elif ev_code == MTDEV_CODE_TOUCH_MINOR:\n point['size_h'] = normalize(ev_value,\n range_min_minor,\n range_max_minor)\n elif ev_code == MTDEV_CODE_TRACKING_ID:\n if ev_value == -1:\n point['delete'] = True\n # force process of changes here, as the slot can be\n # reused.\n _changes.add(_slot)\n process([l_points[x] for x in _changes])\n _changes.clear()\n continue\n else:\n point['id'] = ev_value\n else:\n # unrecognized command, ignore.\n continue\n _changes.add(_slot)\n\n # push all changes\n if _changes:\n process([l_points[x] for x in _changes])\n _changes.clear()\n\n def update(self, dispatch_fn):\n # dispatch all event from threads\n try:\n while True:\n event_type, touch = self.queue.popleft()\n dispatch_fn(event_type, touch)\n except:\n pass\n\n MotionEventFactory.register('mtdev', MTDMotionEventProvider)\n", "path": "kivy/input/providers/mtdev.py"}]} |
gh_patches_debug_1322 | rasdani/github-patches | git_diff | pyca__cryptography-4619 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Building against openssl-1.1.1 configured with no-psk results in a broken .so
If OpenSSL 1.1.1 is detected, the TLSv1.3 PSK bindings are enabled unconditionally. However, PSK support can be disabled in OpenSSL via the `no-psk` configuration option.
Building against such an OpenSSL build gives the following:
```
$ ldd /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so
ldd (0x7ff279843000)
libssl.so.1.1 => /lib/libssl.so.1.1 (0x7ff279705000)
libcrypto.so.1.1 => /lib/libcrypto.so.1.1 (0x7ff279488000)
libpython3.6m.so.1.0 => /usr/lib/libpython3.6m.so.1.0 (0x7ff279202000)
libc.musl-x86_64.so.1 => ldd (0x7ff279843000)
Error relocating /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so: SSL_CTX_set_psk_client_callback: symbol not found
Error relocating /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so: SSL_CTX_use_psk_identity_hint: symbol not found
Error relocating /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so: SSL_CTX_set_psk_server_callback: symbol not found
```
--- END ISSUE ---
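For orientation before the file listing below: the unresolved symbols in the ldd output are exactly the PSK functions declared in the FUNCTIONS block of `src/_cffi_src/openssl/ssl.py`, which are bound unconditionally even though a `no-psk` OpenSSL build omits them. As a hedged illustration only (not the shipped patch), the usual way such optional bindings are handled is a preprocessor guard in the customizations, assuming OpenSSL defines `OPENSSL_NO_PSK` for such builds:

```
#ifdef OPENSSL_NO_PSK
static const long Cryptography_HAS_PSK = 0;
/* stub the real symbols with NULL function pointers so the module still links */
int (*SSL_CTX_use_psk_identity_hint)(SSL_CTX *, const char *) = NULL;
void (*SSL_CTX_set_psk_server_callback)(SSL_CTX *,
                                        unsigned int (*)(SSL *,
                                                         const char *,
                                                         unsigned char *,
                                                         unsigned int)) = NULL;
void (*SSL_CTX_set_psk_client_callback)(SSL_CTX *,
                                        unsigned int (*)(SSL *,
                                                         const char *,
                                                         char *,
                                                         unsigned int,
                                                         unsigned char *,
                                                         unsigned int)) = NULL;
#else
static const long Cryptography_HAS_PSK = 1;
#endif
```

With `Cryptography_HAS_PSK` exposed as 0, the Python layer can drop the PSK names at runtime instead of failing at import with relocation errors.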
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/_cffi_src/openssl/ssl.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 INCLUDES = """
8 #include <openssl/ssl.h>
9
10 typedef STACK_OF(SSL_CIPHER) Cryptography_STACK_OF_SSL_CIPHER;
11 """
12
13 TYPES = """
14 static const long Cryptography_HAS_SSL_ST;
15 static const long Cryptography_HAS_TLS_ST;
16 static const long Cryptography_HAS_SSL2;
17 static const long Cryptography_HAS_SSL3_METHOD;
18 static const long Cryptography_HAS_TLSv1_1;
19 static const long Cryptography_HAS_TLSv1_2;
20 static const long Cryptography_HAS_TLSv1_3;
21 static const long Cryptography_HAS_SECURE_RENEGOTIATION;
22 static const long Cryptography_HAS_COMPRESSION;
23 static const long Cryptography_HAS_TLSEXT_STATUS_REQ_CB;
24 static const long Cryptography_HAS_STATUS_REQ_OCSP_RESP;
25 static const long Cryptography_HAS_TLSEXT_STATUS_REQ_TYPE;
26 static const long Cryptography_HAS_GET_SERVER_TMP_KEY;
27 static const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE;
28 static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS;
29 static const long Cryptography_HAS_DTLS;
30 static const long Cryptography_HAS_GENERIC_DTLS_METHOD;
31 static const long Cryptography_HAS_SIGALGS;
32 static const long Cryptography_HAS_PSK;
33 static const long Cryptography_HAS_CIPHER_DETAILS;
34
35 /* Internally invented symbol to tell us if SNI is supported */
36 static const long Cryptography_HAS_TLSEXT_HOSTNAME;
37
38 /* Internally invented symbol to tell us if SSL_MODE_RELEASE_BUFFERS is
39 * supported
40 */
41 static const long Cryptography_HAS_RELEASE_BUFFERS;
42
43 /* Internally invented symbol to tell us if SSL_OP_NO_COMPRESSION is
44 * supported
45 */
46 static const long Cryptography_HAS_OP_NO_COMPRESSION;
47 static const long Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING;
48 static const long Cryptography_HAS_SSL_SET_SSL_CTX;
49 static const long Cryptography_HAS_SSL_OP_NO_TICKET;
50 static const long Cryptography_HAS_ALPN;
51 static const long Cryptography_HAS_NEXTPROTONEG;
52 static const long Cryptography_HAS_SET_CERT_CB;
53 static const long Cryptography_HAS_CUSTOM_EXT;
54
55 static const long SSL_FILETYPE_PEM;
56 static const long SSL_FILETYPE_ASN1;
57 static const long SSL_ERROR_NONE;
58 static const long SSL_ERROR_ZERO_RETURN;
59 static const long SSL_ERROR_WANT_READ;
60 static const long SSL_ERROR_WANT_WRITE;
61 static const long SSL_ERROR_WANT_X509_LOOKUP;
62 static const long SSL_ERROR_WANT_CONNECT;
63 static const long SSL_ERROR_SYSCALL;
64 static const long SSL_ERROR_SSL;
65 static const long SSL_SENT_SHUTDOWN;
66 static const long SSL_RECEIVED_SHUTDOWN;
67 static const long SSL_OP_NO_SSLv2;
68 static const long SSL_OP_NO_SSLv3;
69 static const long SSL_OP_NO_TLSv1;
70 static const long SSL_OP_NO_TLSv1_1;
71 static const long SSL_OP_NO_TLSv1_2;
72 static const long SSL_OP_NO_TLSv1_3;
73 static const long SSL_OP_NO_DTLSv1;
74 static const long SSL_OP_NO_DTLSv1_2;
75 static const long SSL_OP_NO_COMPRESSION;
76 static const long SSL_OP_SINGLE_DH_USE;
77 static const long SSL_OP_EPHEMERAL_RSA;
78 static const long SSL_OP_MICROSOFT_SESS_ID_BUG;
79 static const long SSL_OP_NETSCAPE_CHALLENGE_BUG;
80 static const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG;
81 static const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG;
82 static const long SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER;
83 static const long SSL_OP_MSIE_SSLV2_RSA_PADDING;
84 static const long SSL_OP_SSLEAY_080_CLIENT_DH_BUG;
85 static const long SSL_OP_TLS_D5_BUG;
86 static const long SSL_OP_TLS_BLOCK_PADDING_BUG;
87 static const long SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS;
88 static const long SSL_OP_CIPHER_SERVER_PREFERENCE;
89 static const long SSL_OP_TLS_ROLLBACK_BUG;
90 static const long SSL_OP_PKCS1_CHECK_1;
91 static const long SSL_OP_PKCS1_CHECK_2;
92 static const long SSL_OP_NETSCAPE_CA_DN_BUG;
93 static const long SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG;
94 static const long SSL_OP_NO_QUERY_MTU;
95 static const long SSL_OP_COOKIE_EXCHANGE;
96 static const long SSL_OP_NO_TICKET;
97 static const long SSL_OP_ALL;
98 static const long SSL_OP_SINGLE_ECDH_USE;
99 static const long SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION;
100 static const long SSL_OP_LEGACY_SERVER_CONNECT;
101 static const long SSL_VERIFY_PEER;
102 static const long SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
103 static const long SSL_VERIFY_CLIENT_ONCE;
104 static const long SSL_VERIFY_NONE;
105 static const long SSL_VERIFY_POST_HANDSHAKE;
106 static const long SSL_SESS_CACHE_OFF;
107 static const long SSL_SESS_CACHE_CLIENT;
108 static const long SSL_SESS_CACHE_SERVER;
109 static const long SSL_SESS_CACHE_BOTH;
110 static const long SSL_SESS_CACHE_NO_AUTO_CLEAR;
111 static const long SSL_SESS_CACHE_NO_INTERNAL_LOOKUP;
112 static const long SSL_SESS_CACHE_NO_INTERNAL_STORE;
113 static const long SSL_SESS_CACHE_NO_INTERNAL;
114 static const long SSL_ST_CONNECT;
115 static const long SSL_ST_ACCEPT;
116 static const long SSL_ST_MASK;
117 static const long SSL_ST_INIT;
118 static const long SSL_ST_BEFORE;
119 static const long SSL_ST_OK;
120 static const long SSL_ST_RENEGOTIATE;
121 static const long SSL_CB_LOOP;
122 static const long SSL_CB_EXIT;
123 static const long SSL_CB_READ;
124 static const long SSL_CB_WRITE;
125 static const long SSL_CB_ALERT;
126 static const long SSL_CB_READ_ALERT;
127 static const long SSL_CB_WRITE_ALERT;
128 static const long SSL_CB_ACCEPT_LOOP;
129 static const long SSL_CB_ACCEPT_EXIT;
130 static const long SSL_CB_CONNECT_LOOP;
131 static const long SSL_CB_CONNECT_EXIT;
132 static const long SSL_CB_HANDSHAKE_START;
133 static const long SSL_CB_HANDSHAKE_DONE;
134 static const long SSL_MODE_RELEASE_BUFFERS;
135 static const long SSL_MODE_ENABLE_PARTIAL_WRITE;
136 static const long SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER;
137 static const long SSL_MODE_AUTO_RETRY;
138 static const long SSL3_RANDOM_SIZE;
139 static const long TLS_ST_BEFORE;
140 static const long TLS_ST_OK;
141
142 static const long OPENSSL_NPN_NEGOTIATED;
143
144 typedef ... SSL_METHOD;
145 typedef ... SSL_CTX;
146
147 typedef ... SSL_SESSION;
148
149 typedef ... SSL;
150
151 static const long TLSEXT_NAMETYPE_host_name;
152 static const long TLSEXT_STATUSTYPE_ocsp;
153
154 typedef ... SSL_CIPHER;
155 typedef ... Cryptography_STACK_OF_SSL_CIPHER;
156 typedef ... COMP_METHOD;
157 """
158
159 FUNCTIONS = """
160 /* SSL */
161 const char *SSL_state_string_long(const SSL *);
162 SSL_SESSION *SSL_get1_session(SSL *);
163 int SSL_set_session(SSL *, SSL_SESSION *);
164 int SSL_get_verify_mode(const SSL *);
165 void SSL_set_verify(SSL *, int, int (*)(int, X509_STORE_CTX *));
166 void SSL_set_verify_depth(SSL *, int);
167 int SSL_get_verify_depth(const SSL *);
168 int (*SSL_get_verify_callback(const SSL *))(int, X509_STORE_CTX *);
169 void SSL_set_info_callback(SSL *ssl, void (*)(const SSL *, int, int));
170 void (*SSL_get_info_callback(const SSL *))(const SSL *, int, int);
171 SSL *SSL_new(SSL_CTX *);
172 void SSL_free(SSL *);
173 int SSL_set_fd(SSL *, int);
174 SSL_CTX *SSL_get_SSL_CTX(const SSL *);
175 SSL_CTX *SSL_set_SSL_CTX(SSL *, SSL_CTX *);
176 BIO *SSL_get_rbio(const SSL *);
177 BIO *SSL_get_wbio(const SSL *);
178 void SSL_set_bio(SSL *, BIO *, BIO *);
179 void SSL_set_connect_state(SSL *);
180 void SSL_set_accept_state(SSL *);
181 void SSL_set_shutdown(SSL *, int);
182 int SSL_get_shutdown(const SSL *);
183 int SSL_pending(const SSL *);
184 int SSL_write(SSL *, const void *, int);
185 int SSL_read(SSL *, void *, int);
186 int SSL_peek(SSL *, void *, int);
187 X509 *SSL_get_certificate(const SSL *);
188 X509 *SSL_get_peer_certificate(const SSL *);
189 int SSL_get_ex_data_X509_STORE_CTX_idx(void);
190
191 /* Added in 1.0.2 */
192 X509_VERIFY_PARAM *SSL_get0_param(SSL *);
193
194 int SSL_use_certificate(SSL *, X509 *);
195 int SSL_use_certificate_ASN1(SSL *, const unsigned char *, int);
196 int SSL_use_certificate_file(SSL *, const char *, int);
197 int SSL_use_PrivateKey(SSL *, EVP_PKEY *);
198 int SSL_use_PrivateKey_ASN1(int, SSL *, const unsigned char *, long);
199 int SSL_use_PrivateKey_file(SSL *, const char *, int);
200 int SSL_check_private_key(const SSL *);
201
202 int SSL_get_sigalgs(SSL *, int, int *, int *, int *, unsigned char *,
203 unsigned char *);
204
205 Cryptography_STACK_OF_X509 *SSL_get_peer_cert_chain(const SSL *);
206 Cryptography_STACK_OF_X509_NAME *SSL_get_client_CA_list(const SSL *);
207
208 int SSL_get_error(const SSL *, int);
209 int SSL_do_handshake(SSL *);
210 int SSL_shutdown(SSL *);
211 int SSL_renegotiate(SSL *);
212 int SSL_renegotiate_pending(SSL *);
213 const char *SSL_get_cipher_list(const SSL *, int);
214 Cryptography_STACK_OF_SSL_CIPHER *SSL_get_ciphers(const SSL *);
215
216 /* context */
217 void SSL_CTX_free(SSL_CTX *);
218 long SSL_CTX_set_timeout(SSL_CTX *, long);
219 int SSL_CTX_set_default_verify_paths(SSL_CTX *);
220 void SSL_CTX_set_verify(SSL_CTX *, int, int (*)(int, X509_STORE_CTX *));
221 void SSL_CTX_set_verify_depth(SSL_CTX *, int);
222 int (*SSL_CTX_get_verify_callback(const SSL_CTX *))(int, X509_STORE_CTX *);
223 int SSL_CTX_get_verify_mode(const SSL_CTX *);
224 int SSL_CTX_get_verify_depth(const SSL_CTX *);
225 int SSL_CTX_set_cipher_list(SSL_CTX *, const char *);
226 int SSL_CTX_load_verify_locations(SSL_CTX *, const char *, const char *);
227 void SSL_CTX_set_default_passwd_cb(SSL_CTX *, pem_password_cb *);
228 void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *, void *);
229 int SSL_CTX_use_certificate(SSL_CTX *, X509 *);
230 int SSL_CTX_use_certificate_ASN1(SSL_CTX *, int, const unsigned char *);
231 int SSL_CTX_use_certificate_file(SSL_CTX *, const char *, int);
232 int SSL_CTX_use_certificate_chain_file(SSL_CTX *, const char *);
233 int SSL_CTX_use_PrivateKey(SSL_CTX *, EVP_PKEY *);
234 int SSL_CTX_use_PrivateKey_ASN1(int, SSL_CTX *, const unsigned char *, long);
235 int SSL_CTX_use_PrivateKey_file(SSL_CTX *, const char *, int);
236 int SSL_CTX_check_private_key(const SSL_CTX *);
237 void SSL_CTX_set_cert_verify_callback(SSL_CTX *,
238 int (*)(X509_STORE_CTX *, void *),
239 void *);
240
241 void SSL_CTX_set_cookie_generate_cb(SSL_CTX *,
242 int (*)(
243 SSL *,
244 unsigned char *,
245 unsigned int *
246 ));
247 long SSL_CTX_get_read_ahead(SSL_CTX *);
248 long SSL_CTX_set_read_ahead(SSL_CTX *, long);
249
250 int SSL_CTX_use_psk_identity_hint(SSL_CTX *, const char *);
251 void SSL_CTX_set_psk_server_callback(SSL_CTX *,
252 unsigned int (*)(
253 SSL *,
254 const char *,
255 unsigned char *,
256 unsigned int
257 ));
258 void SSL_CTX_set_psk_client_callback(SSL_CTX *,
259 unsigned int (*)(
260 SSL *,
261 const char *,
262 char *,
263 unsigned int,
264 unsigned char *,
265 unsigned int
266 ));
267
268 int SSL_CTX_set_session_id_context(SSL_CTX *, const unsigned char *,
269 unsigned int);
270
271 void SSL_CTX_set_cert_store(SSL_CTX *, X509_STORE *);
272 X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *);
273 int SSL_CTX_add_client_CA(SSL_CTX *, X509 *);
274
275 void SSL_CTX_set_client_CA_list(SSL_CTX *, Cryptography_STACK_OF_X509_NAME *);
276
277 void SSL_CTX_set_info_callback(SSL_CTX *, void (*)(const SSL *, int, int));
278 void (*SSL_CTX_get_info_callback(SSL_CTX *))(const SSL *, int, int);
279
280 long SSL_CTX_set1_sigalgs_list(SSL_CTX *, const char *);
281
282 /* SSL_SESSION */
283 void SSL_SESSION_free(SSL_SESSION *);
284
285 /* Information about actually used cipher */
286 const char *SSL_CIPHER_get_name(const SSL_CIPHER *);
287 int SSL_CIPHER_get_bits(const SSL_CIPHER *, int *);
288 /* the modern signature of this is uint32_t, but older openssl declared it
289 as unsigned long. To make our compiler flags happy we'll declare it as a
290 64-bit wide value, which should always be safe */
291 uint64_t SSL_CIPHER_get_id(const SSL_CIPHER *);
292 int SSL_CIPHER_is_aead(const SSL_CIPHER *);
293 int SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *);
294 int SSL_CIPHER_get_digest_nid(const SSL_CIPHER *);
295 int SSL_CIPHER_get_kx_nid(const SSL_CIPHER *);
296 int SSL_CIPHER_get_auth_nid(const SSL_CIPHER *);
297
298 size_t SSL_get_finished(const SSL *, void *, size_t);
299 size_t SSL_get_peer_finished(const SSL *, void *, size_t);
300 Cryptography_STACK_OF_X509_NAME *SSL_load_client_CA_file(const char *);
301
302 const char *SSL_get_servername(const SSL *, const int);
303 /* Function signature changed to const char * in 1.1.0 */
304 const char *SSL_CIPHER_get_version(const SSL_CIPHER *);
305 /* These became macros in 1.1.0 */
306 int SSL_library_init(void);
307 void SSL_load_error_strings(void);
308
309 /* these CRYPTO_EX_DATA functions became macros in 1.1.0 */
310 int SSL_get_ex_new_index(long, void *, CRYPTO_EX_new *, CRYPTO_EX_dup *,
311 CRYPTO_EX_free *);
312 int SSL_set_ex_data(SSL *, int, void *);
313 int SSL_CTX_get_ex_new_index(long, void *, CRYPTO_EX_new *, CRYPTO_EX_dup *,
314 CRYPTO_EX_free *);
315 int SSL_CTX_set_ex_data(SSL_CTX *, int, void *);
316
317 SSL_SESSION *SSL_get_session(const SSL *);
318 const unsigned char *SSL_SESSION_get_id(const SSL_SESSION *, unsigned int *);
319 long SSL_SESSION_get_time(const SSL_SESSION *);
320 long SSL_SESSION_get_timeout(const SSL_SESSION *);
321 int SSL_SESSION_has_ticket(const SSL_SESSION *);
322 long SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *);
323
324 /* not a macro, but older OpenSSLs don't pass the args as const */
325 char *SSL_CIPHER_description(const SSL_CIPHER *, char *, int);
326 int SSL_SESSION_print(BIO *, const SSL_SESSION *);
327
328 /* not macros, but will be conditionally bound so can't live in functions */
329 const COMP_METHOD *SSL_get_current_compression(SSL *);
330 const COMP_METHOD *SSL_get_current_expansion(SSL *);
331 const char *SSL_COMP_get_name(const COMP_METHOD *);
332 int SSL_CTX_set_client_cert_engine(SSL_CTX *, ENGINE *);
333
334 unsigned long SSL_set_mode(SSL *, unsigned long);
335 unsigned long SSL_get_mode(SSL *);
336
337 unsigned long SSL_set_options(SSL *, unsigned long);
338 unsigned long SSL_get_options(SSL *);
339
340 void SSL_set_app_data(SSL *, char *);
341 char * SSL_get_app_data(SSL *);
342 void SSL_set_read_ahead(SSL *, int);
343
344 int SSL_want_read(const SSL *);
345 int SSL_want_write(const SSL *);
346
347 long SSL_total_renegotiations(SSL *);
348 long SSL_get_secure_renegotiation_support(SSL *);
349
350 /* Defined as unsigned long because SSL_OP_ALL is greater than signed 32-bit
351 and Windows defines long as 32-bit. */
352 unsigned long SSL_CTX_set_options(SSL_CTX *, unsigned long);
353 unsigned long SSL_CTX_clear_options(SSL_CTX *, unsigned long);
354 unsigned long SSL_CTX_get_options(SSL_CTX *);
355 unsigned long SSL_CTX_set_mode(SSL_CTX *, unsigned long);
356 unsigned long SSL_CTX_get_mode(SSL_CTX *);
357 unsigned long SSL_CTX_set_session_cache_mode(SSL_CTX *, unsigned long);
358 unsigned long SSL_CTX_get_session_cache_mode(SSL_CTX *);
359 unsigned long SSL_CTX_set_tmp_dh(SSL_CTX *, DH *);
360 unsigned long SSL_CTX_set_tmp_ecdh(SSL_CTX *, EC_KEY *);
361 unsigned long SSL_CTX_add_extra_chain_cert(SSL_CTX *, X509 *);
362
363 /*- These aren't macros these functions are all const X on openssl > 1.0.x -*/
364
365 /* methods */
366
367 /*
368 * TLSv1_1 and TLSv1_2 are recent additions. Only sufficiently new versions of
369 * OpenSSL support them.
370 */
371 const SSL_METHOD *TLSv1_1_method(void);
372 const SSL_METHOD *TLSv1_1_server_method(void);
373 const SSL_METHOD *TLSv1_1_client_method(void);
374
375 const SSL_METHOD *TLSv1_2_method(void);
376 const SSL_METHOD *TLSv1_2_server_method(void);
377 const SSL_METHOD *TLSv1_2_client_method(void);
378
379 const SSL_METHOD *SSLv3_method(void);
380 const SSL_METHOD *SSLv3_server_method(void);
381 const SSL_METHOD *SSLv3_client_method(void);
382
383 const SSL_METHOD *TLSv1_method(void);
384 const SSL_METHOD *TLSv1_server_method(void);
385 const SSL_METHOD *TLSv1_client_method(void);
386
387 const SSL_METHOD *DTLSv1_method(void);
388 const SSL_METHOD *DTLSv1_server_method(void);
389 const SSL_METHOD *DTLSv1_client_method(void);
390
391 /* Added in 1.0.2 */
392 const SSL_METHOD *DTLS_method(void);
393 const SSL_METHOD *DTLS_server_method(void);
394 const SSL_METHOD *DTLS_client_method(void);
395
396 const SSL_METHOD *SSLv23_method(void);
397 const SSL_METHOD *SSLv23_server_method(void);
398 const SSL_METHOD *SSLv23_client_method(void);
399
400 /*- These aren't macros these arguments are all const X on openssl > 1.0.x -*/
401 SSL_CTX *SSL_CTX_new(SSL_METHOD *);
402 long SSL_CTX_get_timeout(const SSL_CTX *);
403
404 const SSL_CIPHER *SSL_get_current_cipher(const SSL *);
405 const char *SSL_get_version(const SSL *);
406 int SSL_version(const SSL *);
407
408 void *SSL_CTX_get_ex_data(const SSL_CTX *, int);
409 void *SSL_get_ex_data(const SSL *, int);
410
411 void SSL_set_tlsext_host_name(SSL *, char *);
412 void SSL_CTX_set_tlsext_servername_callback(
413 SSL_CTX *,
414 int (*)(SSL *, int *, void *));
415 void SSL_CTX_set_tlsext_servername_arg(
416 SSL_CTX *, void *);
417
418 long SSL_set_tlsext_status_ocsp_resp(SSL *, unsigned char *, int);
419 long SSL_get_tlsext_status_ocsp_resp(SSL *, const unsigned char **);
420 long SSL_set_tlsext_status_type(SSL *, long);
421 long SSL_CTX_set_tlsext_status_cb(SSL_CTX *, int(*)(SSL *, void *));
422 long SSL_CTX_set_tlsext_status_arg(SSL_CTX *, void *);
423
424 int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *, const char *);
425 int SSL_set_tlsext_use_srtp(SSL *, const char *);
426
427 long SSL_session_reused(SSL *);
428
429 void SSL_CTX_set_next_protos_advertised_cb(SSL_CTX *,
430 int (*)(SSL *,
431 const unsigned char **,
432 unsigned int *,
433 void *),
434 void *);
435 void SSL_CTX_set_next_proto_select_cb(SSL_CTX *,
436 int (*)(SSL *,
437 unsigned char **,
438 unsigned char *,
439 const unsigned char *,
440 unsigned int,
441 void *),
442 void *);
443 int SSL_select_next_proto(unsigned char **, unsigned char *,
444 const unsigned char *, unsigned int,
445 const unsigned char *, unsigned int);
446 void SSL_get0_next_proto_negotiated(const SSL *,
447 const unsigned char **, unsigned *);
448
449 int sk_SSL_CIPHER_num(Cryptography_STACK_OF_SSL_CIPHER *);
450 const SSL_CIPHER *sk_SSL_CIPHER_value(Cryptography_STACK_OF_SSL_CIPHER *, int);
451
452 /* ALPN APIs were introduced in OpenSSL 1.0.2. To continue to support earlier
453 * versions some special handling of these is necessary.
454 */
455 int SSL_CTX_set_alpn_protos(SSL_CTX *, const unsigned char *, unsigned);
456 int SSL_set_alpn_protos(SSL *, const unsigned char *, unsigned);
457 void SSL_CTX_set_alpn_select_cb(SSL_CTX *,
458 int (*) (SSL *,
459 const unsigned char **,
460 unsigned char *,
461 const unsigned char *,
462 unsigned int,
463 void *),
464 void *);
465 void SSL_get0_alpn_selected(const SSL *, const unsigned char **, unsigned *);
466
467 long SSL_get_server_tmp_key(SSL *, EVP_PKEY **);
468
469 /* SSL_CTX_set_cert_cb is introduced in OpenSSL 1.0.2. To continue to support
470 * earlier versions some special handling of these is necessary.
471 */
472 void SSL_CTX_set_cert_cb(SSL_CTX *, int (*)(SSL *, void *), void *);
473 void SSL_set_cert_cb(SSL *, int (*)(SSL *, void *), void *);
474
475 /* Added in 1.0.2 */
476 const SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *);
477
478 int SSL_SESSION_set1_id_context(SSL_SESSION *, const unsigned char *,
479 unsigned int);
480 /* Added in 1.1.0 for the great opaquing of structs */
481 size_t SSL_SESSION_get_master_key(const SSL_SESSION *, unsigned char *,
482 size_t);
483 size_t SSL_get_client_random(const SSL *, unsigned char *, size_t);
484 size_t SSL_get_server_random(const SSL *, unsigned char *, size_t);
485 int SSL_export_keying_material(SSL *, unsigned char *, size_t, const char *,
486 size_t, const unsigned char *, size_t, int);
487
488 long SSL_CTX_sess_number(SSL_CTX *);
489 long SSL_CTX_sess_connect(SSL_CTX *);
490 long SSL_CTX_sess_connect_good(SSL_CTX *);
491 long SSL_CTX_sess_connect_renegotiate(SSL_CTX *);
492 long SSL_CTX_sess_accept(SSL_CTX *);
493 long SSL_CTX_sess_accept_good(SSL_CTX *);
494 long SSL_CTX_sess_accept_renegotiate(SSL_CTX *);
495 long SSL_CTX_sess_hits(SSL_CTX *);
496 long SSL_CTX_sess_cb_hits(SSL_CTX *);
497 long SSL_CTX_sess_misses(SSL_CTX *);
498 long SSL_CTX_sess_timeouts(SSL_CTX *);
499 long SSL_CTX_sess_cache_full(SSL_CTX *);
500
501 /* DTLS support */
502 long Cryptography_DTLSv1_get_timeout(SSL *, time_t *, long *);
503 long DTLSv1_handle_timeout(SSL *);
504 long DTLS_set_link_mtu(SSL *, long);
505 long DTLS_get_link_min_mtu(SSL *);
506
507 /* Custom extensions. */
508 typedef int (*custom_ext_add_cb)(SSL *, unsigned int,
509 const unsigned char **,
510 size_t *, int *,
511 void *);
512
513 typedef void (*custom_ext_free_cb)(SSL *, unsigned int,
514 const unsigned char *,
515 void *);
516
517 typedef int (*custom_ext_parse_cb)(SSL *, unsigned int,
518 const unsigned char *,
519 size_t, int *,
520 void *);
521
522 int SSL_CTX_add_client_custom_ext(SSL_CTX *, unsigned int,
523 custom_ext_add_cb,
524 custom_ext_free_cb, void *,
525 custom_ext_parse_cb,
526 void *);
527
528 int SSL_CTX_add_server_custom_ext(SSL_CTX *, unsigned int,
529 custom_ext_add_cb,
530 custom_ext_free_cb, void *,
531 custom_ext_parse_cb,
532 void *);
533
534 int SSL_extension_supported(unsigned int);
535
536 int SSL_CTX_set_ciphersuites(SSL_CTX *, const char *);
537 int SSL_verify_client_post_handshake(SSL *);
538 void SSL_CTX_set_post_handshake_auth(SSL_CTX *, int);
539 void SSL_set_post_handshake_auth(SSL *, int);
540
541 uint32_t SSL_SESSION_get_max_early_data(const SSL_SESSION *);
542 int SSL_write_early_data(SSL *, const void *, size_t, size_t *);
543 int SSL_read_early_data(SSL *, void *, size_t, size_t *);
544 int SSL_CTX_set_max_early_data(SSL_CTX *, uint32_t);
545 """
546
547 CUSTOMIZATIONS = """
548 /* Added in 1.0.2 but we need it in all versions now due to the great
549 opaquing. */
550 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102
551 /* from ssl/ssl_lib.c */
552 const SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *ctx) {
553 return ctx->method;
554 }
555 #endif
556
557 /* Added in 1.1.0 in the great opaquing, but we need to define it for older
558 OpenSSLs. Such is our burden. */
559 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER
560 /* from ssl/ssl_lib.c */
561 size_t SSL_get_client_random(const SSL *ssl, unsigned char *out, size_t outlen)
562 {
563 if (outlen == 0)
564 return sizeof(ssl->s3->client_random);
565 if (outlen > sizeof(ssl->s3->client_random))
566 outlen = sizeof(ssl->s3->client_random);
567 memcpy(out, ssl->s3->client_random, outlen);
568 return outlen;
569 }
570 /* Added in 1.1.0 as well */
571 /* from ssl/ssl_lib.c */
572 size_t SSL_get_server_random(const SSL *ssl, unsigned char *out, size_t outlen)
573 {
574 if (outlen == 0)
575 return sizeof(ssl->s3->server_random);
576 if (outlen > sizeof(ssl->s3->server_random))
577 outlen = sizeof(ssl->s3->server_random);
578 memcpy(out, ssl->s3->server_random, outlen);
579 return outlen;
580 }
581 /* Added in 1.1.0 as well */
582 /* from ssl/ssl_lib.c */
583 size_t SSL_SESSION_get_master_key(const SSL_SESSION *session,
584 unsigned char *out, size_t outlen)
585 {
586 if (session->master_key_length < 0) {
587 /* Should never happen */
588 return 0;
589 }
590 if (outlen == 0)
591 return session->master_key_length;
592 if (outlen > (size_t)session->master_key_length)
593 outlen = session->master_key_length;
594 memcpy(out, session->master_key, outlen);
595 return outlen;
596 }
597 /* from ssl/ssl_sess.c */
598 int SSL_SESSION_has_ticket(const SSL_SESSION *s)
599 {
600 return (s->tlsext_ticklen > 0) ? 1 : 0;
601 }
602 /* from ssl/ssl_sess.c */
603 unsigned long SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *s)
604 {
605 return s->tlsext_tick_lifetime_hint;
606 }
607 #endif
608
609 static const long Cryptography_HAS_SECURE_RENEGOTIATION = 1;
610
611 /* Cryptography now compiles out all SSLv2 bindings. This exists to allow
612 * clients that use it to check for SSLv2 support to keep functioning as
613 * expected.
614 */
615 static const long Cryptography_HAS_SSL2 = 0;
616
617 #ifdef OPENSSL_NO_SSL3_METHOD
618 static const long Cryptography_HAS_SSL3_METHOD = 0;
619 SSL_METHOD* (*SSLv3_method)(void) = NULL;
620 SSL_METHOD* (*SSLv3_client_method)(void) = NULL;
621 SSL_METHOD* (*SSLv3_server_method)(void) = NULL;
622 #else
623 static const long Cryptography_HAS_SSL3_METHOD = 1;
624 #endif
625
626 static const long Cryptography_HAS_TLSEXT_HOSTNAME = 1;
627 static const long Cryptography_HAS_TLSEXT_STATUS_REQ_CB = 1;
628 static const long Cryptography_HAS_STATUS_REQ_OCSP_RESP = 1;
629 static const long Cryptography_HAS_TLSEXT_STATUS_REQ_TYPE = 1;
630 static const long Cryptography_HAS_RELEASE_BUFFERS = 1;
631 static const long Cryptography_HAS_OP_NO_COMPRESSION = 1;
632 static const long Cryptography_HAS_TLSv1_1 = 1;
633 static const long Cryptography_HAS_TLSv1_2 = 1;
634 static const long Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING = 1;
635 static const long Cryptography_HAS_SSL_OP_NO_TICKET = 1;
636 static const long Cryptography_HAS_SSL_SET_SSL_CTX = 1;
637 static const long Cryptography_HAS_NEXTPROTONEG = 1;
638
639 /* SSL_get0_param was added in OpenSSL 1.0.2. */
640 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER
641 X509_VERIFY_PARAM *(*SSL_get0_param)(SSL *) = NULL;
642 #else
643 #endif
644
645 /* ALPN was added in OpenSSL 1.0.2. */
646 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !CRYPTOGRAPHY_IS_LIBRESSL
647 int (*SSL_CTX_set_alpn_protos)(SSL_CTX *,
648 const unsigned char *,
649 unsigned) = NULL;
650 int (*SSL_set_alpn_protos)(SSL *, const unsigned char *, unsigned) = NULL;
651 void (*SSL_CTX_set_alpn_select_cb)(SSL_CTX *,
652 int (*) (SSL *,
653 const unsigned char **,
654 unsigned char *,
655 const unsigned char *,
656 unsigned int,
657 void *),
658 void *) = NULL;
659 void (*SSL_get0_alpn_selected)(const SSL *,
660 const unsigned char **,
661 unsigned *) = NULL;
662 static const long Cryptography_HAS_ALPN = 0;
663 #else
664 static const long Cryptography_HAS_ALPN = 1;
665 #endif
666
667 /* SSL_CTX_set_cert_cb was added in OpenSSL 1.0.2. */
668 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102
669 void (*SSL_CTX_set_cert_cb)(SSL_CTX *, int (*)(SSL *, void *), void *) = NULL;
670 void (*SSL_set_cert_cb)(SSL *, int (*)(SSL *, void *), void *) = NULL;
671 static const long Cryptography_HAS_SET_CERT_CB = 0;
672 #else
673 static const long Cryptography_HAS_SET_CERT_CB = 1;
674 #endif
675
676
677 /* In OpenSSL 1.0.2i+ the handling of COMP_METHOD when OPENSSL_NO_COMP was
678 changed and we no longer need to typedef void */
679 #if (defined(OPENSSL_NO_COMP) && CRYPTOGRAPHY_OPENSSL_LESS_THAN_102I) || \
680 CRYPTOGRAPHY_IS_LIBRESSL
681 static const long Cryptography_HAS_COMPRESSION = 0;
682 typedef void COMP_METHOD;
683 #else
684 static const long Cryptography_HAS_COMPRESSION = 1;
685 #endif
686
687 #if defined(SSL_CTRL_GET_SERVER_TMP_KEY)
688 static const long Cryptography_HAS_GET_SERVER_TMP_KEY = 1;
689 #else
690 static const long Cryptography_HAS_GET_SERVER_TMP_KEY = 0;
691 long (*SSL_get_server_tmp_key)(SSL *, EVP_PKEY **) = NULL;
692 #endif
693
694 static const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE = 1;
695
696 static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS = 1;
697
698 /* in OpenSSL 1.1.0 the SSL_ST values were renamed to TLS_ST and several were
699 removed */
700 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110
701 static const long Cryptography_HAS_SSL_ST = 1;
702 #else
703 static const long Cryptography_HAS_SSL_ST = 0;
704 static const long SSL_ST_BEFORE = 0;
705 static const long SSL_ST_OK = 0;
706 static const long SSL_ST_INIT = 0;
707 static const long SSL_ST_RENEGOTIATE = 0;
708 #endif
709 #if CRYPTOGRAPHY_OPENSSL_110_OR_GREATER
710 static const long Cryptography_HAS_TLS_ST = 1;
711 #else
712 static const long Cryptography_HAS_TLS_ST = 0;
713 static const long TLS_ST_BEFORE = 0;
714 static const long TLS_ST_OK = 0;
715 #endif
716
717 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102
718 static const long Cryptography_HAS_GENERIC_DTLS_METHOD = 0;
719 const SSL_METHOD *(*DTLS_method)(void) = NULL;
720 const SSL_METHOD *(*DTLS_server_method)(void) = NULL;
721 const SSL_METHOD *(*DTLS_client_method)(void) = NULL;
722 static const long SSL_OP_NO_DTLSv1 = 0;
723 static const long SSL_OP_NO_DTLSv1_2 = 0;
724 long (*DTLS_set_link_mtu)(SSL *, long) = NULL;
725 long (*DTLS_get_link_min_mtu)(SSL *) = NULL;
726 #else
727 static const long Cryptography_HAS_GENERIC_DTLS_METHOD = 1;
728 #endif
729
730 static const long Cryptography_HAS_DTLS = 1;
731 /* Wrap DTLSv1_get_timeout to avoid cffi to handle a 'struct timeval'. */
732 long Cryptography_DTLSv1_get_timeout(SSL *ssl, time_t *ptv_sec,
733 long *ptv_usec) {
734 struct timeval tv = { 0 };
735 long r = DTLSv1_get_timeout(ssl, &tv);
736
737 if (r == 1) {
738 if (ptv_sec) {
739 *ptv_sec = tv.tv_sec;
740 }
741
742 if (ptv_usec) {
743 *ptv_usec = tv.tv_usec;
744 }
745 }
746
747 return r;
748 }
749
750 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102
751 static const long Cryptography_HAS_SIGALGS = 0;
752 const int (*SSL_get_sigalgs)(SSL *, int, int *, int *, int *, unsigned char *,
753 unsigned char *) = NULL;
754 const long (*SSL_CTX_set1_sigalgs_list)(SSL_CTX *, const char *) = NULL;
755 #else
756 static const long Cryptography_HAS_SIGALGS = 1;
757 #endif
758
759 #if CRYPTOGRAPHY_IS_LIBRESSL
760 static const long Cryptography_HAS_PSK = 0;
761 int (*SSL_CTX_use_psk_identity_hint)(SSL_CTX *, const char *) = NULL;
762 void (*SSL_CTX_set_psk_server_callback)(SSL_CTX *,
763 unsigned int (*)(
764 SSL *,
765 const char *,
766 unsigned char *,
767 unsigned int
768 )) = NULL;
769 void (*SSL_CTX_set_psk_client_callback)(SSL_CTX *,
770 unsigned int (*)(
771 SSL *,
772 const char *,
773 char *,
774 unsigned int,
775 unsigned char *,
776 unsigned int
777 )) = NULL;
778 #else
779 static const long Cryptography_HAS_PSK = 1;
780 #endif
781
782 /*
783 * Custom extensions were added in 1.0.2. 1.1.1 is adding a more general
784 * SSL_CTX_add_custom_ext function, but we're not binding that yet.
785 */
786 #if CRYPTOGRAPHY_OPENSSL_102_OR_GREATER
787 static const long Cryptography_HAS_CUSTOM_EXT = 1;
788 #else
789 static const long Cryptography_HAS_CUSTOM_EXT = 0;
790
791 typedef int (*custom_ext_add_cb)(SSL *, unsigned int,
792 const unsigned char **,
793 size_t *, int *,
794 void *);
795
796 typedef void (*custom_ext_free_cb)(SSL *, unsigned int,
797 const unsigned char *,
798 void *);
799
800 typedef int (*custom_ext_parse_cb)(SSL *, unsigned int,
801 const unsigned char *,
802 size_t, int *,
803 void *);
804
805 int (*SSL_CTX_add_client_custom_ext)(SSL_CTX *, unsigned int,
806 custom_ext_add_cb,
807 custom_ext_free_cb, void *,
808 custom_ext_parse_cb,
809 void *) = NULL;
810
811 int (*SSL_CTX_add_server_custom_ext)(SSL_CTX *, unsigned int,
812 custom_ext_add_cb,
813 custom_ext_free_cb, void *,
814 custom_ext_parse_cb,
815 void *) = NULL;
816
817 int (*SSL_extension_supported)(unsigned int) = NULL;
818 #endif
819
820 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER
821 int (*SSL_CIPHER_is_aead)(const SSL_CIPHER *) = NULL;
822 int (*SSL_CIPHER_get_cipher_nid)(const SSL_CIPHER *) = NULL;
823 int (*SSL_CIPHER_get_digest_nid)(const SSL_CIPHER *) = NULL;
824 int (*SSL_CIPHER_get_kx_nid)(const SSL_CIPHER *) = NULL;
825 int (*SSL_CIPHER_get_auth_nid)(const SSL_CIPHER *) = NULL;
826 static const long Cryptography_HAS_CIPHER_DETAILS = 0;
827 #else
828 static const long Cryptography_HAS_CIPHER_DETAILS = 1;
829 #endif
830
831 #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_111
832 static const long Cryptography_HAS_TLSv1_3 = 0;
833 static const long SSL_OP_NO_TLSv1_3 = 0;
834 static const long SSL_VERIFY_POST_HANDSHAKE = 0;
835 int (*SSL_CTX_set_ciphersuites)(SSL_CTX *, const char *) = NULL;
836 int (*SSL_verify_client_post_handshake)(SSL *) = NULL;
837 void (*SSL_CTX_set_post_handshake_auth)(SSL_CTX *, int) = NULL;
838 void (*SSL_set_post_handshake_auth)(SSL *, int) = NULL;
839 uint32_t (*SSL_SESSION_get_max_early_data)(const SSL_SESSION *) = NULL;
840 int (*SSL_write_early_data)(SSL *, const void *, size_t, size_t *) = NULL;
841 int (*SSL_read_early_data)(SSL *, void *, size_t, size_t *) = NULL;
842 int (*SSL_CTX_set_max_early_data)(SSL_CTX *, uint32_t) = NULL;
843 #else
844 static const long Cryptography_HAS_TLSv1_3 = 1;
845 #endif
846 """
847
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/src/_cffi_src/openssl/ssl.py b/src/_cffi_src/openssl/ssl.py
--- a/src/_cffi_src/openssl/ssl.py
+++ b/src/_cffi_src/openssl/ssl.py
@@ -756,7 +756,7 @@
static const long Cryptography_HAS_SIGALGS = 1;
#endif
-#if CRYPTOGRAPHY_IS_LIBRESSL
+#if CRYPTOGRAPHY_IS_LIBRESSL || defined(OPENSSL_NO_PSK)
static const long Cryptography_HAS_PSK = 0;
int (*SSL_CTX_use_psk_identity_hint)(SSL_CTX *, const char *) = NULL;
void (*SSL_CTX_set_psk_server_callback)(SSL_CTX *,
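
The one-line change above extends the existing LibreSSL guard so that an OpenSSL configured with `no-psk` (which defines `OPENSSL_NO_PSK`) also compiles the PSK bindings out: `Cryptography_HAS_PSK` becomes 0, and `SSL_CTX_use_psk_identity_hint` plus the two PSK callback setters are stubbed as NULL pointers, following the same compile-out idiom the surrounding `CUSTOMIZATIONS` block already uses for other optional features.

As a rough sanity check (a sketch, not part of the patch), one could rebuild the package against such an OpenSSL and read the feature flag at runtime. The `Binding` import path below is assumed to be the package's usual binding loader, and the expected values are an inference from the guard above rather than something stated in the patch:

```python
# Sketch only: reports how the OPENSSL_NO_PSK guard resolved at build time by
# reading the Cryptography_HAS_PSK constant exposed on the compiled lib.
# Assumes the patched package has been rebuilt against the target OpenSSL.
from cryptography.hazmat.bindings.openssl.binding import Binding

binding = Binding()

# Expected (assumption): 0 when OpenSSL was configured with no-psk or when
# building against LibreSSL, 1 otherwise. A clean import is itself meaningful,
# since the failure mode being fixed is a load-time "symbol not found" error.
print("Cryptography_HAS_PSK =", binding.lib.Cryptography_HAS_PSK)
```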
| {"golden_diff": "diff --git a/src/_cffi_src/openssl/ssl.py b/src/_cffi_src/openssl/ssl.py\n--- a/src/_cffi_src/openssl/ssl.py\n+++ b/src/_cffi_src/openssl/ssl.py\n@@ -756,7 +756,7 @@\n static const long Cryptography_HAS_SIGALGS = 1;\n #endif\n \n-#if CRYPTOGRAPHY_IS_LIBRESSL\n+#if CRYPTOGRAPHY_IS_LIBRESSL || defined(OPENSSL_NO_PSK)\n static const long Cryptography_HAS_PSK = 0;\n int (*SSL_CTX_use_psk_identity_hint)(SSL_CTX *, const char *) = NULL;\n void (*SSL_CTX_set_psk_server_callback)(SSL_CTX *,\n", "issue": "Building against openssl-1.1.1 configured with no-psk results broken .so\nIf openssl 1.1.1 is detected, the TLSv1.3 psk stuff is enabled unconditionally. However, it can be disabled via `no-psk` configuration.\r\n\r\nBuilding against such openssl will give the following:\r\n```\r\n$ ldd /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so\r\n\tldd (0x7ff279843000)\r\n\tlibssl.so.1.1 => /lib/libssl.so.1.1 (0x7ff279705000)\r\n\tlibcrypto.so.1.1 => /lib/libcrypto.so.1.1 (0x7ff279488000)\r\n\tlibpython3.6m.so.1.0 => /usr/lib/libpython3.6m.so.1.0 (0x7ff279202000)\r\n\tlibc.musl-x86_64.so.1 => ldd (0x7ff279843000)\r\nError relocating /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so: SSL_CTX_set_psk_client_callback: symbol not found\r\nError relocating /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so: SSL_CTX_use_psk_identity_hint: symbol not found\r\nError relocating /usr/lib/python3.6/site-packages/cryptography/hazmat/bindings/_openssl.abi3.so: SSL_CTX_set_psk_server_callback: symbol not found\r\n```\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <openssl/ssl.h>\n\ntypedef STACK_OF(SSL_CIPHER) Cryptography_STACK_OF_SSL_CIPHER;\n\"\"\"\n\nTYPES = \"\"\"\nstatic const long Cryptography_HAS_SSL_ST;\nstatic const long Cryptography_HAS_TLS_ST;\nstatic const long Cryptography_HAS_SSL2;\nstatic const long Cryptography_HAS_SSL3_METHOD;\nstatic const long Cryptography_HAS_TLSv1_1;\nstatic const long Cryptography_HAS_TLSv1_2;\nstatic const long Cryptography_HAS_TLSv1_3;\nstatic const long Cryptography_HAS_SECURE_RENEGOTIATION;\nstatic const long Cryptography_HAS_COMPRESSION;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_CB;\nstatic const long Cryptography_HAS_STATUS_REQ_OCSP_RESP;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_TYPE;\nstatic const long Cryptography_HAS_GET_SERVER_TMP_KEY;\nstatic const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE;\nstatic const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS;\nstatic const long Cryptography_HAS_DTLS;\nstatic const long Cryptography_HAS_GENERIC_DTLS_METHOD;\nstatic const long Cryptography_HAS_SIGALGS;\nstatic const long Cryptography_HAS_PSK;\nstatic const long Cryptography_HAS_CIPHER_DETAILS;\n\n/* Internally invented symbol to tell us if SNI is supported */\nstatic const long Cryptography_HAS_TLSEXT_HOSTNAME;\n\n/* Internally invented symbol to tell us if SSL_MODE_RELEASE_BUFFERS is\n * supported\n */\nstatic const long Cryptography_HAS_RELEASE_BUFFERS;\n\n/* Internally invented symbol to tell us if SSL_OP_NO_COMPRESSION is\n * supported\n */\nstatic const long Cryptography_HAS_OP_NO_COMPRESSION;\nstatic const long Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING;\nstatic const 
long Cryptography_HAS_SSL_SET_SSL_CTX;\nstatic const long Cryptography_HAS_SSL_OP_NO_TICKET;\nstatic const long Cryptography_HAS_ALPN;\nstatic const long Cryptography_HAS_NEXTPROTONEG;\nstatic const long Cryptography_HAS_SET_CERT_CB;\nstatic const long Cryptography_HAS_CUSTOM_EXT;\n\nstatic const long SSL_FILETYPE_PEM;\nstatic const long SSL_FILETYPE_ASN1;\nstatic const long SSL_ERROR_NONE;\nstatic const long SSL_ERROR_ZERO_RETURN;\nstatic const long SSL_ERROR_WANT_READ;\nstatic const long SSL_ERROR_WANT_WRITE;\nstatic const long SSL_ERROR_WANT_X509_LOOKUP;\nstatic const long SSL_ERROR_WANT_CONNECT;\nstatic const long SSL_ERROR_SYSCALL;\nstatic const long SSL_ERROR_SSL;\nstatic const long SSL_SENT_SHUTDOWN;\nstatic const long SSL_RECEIVED_SHUTDOWN;\nstatic const long SSL_OP_NO_SSLv2;\nstatic const long SSL_OP_NO_SSLv3;\nstatic const long SSL_OP_NO_TLSv1;\nstatic const long SSL_OP_NO_TLSv1_1;\nstatic const long SSL_OP_NO_TLSv1_2;\nstatic const long SSL_OP_NO_TLSv1_3;\nstatic const long SSL_OP_NO_DTLSv1;\nstatic const long SSL_OP_NO_DTLSv1_2;\nstatic const long SSL_OP_NO_COMPRESSION;\nstatic const long SSL_OP_SINGLE_DH_USE;\nstatic const long SSL_OP_EPHEMERAL_RSA;\nstatic const long SSL_OP_MICROSOFT_SESS_ID_BUG;\nstatic const long SSL_OP_NETSCAPE_CHALLENGE_BUG;\nstatic const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG;\nstatic const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG;\nstatic const long SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER;\nstatic const long SSL_OP_MSIE_SSLV2_RSA_PADDING;\nstatic const long SSL_OP_SSLEAY_080_CLIENT_DH_BUG;\nstatic const long SSL_OP_TLS_D5_BUG;\nstatic const long SSL_OP_TLS_BLOCK_PADDING_BUG;\nstatic const long SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS;\nstatic const long SSL_OP_CIPHER_SERVER_PREFERENCE;\nstatic const long SSL_OP_TLS_ROLLBACK_BUG;\nstatic const long SSL_OP_PKCS1_CHECK_1;\nstatic const long SSL_OP_PKCS1_CHECK_2;\nstatic const long SSL_OP_NETSCAPE_CA_DN_BUG;\nstatic const long SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG;\nstatic const long SSL_OP_NO_QUERY_MTU;\nstatic const long SSL_OP_COOKIE_EXCHANGE;\nstatic const long SSL_OP_NO_TICKET;\nstatic const long SSL_OP_ALL;\nstatic const long SSL_OP_SINGLE_ECDH_USE;\nstatic const long SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION;\nstatic const long SSL_OP_LEGACY_SERVER_CONNECT;\nstatic const long SSL_VERIFY_PEER;\nstatic const long SSL_VERIFY_FAIL_IF_NO_PEER_CERT;\nstatic const long SSL_VERIFY_CLIENT_ONCE;\nstatic const long SSL_VERIFY_NONE;\nstatic const long SSL_VERIFY_POST_HANDSHAKE;\nstatic const long SSL_SESS_CACHE_OFF;\nstatic const long SSL_SESS_CACHE_CLIENT;\nstatic const long SSL_SESS_CACHE_SERVER;\nstatic const long SSL_SESS_CACHE_BOTH;\nstatic const long SSL_SESS_CACHE_NO_AUTO_CLEAR;\nstatic const long SSL_SESS_CACHE_NO_INTERNAL_LOOKUP;\nstatic const long SSL_SESS_CACHE_NO_INTERNAL_STORE;\nstatic const long SSL_SESS_CACHE_NO_INTERNAL;\nstatic const long SSL_ST_CONNECT;\nstatic const long SSL_ST_ACCEPT;\nstatic const long SSL_ST_MASK;\nstatic const long SSL_ST_INIT;\nstatic const long SSL_ST_BEFORE;\nstatic const long SSL_ST_OK;\nstatic const long SSL_ST_RENEGOTIATE;\nstatic const long SSL_CB_LOOP;\nstatic const long SSL_CB_EXIT;\nstatic const long SSL_CB_READ;\nstatic const long SSL_CB_WRITE;\nstatic const long SSL_CB_ALERT;\nstatic const long SSL_CB_READ_ALERT;\nstatic const long SSL_CB_WRITE_ALERT;\nstatic const long SSL_CB_ACCEPT_LOOP;\nstatic const long SSL_CB_ACCEPT_EXIT;\nstatic const long SSL_CB_CONNECT_LOOP;\nstatic const long SSL_CB_CONNECT_EXIT;\nstatic const long SSL_CB_HANDSHAKE_START;\nstatic const long 
SSL_CB_HANDSHAKE_DONE;\nstatic const long SSL_MODE_RELEASE_BUFFERS;\nstatic const long SSL_MODE_ENABLE_PARTIAL_WRITE;\nstatic const long SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER;\nstatic const long SSL_MODE_AUTO_RETRY;\nstatic const long SSL3_RANDOM_SIZE;\nstatic const long TLS_ST_BEFORE;\nstatic const long TLS_ST_OK;\n\nstatic const long OPENSSL_NPN_NEGOTIATED;\n\ntypedef ... SSL_METHOD;\ntypedef ... SSL_CTX;\n\ntypedef ... SSL_SESSION;\n\ntypedef ... SSL;\n\nstatic const long TLSEXT_NAMETYPE_host_name;\nstatic const long TLSEXT_STATUSTYPE_ocsp;\n\ntypedef ... SSL_CIPHER;\ntypedef ... Cryptography_STACK_OF_SSL_CIPHER;\ntypedef ... COMP_METHOD;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n/* SSL */\nconst char *SSL_state_string_long(const SSL *);\nSSL_SESSION *SSL_get1_session(SSL *);\nint SSL_set_session(SSL *, SSL_SESSION *);\nint SSL_get_verify_mode(const SSL *);\nvoid SSL_set_verify(SSL *, int, int (*)(int, X509_STORE_CTX *));\nvoid SSL_set_verify_depth(SSL *, int);\nint SSL_get_verify_depth(const SSL *);\nint (*SSL_get_verify_callback(const SSL *))(int, X509_STORE_CTX *);\nvoid SSL_set_info_callback(SSL *ssl, void (*)(const SSL *, int, int));\nvoid (*SSL_get_info_callback(const SSL *))(const SSL *, int, int);\nSSL *SSL_new(SSL_CTX *);\nvoid SSL_free(SSL *);\nint SSL_set_fd(SSL *, int);\nSSL_CTX *SSL_get_SSL_CTX(const SSL *);\nSSL_CTX *SSL_set_SSL_CTX(SSL *, SSL_CTX *);\nBIO *SSL_get_rbio(const SSL *);\nBIO *SSL_get_wbio(const SSL *);\nvoid SSL_set_bio(SSL *, BIO *, BIO *);\nvoid SSL_set_connect_state(SSL *);\nvoid SSL_set_accept_state(SSL *);\nvoid SSL_set_shutdown(SSL *, int);\nint SSL_get_shutdown(const SSL *);\nint SSL_pending(const SSL *);\nint SSL_write(SSL *, const void *, int);\nint SSL_read(SSL *, void *, int);\nint SSL_peek(SSL *, void *, int);\nX509 *SSL_get_certificate(const SSL *);\nX509 *SSL_get_peer_certificate(const SSL *);\nint SSL_get_ex_data_X509_STORE_CTX_idx(void);\n\n/* Added in 1.0.2 */\nX509_VERIFY_PARAM *SSL_get0_param(SSL *);\n\nint SSL_use_certificate(SSL *, X509 *);\nint SSL_use_certificate_ASN1(SSL *, const unsigned char *, int);\nint SSL_use_certificate_file(SSL *, const char *, int);\nint SSL_use_PrivateKey(SSL *, EVP_PKEY *);\nint SSL_use_PrivateKey_ASN1(int, SSL *, const unsigned char *, long);\nint SSL_use_PrivateKey_file(SSL *, const char *, int);\nint SSL_check_private_key(const SSL *);\n\nint SSL_get_sigalgs(SSL *, int, int *, int *, int *, unsigned char *,\n unsigned char *);\n\nCryptography_STACK_OF_X509 *SSL_get_peer_cert_chain(const SSL *);\nCryptography_STACK_OF_X509_NAME *SSL_get_client_CA_list(const SSL *);\n\nint SSL_get_error(const SSL *, int);\nint SSL_do_handshake(SSL *);\nint SSL_shutdown(SSL *);\nint SSL_renegotiate(SSL *);\nint SSL_renegotiate_pending(SSL *);\nconst char *SSL_get_cipher_list(const SSL *, int);\nCryptography_STACK_OF_SSL_CIPHER *SSL_get_ciphers(const SSL *);\n\n/* context */\nvoid SSL_CTX_free(SSL_CTX *);\nlong SSL_CTX_set_timeout(SSL_CTX *, long);\nint SSL_CTX_set_default_verify_paths(SSL_CTX *);\nvoid SSL_CTX_set_verify(SSL_CTX *, int, int (*)(int, X509_STORE_CTX *));\nvoid SSL_CTX_set_verify_depth(SSL_CTX *, int);\nint (*SSL_CTX_get_verify_callback(const SSL_CTX *))(int, X509_STORE_CTX *);\nint SSL_CTX_get_verify_mode(const SSL_CTX *);\nint SSL_CTX_get_verify_depth(const SSL_CTX *);\nint SSL_CTX_set_cipher_list(SSL_CTX *, const char *);\nint SSL_CTX_load_verify_locations(SSL_CTX *, const char *, const char *);\nvoid SSL_CTX_set_default_passwd_cb(SSL_CTX *, pem_password_cb *);\nvoid SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *, void 
*);\nint SSL_CTX_use_certificate(SSL_CTX *, X509 *);\nint SSL_CTX_use_certificate_ASN1(SSL_CTX *, int, const unsigned char *);\nint SSL_CTX_use_certificate_file(SSL_CTX *, const char *, int);\nint SSL_CTX_use_certificate_chain_file(SSL_CTX *, const char *);\nint SSL_CTX_use_PrivateKey(SSL_CTX *, EVP_PKEY *);\nint SSL_CTX_use_PrivateKey_ASN1(int, SSL_CTX *, const unsigned char *, long);\nint SSL_CTX_use_PrivateKey_file(SSL_CTX *, const char *, int);\nint SSL_CTX_check_private_key(const SSL_CTX *);\nvoid SSL_CTX_set_cert_verify_callback(SSL_CTX *,\n int (*)(X509_STORE_CTX *, void *),\n void *);\n\nvoid SSL_CTX_set_cookie_generate_cb(SSL_CTX *,\n int (*)(\n SSL *,\n unsigned char *,\n unsigned int *\n ));\nlong SSL_CTX_get_read_ahead(SSL_CTX *);\nlong SSL_CTX_set_read_ahead(SSL_CTX *, long);\n\nint SSL_CTX_use_psk_identity_hint(SSL_CTX *, const char *);\nvoid SSL_CTX_set_psk_server_callback(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n unsigned char *,\n unsigned int\n ));\nvoid SSL_CTX_set_psk_client_callback(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n char *,\n unsigned int,\n unsigned char *,\n unsigned int\n ));\n\nint SSL_CTX_set_session_id_context(SSL_CTX *, const unsigned char *,\n unsigned int);\n\nvoid SSL_CTX_set_cert_store(SSL_CTX *, X509_STORE *);\nX509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *);\nint SSL_CTX_add_client_CA(SSL_CTX *, X509 *);\n\nvoid SSL_CTX_set_client_CA_list(SSL_CTX *, Cryptography_STACK_OF_X509_NAME *);\n\nvoid SSL_CTX_set_info_callback(SSL_CTX *, void (*)(const SSL *, int, int));\nvoid (*SSL_CTX_get_info_callback(SSL_CTX *))(const SSL *, int, int);\n\nlong SSL_CTX_set1_sigalgs_list(SSL_CTX *, const char *);\n\n/* SSL_SESSION */\nvoid SSL_SESSION_free(SSL_SESSION *);\n\n/* Information about actually used cipher */\nconst char *SSL_CIPHER_get_name(const SSL_CIPHER *);\nint SSL_CIPHER_get_bits(const SSL_CIPHER *, int *);\n/* the modern signature of this is uint32_t, but older openssl declared it\n as unsigned long. 
To make our compiler flags happy we'll declare it as a\n 64-bit wide value, which should always be safe */\nuint64_t SSL_CIPHER_get_id(const SSL_CIPHER *);\nint SSL_CIPHER_is_aead(const SSL_CIPHER *);\nint SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *);\nint SSL_CIPHER_get_digest_nid(const SSL_CIPHER *);\nint SSL_CIPHER_get_kx_nid(const SSL_CIPHER *);\nint SSL_CIPHER_get_auth_nid(const SSL_CIPHER *);\n\nsize_t SSL_get_finished(const SSL *, void *, size_t);\nsize_t SSL_get_peer_finished(const SSL *, void *, size_t);\nCryptography_STACK_OF_X509_NAME *SSL_load_client_CA_file(const char *);\n\nconst char *SSL_get_servername(const SSL *, const int);\n/* Function signature changed to const char * in 1.1.0 */\nconst char *SSL_CIPHER_get_version(const SSL_CIPHER *);\n/* These became macros in 1.1.0 */\nint SSL_library_init(void);\nvoid SSL_load_error_strings(void);\n\n/* these CRYPTO_EX_DATA functions became macros in 1.1.0 */\nint SSL_get_ex_new_index(long, void *, CRYPTO_EX_new *, CRYPTO_EX_dup *,\n CRYPTO_EX_free *);\nint SSL_set_ex_data(SSL *, int, void *);\nint SSL_CTX_get_ex_new_index(long, void *, CRYPTO_EX_new *, CRYPTO_EX_dup *,\n CRYPTO_EX_free *);\nint SSL_CTX_set_ex_data(SSL_CTX *, int, void *);\n\nSSL_SESSION *SSL_get_session(const SSL *);\nconst unsigned char *SSL_SESSION_get_id(const SSL_SESSION *, unsigned int *);\nlong SSL_SESSION_get_time(const SSL_SESSION *);\nlong SSL_SESSION_get_timeout(const SSL_SESSION *);\nint SSL_SESSION_has_ticket(const SSL_SESSION *);\nlong SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *);\n\n/* not a macro, but older OpenSSLs don't pass the args as const */\nchar *SSL_CIPHER_description(const SSL_CIPHER *, char *, int);\nint SSL_SESSION_print(BIO *, const SSL_SESSION *);\n\n/* not macros, but will be conditionally bound so can't live in functions */\nconst COMP_METHOD *SSL_get_current_compression(SSL *);\nconst COMP_METHOD *SSL_get_current_expansion(SSL *);\nconst char *SSL_COMP_get_name(const COMP_METHOD *);\nint SSL_CTX_set_client_cert_engine(SSL_CTX *, ENGINE *);\n\nunsigned long SSL_set_mode(SSL *, unsigned long);\nunsigned long SSL_get_mode(SSL *);\n\nunsigned long SSL_set_options(SSL *, unsigned long);\nunsigned long SSL_get_options(SSL *);\n\nvoid SSL_set_app_data(SSL *, char *);\nchar * SSL_get_app_data(SSL *);\nvoid SSL_set_read_ahead(SSL *, int);\n\nint SSL_want_read(const SSL *);\nint SSL_want_write(const SSL *);\n\nlong SSL_total_renegotiations(SSL *);\nlong SSL_get_secure_renegotiation_support(SSL *);\n\n/* Defined as unsigned long because SSL_OP_ALL is greater than signed 32-bit\n and Windows defines long as 32-bit. */\nunsigned long SSL_CTX_set_options(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_clear_options(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_get_options(SSL_CTX *);\nunsigned long SSL_CTX_set_mode(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_get_mode(SSL_CTX *);\nunsigned long SSL_CTX_set_session_cache_mode(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_get_session_cache_mode(SSL_CTX *);\nunsigned long SSL_CTX_set_tmp_dh(SSL_CTX *, DH *);\nunsigned long SSL_CTX_set_tmp_ecdh(SSL_CTX *, EC_KEY *);\nunsigned long SSL_CTX_add_extra_chain_cert(SSL_CTX *, X509 *);\n\n/*- These aren't macros these functions are all const X on openssl > 1.0.x -*/\n\n/* methods */\n\n/*\n * TLSv1_1 and TLSv1_2 are recent additions. 
Only sufficiently new versions of\n * OpenSSL support them.\n */\nconst SSL_METHOD *TLSv1_1_method(void);\nconst SSL_METHOD *TLSv1_1_server_method(void);\nconst SSL_METHOD *TLSv1_1_client_method(void);\n\nconst SSL_METHOD *TLSv1_2_method(void);\nconst SSL_METHOD *TLSv1_2_server_method(void);\nconst SSL_METHOD *TLSv1_2_client_method(void);\n\nconst SSL_METHOD *SSLv3_method(void);\nconst SSL_METHOD *SSLv3_server_method(void);\nconst SSL_METHOD *SSLv3_client_method(void);\n\nconst SSL_METHOD *TLSv1_method(void);\nconst SSL_METHOD *TLSv1_server_method(void);\nconst SSL_METHOD *TLSv1_client_method(void);\n\nconst SSL_METHOD *DTLSv1_method(void);\nconst SSL_METHOD *DTLSv1_server_method(void);\nconst SSL_METHOD *DTLSv1_client_method(void);\n\n/* Added in 1.0.2 */\nconst SSL_METHOD *DTLS_method(void);\nconst SSL_METHOD *DTLS_server_method(void);\nconst SSL_METHOD *DTLS_client_method(void);\n\nconst SSL_METHOD *SSLv23_method(void);\nconst SSL_METHOD *SSLv23_server_method(void);\nconst SSL_METHOD *SSLv23_client_method(void);\n\n/*- These aren't macros these arguments are all const X on openssl > 1.0.x -*/\nSSL_CTX *SSL_CTX_new(SSL_METHOD *);\nlong SSL_CTX_get_timeout(const SSL_CTX *);\n\nconst SSL_CIPHER *SSL_get_current_cipher(const SSL *);\nconst char *SSL_get_version(const SSL *);\nint SSL_version(const SSL *);\n\nvoid *SSL_CTX_get_ex_data(const SSL_CTX *, int);\nvoid *SSL_get_ex_data(const SSL *, int);\n\nvoid SSL_set_tlsext_host_name(SSL *, char *);\nvoid SSL_CTX_set_tlsext_servername_callback(\n SSL_CTX *,\n int (*)(SSL *, int *, void *));\nvoid SSL_CTX_set_tlsext_servername_arg(\n SSL_CTX *, void *);\n\nlong SSL_set_tlsext_status_ocsp_resp(SSL *, unsigned char *, int);\nlong SSL_get_tlsext_status_ocsp_resp(SSL *, const unsigned char **);\nlong SSL_set_tlsext_status_type(SSL *, long);\nlong SSL_CTX_set_tlsext_status_cb(SSL_CTX *, int(*)(SSL *, void *));\nlong SSL_CTX_set_tlsext_status_arg(SSL_CTX *, void *);\n\nint SSL_CTX_set_tlsext_use_srtp(SSL_CTX *, const char *);\nint SSL_set_tlsext_use_srtp(SSL *, const char *);\n\nlong SSL_session_reused(SSL *);\n\nvoid SSL_CTX_set_next_protos_advertised_cb(SSL_CTX *,\n int (*)(SSL *,\n const unsigned char **,\n unsigned int *,\n void *),\n void *);\nvoid SSL_CTX_set_next_proto_select_cb(SSL_CTX *,\n int (*)(SSL *,\n unsigned char **,\n unsigned char *,\n const unsigned char *,\n unsigned int,\n void *),\n void *);\nint SSL_select_next_proto(unsigned char **, unsigned char *,\n const unsigned char *, unsigned int,\n const unsigned char *, unsigned int);\nvoid SSL_get0_next_proto_negotiated(const SSL *,\n const unsigned char **, unsigned *);\n\nint sk_SSL_CIPHER_num(Cryptography_STACK_OF_SSL_CIPHER *);\nconst SSL_CIPHER *sk_SSL_CIPHER_value(Cryptography_STACK_OF_SSL_CIPHER *, int);\n\n/* ALPN APIs were introduced in OpenSSL 1.0.2. To continue to support earlier\n * versions some special handling of these is necessary.\n */\nint SSL_CTX_set_alpn_protos(SSL_CTX *, const unsigned char *, unsigned);\nint SSL_set_alpn_protos(SSL *, const unsigned char *, unsigned);\nvoid SSL_CTX_set_alpn_select_cb(SSL_CTX *,\n int (*) (SSL *,\n const unsigned char **,\n unsigned char *,\n const unsigned char *,\n unsigned int,\n void *),\n void *);\nvoid SSL_get0_alpn_selected(const SSL *, const unsigned char **, unsigned *);\n\nlong SSL_get_server_tmp_key(SSL *, EVP_PKEY **);\n\n/* SSL_CTX_set_cert_cb is introduced in OpenSSL 1.0.2. 
To continue to support\n * earlier versions some special handling of these is necessary.\n */\nvoid SSL_CTX_set_cert_cb(SSL_CTX *, int (*)(SSL *, void *), void *);\nvoid SSL_set_cert_cb(SSL *, int (*)(SSL *, void *), void *);\n\n/* Added in 1.0.2 */\nconst SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *);\n\nint SSL_SESSION_set1_id_context(SSL_SESSION *, const unsigned char *,\n unsigned int);\n/* Added in 1.1.0 for the great opaquing of structs */\nsize_t SSL_SESSION_get_master_key(const SSL_SESSION *, unsigned char *,\n size_t);\nsize_t SSL_get_client_random(const SSL *, unsigned char *, size_t);\nsize_t SSL_get_server_random(const SSL *, unsigned char *, size_t);\nint SSL_export_keying_material(SSL *, unsigned char *, size_t, const char *,\n size_t, const unsigned char *, size_t, int);\n\nlong SSL_CTX_sess_number(SSL_CTX *);\nlong SSL_CTX_sess_connect(SSL_CTX *);\nlong SSL_CTX_sess_connect_good(SSL_CTX *);\nlong SSL_CTX_sess_connect_renegotiate(SSL_CTX *);\nlong SSL_CTX_sess_accept(SSL_CTX *);\nlong SSL_CTX_sess_accept_good(SSL_CTX *);\nlong SSL_CTX_sess_accept_renegotiate(SSL_CTX *);\nlong SSL_CTX_sess_hits(SSL_CTX *);\nlong SSL_CTX_sess_cb_hits(SSL_CTX *);\nlong SSL_CTX_sess_misses(SSL_CTX *);\nlong SSL_CTX_sess_timeouts(SSL_CTX *);\nlong SSL_CTX_sess_cache_full(SSL_CTX *);\n\n/* DTLS support */\nlong Cryptography_DTLSv1_get_timeout(SSL *, time_t *, long *);\nlong DTLSv1_handle_timeout(SSL *);\nlong DTLS_set_link_mtu(SSL *, long);\nlong DTLS_get_link_min_mtu(SSL *);\n\n/* Custom extensions. */\ntypedef int (*custom_ext_add_cb)(SSL *, unsigned int,\n const unsigned char **,\n size_t *, int *,\n void *);\n\ntypedef void (*custom_ext_free_cb)(SSL *, unsigned int,\n const unsigned char *,\n void *);\n\ntypedef int (*custom_ext_parse_cb)(SSL *, unsigned int,\n const unsigned char *,\n size_t, int *,\n void *);\n\nint SSL_CTX_add_client_custom_ext(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *);\n\nint SSL_CTX_add_server_custom_ext(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *);\n\nint SSL_extension_supported(unsigned int);\n\nint SSL_CTX_set_ciphersuites(SSL_CTX *, const char *);\nint SSL_verify_client_post_handshake(SSL *);\nvoid SSL_CTX_set_post_handshake_auth(SSL_CTX *, int);\nvoid SSL_set_post_handshake_auth(SSL *, int);\n\nuint32_t SSL_SESSION_get_max_early_data(const SSL_SESSION *);\nint SSL_write_early_data(SSL *, const void *, size_t, size_t *);\nint SSL_read_early_data(SSL *, void *, size_t, size_t *);\nint SSL_CTX_set_max_early_data(SSL_CTX *, uint32_t);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n/* Added in 1.0.2 but we need it in all versions now due to the great\n opaquing. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\n/* from ssl/ssl_lib.c */\nconst SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *ctx) {\n return ctx->method;\n}\n#endif\n\n/* Added in 1.1.0 in the great opaquing, but we need to define it for older\n OpenSSLs. Such is our burden. 
*/\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER\n/* from ssl/ssl_lib.c */\nsize_t SSL_get_client_random(const SSL *ssl, unsigned char *out, size_t outlen)\n{\n if (outlen == 0)\n return sizeof(ssl->s3->client_random);\n if (outlen > sizeof(ssl->s3->client_random))\n outlen = sizeof(ssl->s3->client_random);\n memcpy(out, ssl->s3->client_random, outlen);\n return outlen;\n}\n/* Added in 1.1.0 as well */\n/* from ssl/ssl_lib.c */\nsize_t SSL_get_server_random(const SSL *ssl, unsigned char *out, size_t outlen)\n{\n if (outlen == 0)\n return sizeof(ssl->s3->server_random);\n if (outlen > sizeof(ssl->s3->server_random))\n outlen = sizeof(ssl->s3->server_random);\n memcpy(out, ssl->s3->server_random, outlen);\n return outlen;\n}\n/* Added in 1.1.0 as well */\n/* from ssl/ssl_lib.c */\nsize_t SSL_SESSION_get_master_key(const SSL_SESSION *session,\n unsigned char *out, size_t outlen)\n{\n if (session->master_key_length < 0) {\n /* Should never happen */\n return 0;\n }\n if (outlen == 0)\n return session->master_key_length;\n if (outlen > (size_t)session->master_key_length)\n outlen = session->master_key_length;\n memcpy(out, session->master_key, outlen);\n return outlen;\n}\n/* from ssl/ssl_sess.c */\nint SSL_SESSION_has_ticket(const SSL_SESSION *s)\n{\n return (s->tlsext_ticklen > 0) ? 1 : 0;\n}\n/* from ssl/ssl_sess.c */\nunsigned long SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *s)\n{\n return s->tlsext_tick_lifetime_hint;\n}\n#endif\n\nstatic const long Cryptography_HAS_SECURE_RENEGOTIATION = 1;\n\n/* Cryptography now compiles out all SSLv2 bindings. This exists to allow\n * clients that use it to check for SSLv2 support to keep functioning as\n * expected.\n */\nstatic const long Cryptography_HAS_SSL2 = 0;\n\n#ifdef OPENSSL_NO_SSL3_METHOD\nstatic const long Cryptography_HAS_SSL3_METHOD = 0;\nSSL_METHOD* (*SSLv3_method)(void) = NULL;\nSSL_METHOD* (*SSLv3_client_method)(void) = NULL;\nSSL_METHOD* (*SSLv3_server_method)(void) = NULL;\n#else\nstatic const long Cryptography_HAS_SSL3_METHOD = 1;\n#endif\n\nstatic const long Cryptography_HAS_TLSEXT_HOSTNAME = 1;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_CB = 1;\nstatic const long Cryptography_HAS_STATUS_REQ_OCSP_RESP = 1;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_TYPE = 1;\nstatic const long Cryptography_HAS_RELEASE_BUFFERS = 1;\nstatic const long Cryptography_HAS_OP_NO_COMPRESSION = 1;\nstatic const long Cryptography_HAS_TLSv1_1 = 1;\nstatic const long Cryptography_HAS_TLSv1_2 = 1;\nstatic const long Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING = 1;\nstatic const long Cryptography_HAS_SSL_OP_NO_TICKET = 1;\nstatic const long Cryptography_HAS_SSL_SET_SSL_CTX = 1;\nstatic const long Cryptography_HAS_NEXTPROTONEG = 1;\n\n/* SSL_get0_param was added in OpenSSL 1.0.2. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER\nX509_VERIFY_PARAM *(*SSL_get0_param)(SSL *) = NULL;\n#else\n#endif\n\n/* ALPN was added in OpenSSL 1.0.2. 
*/\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !CRYPTOGRAPHY_IS_LIBRESSL\nint (*SSL_CTX_set_alpn_protos)(SSL_CTX *,\n const unsigned char *,\n unsigned) = NULL;\nint (*SSL_set_alpn_protos)(SSL *, const unsigned char *, unsigned) = NULL;\nvoid (*SSL_CTX_set_alpn_select_cb)(SSL_CTX *,\n int (*) (SSL *,\n const unsigned char **,\n unsigned char *,\n const unsigned char *,\n unsigned int,\n void *),\n void *) = NULL;\nvoid (*SSL_get0_alpn_selected)(const SSL *,\n const unsigned char **,\n unsigned *) = NULL;\nstatic const long Cryptography_HAS_ALPN = 0;\n#else\nstatic const long Cryptography_HAS_ALPN = 1;\n#endif\n\n/* SSL_CTX_set_cert_cb was added in OpenSSL 1.0.2. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\nvoid (*SSL_CTX_set_cert_cb)(SSL_CTX *, int (*)(SSL *, void *), void *) = NULL;\nvoid (*SSL_set_cert_cb)(SSL *, int (*)(SSL *, void *), void *) = NULL;\nstatic const long Cryptography_HAS_SET_CERT_CB = 0;\n#else\nstatic const long Cryptography_HAS_SET_CERT_CB = 1;\n#endif\n\n\n/* In OpenSSL 1.0.2i+ the handling of COMP_METHOD when OPENSSL_NO_COMP was\n changed and we no longer need to typedef void */\n#if (defined(OPENSSL_NO_COMP) && CRYPTOGRAPHY_OPENSSL_LESS_THAN_102I) || \\\n CRYPTOGRAPHY_IS_LIBRESSL\nstatic const long Cryptography_HAS_COMPRESSION = 0;\ntypedef void COMP_METHOD;\n#else\nstatic const long Cryptography_HAS_COMPRESSION = 1;\n#endif\n\n#if defined(SSL_CTRL_GET_SERVER_TMP_KEY)\nstatic const long Cryptography_HAS_GET_SERVER_TMP_KEY = 1;\n#else\nstatic const long Cryptography_HAS_GET_SERVER_TMP_KEY = 0;\nlong (*SSL_get_server_tmp_key)(SSL *, EVP_PKEY **) = NULL;\n#endif\n\nstatic const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE = 1;\n\nstatic const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS = 1;\n\n/* in OpenSSL 1.1.0 the SSL_ST values were renamed to TLS_ST and several were\n removed */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110\nstatic const long Cryptography_HAS_SSL_ST = 1;\n#else\nstatic const long Cryptography_HAS_SSL_ST = 0;\nstatic const long SSL_ST_BEFORE = 0;\nstatic const long SSL_ST_OK = 0;\nstatic const long SSL_ST_INIT = 0;\nstatic const long SSL_ST_RENEGOTIATE = 0;\n#endif\n#if CRYPTOGRAPHY_OPENSSL_110_OR_GREATER\nstatic const long Cryptography_HAS_TLS_ST = 1;\n#else\nstatic const long Cryptography_HAS_TLS_ST = 0;\nstatic const long TLS_ST_BEFORE = 0;\nstatic const long TLS_ST_OK = 0;\n#endif\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\nstatic const long Cryptography_HAS_GENERIC_DTLS_METHOD = 0;\nconst SSL_METHOD *(*DTLS_method)(void) = NULL;\nconst SSL_METHOD *(*DTLS_server_method)(void) = NULL;\nconst SSL_METHOD *(*DTLS_client_method)(void) = NULL;\nstatic const long SSL_OP_NO_DTLSv1 = 0;\nstatic const long SSL_OP_NO_DTLSv1_2 = 0;\nlong (*DTLS_set_link_mtu)(SSL *, long) = NULL;\nlong (*DTLS_get_link_min_mtu)(SSL *) = NULL;\n#else\nstatic const long Cryptography_HAS_GENERIC_DTLS_METHOD = 1;\n#endif\n\nstatic const long Cryptography_HAS_DTLS = 1;\n/* Wrap DTLSv1_get_timeout to avoid cffi to handle a 'struct timeval'. 
*/\nlong Cryptography_DTLSv1_get_timeout(SSL *ssl, time_t *ptv_sec,\n long *ptv_usec) {\n struct timeval tv = { 0 };\n long r = DTLSv1_get_timeout(ssl, &tv);\n\n if (r == 1) {\n if (ptv_sec) {\n *ptv_sec = tv.tv_sec;\n }\n\n if (ptv_usec) {\n *ptv_usec = tv.tv_usec;\n }\n }\n\n return r;\n}\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\nstatic const long Cryptography_HAS_SIGALGS = 0;\nconst int (*SSL_get_sigalgs)(SSL *, int, int *, int *, int *, unsigned char *,\n unsigned char *) = NULL;\nconst long (*SSL_CTX_set1_sigalgs_list)(SSL_CTX *, const char *) = NULL;\n#else\nstatic const long Cryptography_HAS_SIGALGS = 1;\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL\nstatic const long Cryptography_HAS_PSK = 0;\nint (*SSL_CTX_use_psk_identity_hint)(SSL_CTX *, const char *) = NULL;\nvoid (*SSL_CTX_set_psk_server_callback)(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n unsigned char *,\n unsigned int\n )) = NULL;\nvoid (*SSL_CTX_set_psk_client_callback)(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n char *,\n unsigned int,\n unsigned char *,\n unsigned int\n )) = NULL;\n#else\nstatic const long Cryptography_HAS_PSK = 1;\n#endif\n\n/*\n * Custom extensions were added in 1.0.2. 1.1.1 is adding a more general\n * SSL_CTX_add_custom_ext function, but we're not binding that yet.\n */\n#if CRYPTOGRAPHY_OPENSSL_102_OR_GREATER\nstatic const long Cryptography_HAS_CUSTOM_EXT = 1;\n#else\nstatic const long Cryptography_HAS_CUSTOM_EXT = 0;\n\ntypedef int (*custom_ext_add_cb)(SSL *, unsigned int,\n const unsigned char **,\n size_t *, int *,\n void *);\n\ntypedef void (*custom_ext_free_cb)(SSL *, unsigned int,\n const unsigned char *,\n void *);\n\ntypedef int (*custom_ext_parse_cb)(SSL *, unsigned int,\n const unsigned char *,\n size_t, int *,\n void *);\n\nint (*SSL_CTX_add_client_custom_ext)(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *) = NULL;\n\nint (*SSL_CTX_add_server_custom_ext)(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *) = NULL;\n\nint (*SSL_extension_supported)(unsigned int) = NULL;\n#endif\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER\nint (*SSL_CIPHER_is_aead)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_cipher_nid)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_digest_nid)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_kx_nid)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_auth_nid)(const SSL_CIPHER *) = NULL;\nstatic const long Cryptography_HAS_CIPHER_DETAILS = 0;\n#else\nstatic const long Cryptography_HAS_CIPHER_DETAILS = 1;\n#endif\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_111\nstatic const long Cryptography_HAS_TLSv1_3 = 0;\nstatic const long SSL_OP_NO_TLSv1_3 = 0;\nstatic const long SSL_VERIFY_POST_HANDSHAKE = 0;\nint (*SSL_CTX_set_ciphersuites)(SSL_CTX *, const char *) = NULL;\nint (*SSL_verify_client_post_handshake)(SSL *) = NULL;\nvoid (*SSL_CTX_set_post_handshake_auth)(SSL_CTX *, int) = NULL;\nvoid (*SSL_set_post_handshake_auth)(SSL *, int) = NULL;\nuint32_t (*SSL_SESSION_get_max_early_data)(const SSL_SESSION *) = NULL;\nint (*SSL_write_early_data)(SSL *, const void *, size_t, size_t *) = NULL;\nint (*SSL_read_early_data)(SSL *, void *, size_t, size_t *) = NULL;\nint (*SSL_CTX_set_max_early_data)(SSL_CTX *, uint32_t) = NULL;\n#else\nstatic const long Cryptography_HAS_TLSv1_3 = 1;\n#endif\n\"\"\"\n", "path": "src/_cffi_src/openssl/ssl.py"}], "after_files": [{"content": "# This file is dual licensed under the 
terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <openssl/ssl.h>\n\ntypedef STACK_OF(SSL_CIPHER) Cryptography_STACK_OF_SSL_CIPHER;\n\"\"\"\n\nTYPES = \"\"\"\nstatic const long Cryptography_HAS_SSL_ST;\nstatic const long Cryptography_HAS_TLS_ST;\nstatic const long Cryptography_HAS_SSL2;\nstatic const long Cryptography_HAS_SSL3_METHOD;\nstatic const long Cryptography_HAS_TLSv1_1;\nstatic const long Cryptography_HAS_TLSv1_2;\nstatic const long Cryptography_HAS_TLSv1_3;\nstatic const long Cryptography_HAS_SECURE_RENEGOTIATION;\nstatic const long Cryptography_HAS_COMPRESSION;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_CB;\nstatic const long Cryptography_HAS_STATUS_REQ_OCSP_RESP;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_TYPE;\nstatic const long Cryptography_HAS_GET_SERVER_TMP_KEY;\nstatic const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE;\nstatic const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS;\nstatic const long Cryptography_HAS_DTLS;\nstatic const long Cryptography_HAS_GENERIC_DTLS_METHOD;\nstatic const long Cryptography_HAS_SIGALGS;\nstatic const long Cryptography_HAS_PSK;\nstatic const long Cryptography_HAS_CIPHER_DETAILS;\n\n/* Internally invented symbol to tell us if SNI is supported */\nstatic const long Cryptography_HAS_TLSEXT_HOSTNAME;\n\n/* Internally invented symbol to tell us if SSL_MODE_RELEASE_BUFFERS is\n * supported\n */\nstatic const long Cryptography_HAS_RELEASE_BUFFERS;\n\n/* Internally invented symbol to tell us if SSL_OP_NO_COMPRESSION is\n * supported\n */\nstatic const long Cryptography_HAS_OP_NO_COMPRESSION;\nstatic const long Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING;\nstatic const long Cryptography_HAS_SSL_SET_SSL_CTX;\nstatic const long Cryptography_HAS_SSL_OP_NO_TICKET;\nstatic const long Cryptography_HAS_ALPN;\nstatic const long Cryptography_HAS_NEXTPROTONEG;\nstatic const long Cryptography_HAS_SET_CERT_CB;\nstatic const long Cryptography_HAS_CUSTOM_EXT;\n\nstatic const long SSL_FILETYPE_PEM;\nstatic const long SSL_FILETYPE_ASN1;\nstatic const long SSL_ERROR_NONE;\nstatic const long SSL_ERROR_ZERO_RETURN;\nstatic const long SSL_ERROR_WANT_READ;\nstatic const long SSL_ERROR_WANT_WRITE;\nstatic const long SSL_ERROR_WANT_X509_LOOKUP;\nstatic const long SSL_ERROR_WANT_CONNECT;\nstatic const long SSL_ERROR_SYSCALL;\nstatic const long SSL_ERROR_SSL;\nstatic const long SSL_SENT_SHUTDOWN;\nstatic const long SSL_RECEIVED_SHUTDOWN;\nstatic const long SSL_OP_NO_SSLv2;\nstatic const long SSL_OP_NO_SSLv3;\nstatic const long SSL_OP_NO_TLSv1;\nstatic const long SSL_OP_NO_TLSv1_1;\nstatic const long SSL_OP_NO_TLSv1_2;\nstatic const long SSL_OP_NO_TLSv1_3;\nstatic const long SSL_OP_NO_DTLSv1;\nstatic const long SSL_OP_NO_DTLSv1_2;\nstatic const long SSL_OP_NO_COMPRESSION;\nstatic const long SSL_OP_SINGLE_DH_USE;\nstatic const long SSL_OP_EPHEMERAL_RSA;\nstatic const long SSL_OP_MICROSOFT_SESS_ID_BUG;\nstatic const long SSL_OP_NETSCAPE_CHALLENGE_BUG;\nstatic const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG;\nstatic const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG;\nstatic const long SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER;\nstatic const long SSL_OP_MSIE_SSLV2_RSA_PADDING;\nstatic const long SSL_OP_SSLEAY_080_CLIENT_DH_BUG;\nstatic const long SSL_OP_TLS_D5_BUG;\nstatic const long SSL_OP_TLS_BLOCK_PADDING_BUG;\nstatic const long 
SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS;\nstatic const long SSL_OP_CIPHER_SERVER_PREFERENCE;\nstatic const long SSL_OP_TLS_ROLLBACK_BUG;\nstatic const long SSL_OP_PKCS1_CHECK_1;\nstatic const long SSL_OP_PKCS1_CHECK_2;\nstatic const long SSL_OP_NETSCAPE_CA_DN_BUG;\nstatic const long SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG;\nstatic const long SSL_OP_NO_QUERY_MTU;\nstatic const long SSL_OP_COOKIE_EXCHANGE;\nstatic const long SSL_OP_NO_TICKET;\nstatic const long SSL_OP_ALL;\nstatic const long SSL_OP_SINGLE_ECDH_USE;\nstatic const long SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION;\nstatic const long SSL_OP_LEGACY_SERVER_CONNECT;\nstatic const long SSL_VERIFY_PEER;\nstatic const long SSL_VERIFY_FAIL_IF_NO_PEER_CERT;\nstatic const long SSL_VERIFY_CLIENT_ONCE;\nstatic const long SSL_VERIFY_NONE;\nstatic const long SSL_VERIFY_POST_HANDSHAKE;\nstatic const long SSL_SESS_CACHE_OFF;\nstatic const long SSL_SESS_CACHE_CLIENT;\nstatic const long SSL_SESS_CACHE_SERVER;\nstatic const long SSL_SESS_CACHE_BOTH;\nstatic const long SSL_SESS_CACHE_NO_AUTO_CLEAR;\nstatic const long SSL_SESS_CACHE_NO_INTERNAL_LOOKUP;\nstatic const long SSL_SESS_CACHE_NO_INTERNAL_STORE;\nstatic const long SSL_SESS_CACHE_NO_INTERNAL;\nstatic const long SSL_ST_CONNECT;\nstatic const long SSL_ST_ACCEPT;\nstatic const long SSL_ST_MASK;\nstatic const long SSL_ST_INIT;\nstatic const long SSL_ST_BEFORE;\nstatic const long SSL_ST_OK;\nstatic const long SSL_ST_RENEGOTIATE;\nstatic const long SSL_CB_LOOP;\nstatic const long SSL_CB_EXIT;\nstatic const long SSL_CB_READ;\nstatic const long SSL_CB_WRITE;\nstatic const long SSL_CB_ALERT;\nstatic const long SSL_CB_READ_ALERT;\nstatic const long SSL_CB_WRITE_ALERT;\nstatic const long SSL_CB_ACCEPT_LOOP;\nstatic const long SSL_CB_ACCEPT_EXIT;\nstatic const long SSL_CB_CONNECT_LOOP;\nstatic const long SSL_CB_CONNECT_EXIT;\nstatic const long SSL_CB_HANDSHAKE_START;\nstatic const long SSL_CB_HANDSHAKE_DONE;\nstatic const long SSL_MODE_RELEASE_BUFFERS;\nstatic const long SSL_MODE_ENABLE_PARTIAL_WRITE;\nstatic const long SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER;\nstatic const long SSL_MODE_AUTO_RETRY;\nstatic const long SSL3_RANDOM_SIZE;\nstatic const long TLS_ST_BEFORE;\nstatic const long TLS_ST_OK;\n\nstatic const long OPENSSL_NPN_NEGOTIATED;\n\ntypedef ... SSL_METHOD;\ntypedef ... SSL_CTX;\n\ntypedef ... SSL_SESSION;\n\ntypedef ... SSL;\n\nstatic const long TLSEXT_NAMETYPE_host_name;\nstatic const long TLSEXT_STATUSTYPE_ocsp;\n\ntypedef ... SSL_CIPHER;\ntypedef ... Cryptography_STACK_OF_SSL_CIPHER;\ntypedef ... 
COMP_METHOD;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n/* SSL */\nconst char *SSL_state_string_long(const SSL *);\nSSL_SESSION *SSL_get1_session(SSL *);\nint SSL_set_session(SSL *, SSL_SESSION *);\nint SSL_get_verify_mode(const SSL *);\nvoid SSL_set_verify(SSL *, int, int (*)(int, X509_STORE_CTX *));\nvoid SSL_set_verify_depth(SSL *, int);\nint SSL_get_verify_depth(const SSL *);\nint (*SSL_get_verify_callback(const SSL *))(int, X509_STORE_CTX *);\nvoid SSL_set_info_callback(SSL *ssl, void (*)(const SSL *, int, int));\nvoid (*SSL_get_info_callback(const SSL *))(const SSL *, int, int);\nSSL *SSL_new(SSL_CTX *);\nvoid SSL_free(SSL *);\nint SSL_set_fd(SSL *, int);\nSSL_CTX *SSL_get_SSL_CTX(const SSL *);\nSSL_CTX *SSL_set_SSL_CTX(SSL *, SSL_CTX *);\nBIO *SSL_get_rbio(const SSL *);\nBIO *SSL_get_wbio(const SSL *);\nvoid SSL_set_bio(SSL *, BIO *, BIO *);\nvoid SSL_set_connect_state(SSL *);\nvoid SSL_set_accept_state(SSL *);\nvoid SSL_set_shutdown(SSL *, int);\nint SSL_get_shutdown(const SSL *);\nint SSL_pending(const SSL *);\nint SSL_write(SSL *, const void *, int);\nint SSL_read(SSL *, void *, int);\nint SSL_peek(SSL *, void *, int);\nX509 *SSL_get_certificate(const SSL *);\nX509 *SSL_get_peer_certificate(const SSL *);\nint SSL_get_ex_data_X509_STORE_CTX_idx(void);\n\n/* Added in 1.0.2 */\nX509_VERIFY_PARAM *SSL_get0_param(SSL *);\n\nint SSL_use_certificate(SSL *, X509 *);\nint SSL_use_certificate_ASN1(SSL *, const unsigned char *, int);\nint SSL_use_certificate_file(SSL *, const char *, int);\nint SSL_use_PrivateKey(SSL *, EVP_PKEY *);\nint SSL_use_PrivateKey_ASN1(int, SSL *, const unsigned char *, long);\nint SSL_use_PrivateKey_file(SSL *, const char *, int);\nint SSL_check_private_key(const SSL *);\n\nint SSL_get_sigalgs(SSL *, int, int *, int *, int *, unsigned char *,\n unsigned char *);\n\nCryptography_STACK_OF_X509 *SSL_get_peer_cert_chain(const SSL *);\nCryptography_STACK_OF_X509_NAME *SSL_get_client_CA_list(const SSL *);\n\nint SSL_get_error(const SSL *, int);\nint SSL_do_handshake(SSL *);\nint SSL_shutdown(SSL *);\nint SSL_renegotiate(SSL *);\nint SSL_renegotiate_pending(SSL *);\nconst char *SSL_get_cipher_list(const SSL *, int);\nCryptography_STACK_OF_SSL_CIPHER *SSL_get_ciphers(const SSL *);\n\n/* context */\nvoid SSL_CTX_free(SSL_CTX *);\nlong SSL_CTX_set_timeout(SSL_CTX *, long);\nint SSL_CTX_set_default_verify_paths(SSL_CTX *);\nvoid SSL_CTX_set_verify(SSL_CTX *, int, int (*)(int, X509_STORE_CTX *));\nvoid SSL_CTX_set_verify_depth(SSL_CTX *, int);\nint (*SSL_CTX_get_verify_callback(const SSL_CTX *))(int, X509_STORE_CTX *);\nint SSL_CTX_get_verify_mode(const SSL_CTX *);\nint SSL_CTX_get_verify_depth(const SSL_CTX *);\nint SSL_CTX_set_cipher_list(SSL_CTX *, const char *);\nint SSL_CTX_load_verify_locations(SSL_CTX *, const char *, const char *);\nvoid SSL_CTX_set_default_passwd_cb(SSL_CTX *, pem_password_cb *);\nvoid SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *, void *);\nint SSL_CTX_use_certificate(SSL_CTX *, X509 *);\nint SSL_CTX_use_certificate_ASN1(SSL_CTX *, int, const unsigned char *);\nint SSL_CTX_use_certificate_file(SSL_CTX *, const char *, int);\nint SSL_CTX_use_certificate_chain_file(SSL_CTX *, const char *);\nint SSL_CTX_use_PrivateKey(SSL_CTX *, EVP_PKEY *);\nint SSL_CTX_use_PrivateKey_ASN1(int, SSL_CTX *, const unsigned char *, long);\nint SSL_CTX_use_PrivateKey_file(SSL_CTX *, const char *, int);\nint SSL_CTX_check_private_key(const SSL_CTX *);\nvoid SSL_CTX_set_cert_verify_callback(SSL_CTX *,\n int (*)(X509_STORE_CTX *, void *),\n void *);\n\nvoid 
SSL_CTX_set_cookie_generate_cb(SSL_CTX *,\n int (*)(\n SSL *,\n unsigned char *,\n unsigned int *\n ));\nlong SSL_CTX_get_read_ahead(SSL_CTX *);\nlong SSL_CTX_set_read_ahead(SSL_CTX *, long);\n\nint SSL_CTX_use_psk_identity_hint(SSL_CTX *, const char *);\nvoid SSL_CTX_set_psk_server_callback(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n unsigned char *,\n unsigned int\n ));\nvoid SSL_CTX_set_psk_client_callback(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n char *,\n unsigned int,\n unsigned char *,\n unsigned int\n ));\n\nint SSL_CTX_set_session_id_context(SSL_CTX *, const unsigned char *,\n unsigned int);\n\nvoid SSL_CTX_set_cert_store(SSL_CTX *, X509_STORE *);\nX509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *);\nint SSL_CTX_add_client_CA(SSL_CTX *, X509 *);\n\nvoid SSL_CTX_set_client_CA_list(SSL_CTX *, Cryptography_STACK_OF_X509_NAME *);\n\nvoid SSL_CTX_set_info_callback(SSL_CTX *, void (*)(const SSL *, int, int));\nvoid (*SSL_CTX_get_info_callback(SSL_CTX *))(const SSL *, int, int);\n\nlong SSL_CTX_set1_sigalgs_list(SSL_CTX *, const char *);\n\n/* SSL_SESSION */\nvoid SSL_SESSION_free(SSL_SESSION *);\n\n/* Information about actually used cipher */\nconst char *SSL_CIPHER_get_name(const SSL_CIPHER *);\nint SSL_CIPHER_get_bits(const SSL_CIPHER *, int *);\n/* the modern signature of this is uint32_t, but older openssl declared it\n as unsigned long. To make our compiler flags happy we'll declare it as a\n 64-bit wide value, which should always be safe */\nuint64_t SSL_CIPHER_get_id(const SSL_CIPHER *);\nint SSL_CIPHER_is_aead(const SSL_CIPHER *);\nint SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *);\nint SSL_CIPHER_get_digest_nid(const SSL_CIPHER *);\nint SSL_CIPHER_get_kx_nid(const SSL_CIPHER *);\nint SSL_CIPHER_get_auth_nid(const SSL_CIPHER *);\n\nsize_t SSL_get_finished(const SSL *, void *, size_t);\nsize_t SSL_get_peer_finished(const SSL *, void *, size_t);\nCryptography_STACK_OF_X509_NAME *SSL_load_client_CA_file(const char *);\n\nconst char *SSL_get_servername(const SSL *, const int);\n/* Function signature changed to const char * in 1.1.0 */\nconst char *SSL_CIPHER_get_version(const SSL_CIPHER *);\n/* These became macros in 1.1.0 */\nint SSL_library_init(void);\nvoid SSL_load_error_strings(void);\n\n/* these CRYPTO_EX_DATA functions became macros in 1.1.0 */\nint SSL_get_ex_new_index(long, void *, CRYPTO_EX_new *, CRYPTO_EX_dup *,\n CRYPTO_EX_free *);\nint SSL_set_ex_data(SSL *, int, void *);\nint SSL_CTX_get_ex_new_index(long, void *, CRYPTO_EX_new *, CRYPTO_EX_dup *,\n CRYPTO_EX_free *);\nint SSL_CTX_set_ex_data(SSL_CTX *, int, void *);\n\nSSL_SESSION *SSL_get_session(const SSL *);\nconst unsigned char *SSL_SESSION_get_id(const SSL_SESSION *, unsigned int *);\nlong SSL_SESSION_get_time(const SSL_SESSION *);\nlong SSL_SESSION_get_timeout(const SSL_SESSION *);\nint SSL_SESSION_has_ticket(const SSL_SESSION *);\nlong SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *);\n\n/* not a macro, but older OpenSSLs don't pass the args as const */\nchar *SSL_CIPHER_description(const SSL_CIPHER *, char *, int);\nint SSL_SESSION_print(BIO *, const SSL_SESSION *);\n\n/* not macros, but will be conditionally bound so can't live in functions */\nconst COMP_METHOD *SSL_get_current_compression(SSL *);\nconst COMP_METHOD *SSL_get_current_expansion(SSL *);\nconst char *SSL_COMP_get_name(const COMP_METHOD *);\nint SSL_CTX_set_client_cert_engine(SSL_CTX *, ENGINE *);\n\nunsigned long SSL_set_mode(SSL *, unsigned long);\nunsigned long SSL_get_mode(SSL *);\n\nunsigned long 
SSL_set_options(SSL *, unsigned long);\nunsigned long SSL_get_options(SSL *);\n\nvoid SSL_set_app_data(SSL *, char *);\nchar * SSL_get_app_data(SSL *);\nvoid SSL_set_read_ahead(SSL *, int);\n\nint SSL_want_read(const SSL *);\nint SSL_want_write(const SSL *);\n\nlong SSL_total_renegotiations(SSL *);\nlong SSL_get_secure_renegotiation_support(SSL *);\n\n/* Defined as unsigned long because SSL_OP_ALL is greater than signed 32-bit\n and Windows defines long as 32-bit. */\nunsigned long SSL_CTX_set_options(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_clear_options(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_get_options(SSL_CTX *);\nunsigned long SSL_CTX_set_mode(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_get_mode(SSL_CTX *);\nunsigned long SSL_CTX_set_session_cache_mode(SSL_CTX *, unsigned long);\nunsigned long SSL_CTX_get_session_cache_mode(SSL_CTX *);\nunsigned long SSL_CTX_set_tmp_dh(SSL_CTX *, DH *);\nunsigned long SSL_CTX_set_tmp_ecdh(SSL_CTX *, EC_KEY *);\nunsigned long SSL_CTX_add_extra_chain_cert(SSL_CTX *, X509 *);\n\n/*- These aren't macros these functions are all const X on openssl > 1.0.x -*/\n\n/* methods */\n\n/*\n * TLSv1_1 and TLSv1_2 are recent additions. Only sufficiently new versions of\n * OpenSSL support them.\n */\nconst SSL_METHOD *TLSv1_1_method(void);\nconst SSL_METHOD *TLSv1_1_server_method(void);\nconst SSL_METHOD *TLSv1_1_client_method(void);\n\nconst SSL_METHOD *TLSv1_2_method(void);\nconst SSL_METHOD *TLSv1_2_server_method(void);\nconst SSL_METHOD *TLSv1_2_client_method(void);\n\nconst SSL_METHOD *SSLv3_method(void);\nconst SSL_METHOD *SSLv3_server_method(void);\nconst SSL_METHOD *SSLv3_client_method(void);\n\nconst SSL_METHOD *TLSv1_method(void);\nconst SSL_METHOD *TLSv1_server_method(void);\nconst SSL_METHOD *TLSv1_client_method(void);\n\nconst SSL_METHOD *DTLSv1_method(void);\nconst SSL_METHOD *DTLSv1_server_method(void);\nconst SSL_METHOD *DTLSv1_client_method(void);\n\n/* Added in 1.0.2 */\nconst SSL_METHOD *DTLS_method(void);\nconst SSL_METHOD *DTLS_server_method(void);\nconst SSL_METHOD *DTLS_client_method(void);\n\nconst SSL_METHOD *SSLv23_method(void);\nconst SSL_METHOD *SSLv23_server_method(void);\nconst SSL_METHOD *SSLv23_client_method(void);\n\n/*- These aren't macros these arguments are all const X on openssl > 1.0.x -*/\nSSL_CTX *SSL_CTX_new(SSL_METHOD *);\nlong SSL_CTX_get_timeout(const SSL_CTX *);\n\nconst SSL_CIPHER *SSL_get_current_cipher(const SSL *);\nconst char *SSL_get_version(const SSL *);\nint SSL_version(const SSL *);\n\nvoid *SSL_CTX_get_ex_data(const SSL_CTX *, int);\nvoid *SSL_get_ex_data(const SSL *, int);\n\nvoid SSL_set_tlsext_host_name(SSL *, char *);\nvoid SSL_CTX_set_tlsext_servername_callback(\n SSL_CTX *,\n int (*)(SSL *, int *, void *));\nvoid SSL_CTX_set_tlsext_servername_arg(\n SSL_CTX *, void *);\n\nlong SSL_set_tlsext_status_ocsp_resp(SSL *, unsigned char *, int);\nlong SSL_get_tlsext_status_ocsp_resp(SSL *, const unsigned char **);\nlong SSL_set_tlsext_status_type(SSL *, long);\nlong SSL_CTX_set_tlsext_status_cb(SSL_CTX *, int(*)(SSL *, void *));\nlong SSL_CTX_set_tlsext_status_arg(SSL_CTX *, void *);\n\nint SSL_CTX_set_tlsext_use_srtp(SSL_CTX *, const char *);\nint SSL_set_tlsext_use_srtp(SSL *, const char *);\n\nlong SSL_session_reused(SSL *);\n\nvoid SSL_CTX_set_next_protos_advertised_cb(SSL_CTX *,\n int (*)(SSL *,\n const unsigned char **,\n unsigned int *,\n void *),\n void *);\nvoid SSL_CTX_set_next_proto_select_cb(SSL_CTX *,\n int (*)(SSL *,\n unsigned char **,\n unsigned char *,\n const unsigned 
char *,\n unsigned int,\n void *),\n void *);\nint SSL_select_next_proto(unsigned char **, unsigned char *,\n const unsigned char *, unsigned int,\n const unsigned char *, unsigned int);\nvoid SSL_get0_next_proto_negotiated(const SSL *,\n const unsigned char **, unsigned *);\n\nint sk_SSL_CIPHER_num(Cryptography_STACK_OF_SSL_CIPHER *);\nconst SSL_CIPHER *sk_SSL_CIPHER_value(Cryptography_STACK_OF_SSL_CIPHER *, int);\n\n/* ALPN APIs were introduced in OpenSSL 1.0.2. To continue to support earlier\n * versions some special handling of these is necessary.\n */\nint SSL_CTX_set_alpn_protos(SSL_CTX *, const unsigned char *, unsigned);\nint SSL_set_alpn_protos(SSL *, const unsigned char *, unsigned);\nvoid SSL_CTX_set_alpn_select_cb(SSL_CTX *,\n int (*) (SSL *,\n const unsigned char **,\n unsigned char *,\n const unsigned char *,\n unsigned int,\n void *),\n void *);\nvoid SSL_get0_alpn_selected(const SSL *, const unsigned char **, unsigned *);\n\nlong SSL_get_server_tmp_key(SSL *, EVP_PKEY **);\n\n/* SSL_CTX_set_cert_cb is introduced in OpenSSL 1.0.2. To continue to support\n * earlier versions some special handling of these is necessary.\n */\nvoid SSL_CTX_set_cert_cb(SSL_CTX *, int (*)(SSL *, void *), void *);\nvoid SSL_set_cert_cb(SSL *, int (*)(SSL *, void *), void *);\n\n/* Added in 1.0.2 */\nconst SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *);\n\nint SSL_SESSION_set1_id_context(SSL_SESSION *, const unsigned char *,\n unsigned int);\n/* Added in 1.1.0 for the great opaquing of structs */\nsize_t SSL_SESSION_get_master_key(const SSL_SESSION *, unsigned char *,\n size_t);\nsize_t SSL_get_client_random(const SSL *, unsigned char *, size_t);\nsize_t SSL_get_server_random(const SSL *, unsigned char *, size_t);\nint SSL_export_keying_material(SSL *, unsigned char *, size_t, const char *,\n size_t, const unsigned char *, size_t, int);\n\nlong SSL_CTX_sess_number(SSL_CTX *);\nlong SSL_CTX_sess_connect(SSL_CTX *);\nlong SSL_CTX_sess_connect_good(SSL_CTX *);\nlong SSL_CTX_sess_connect_renegotiate(SSL_CTX *);\nlong SSL_CTX_sess_accept(SSL_CTX *);\nlong SSL_CTX_sess_accept_good(SSL_CTX *);\nlong SSL_CTX_sess_accept_renegotiate(SSL_CTX *);\nlong SSL_CTX_sess_hits(SSL_CTX *);\nlong SSL_CTX_sess_cb_hits(SSL_CTX *);\nlong SSL_CTX_sess_misses(SSL_CTX *);\nlong SSL_CTX_sess_timeouts(SSL_CTX *);\nlong SSL_CTX_sess_cache_full(SSL_CTX *);\n\n/* DTLS support */\nlong Cryptography_DTLSv1_get_timeout(SSL *, time_t *, long *);\nlong DTLSv1_handle_timeout(SSL *);\nlong DTLS_set_link_mtu(SSL *, long);\nlong DTLS_get_link_min_mtu(SSL *);\n\n/* Custom extensions. 
*/\ntypedef int (*custom_ext_add_cb)(SSL *, unsigned int,\n const unsigned char **,\n size_t *, int *,\n void *);\n\ntypedef void (*custom_ext_free_cb)(SSL *, unsigned int,\n const unsigned char *,\n void *);\n\ntypedef int (*custom_ext_parse_cb)(SSL *, unsigned int,\n const unsigned char *,\n size_t, int *,\n void *);\n\nint SSL_CTX_add_client_custom_ext(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *);\n\nint SSL_CTX_add_server_custom_ext(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *);\n\nint SSL_extension_supported(unsigned int);\n\nint SSL_CTX_set_ciphersuites(SSL_CTX *, const char *);\nint SSL_verify_client_post_handshake(SSL *);\nvoid SSL_CTX_set_post_handshake_auth(SSL_CTX *, int);\nvoid SSL_set_post_handshake_auth(SSL *, int);\n\nuint32_t SSL_SESSION_get_max_early_data(const SSL_SESSION *);\nint SSL_write_early_data(SSL *, const void *, size_t, size_t *);\nint SSL_read_early_data(SSL *, void *, size_t, size_t *);\nint SSL_CTX_set_max_early_data(SSL_CTX *, uint32_t);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n/* Added in 1.0.2 but we need it in all versions now due to the great\n opaquing. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\n/* from ssl/ssl_lib.c */\nconst SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *ctx) {\n return ctx->method;\n}\n#endif\n\n/* Added in 1.1.0 in the great opaquing, but we need to define it for older\n OpenSSLs. Such is our burden. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER\n/* from ssl/ssl_lib.c */\nsize_t SSL_get_client_random(const SSL *ssl, unsigned char *out, size_t outlen)\n{\n if (outlen == 0)\n return sizeof(ssl->s3->client_random);\n if (outlen > sizeof(ssl->s3->client_random))\n outlen = sizeof(ssl->s3->client_random);\n memcpy(out, ssl->s3->client_random, outlen);\n return outlen;\n}\n/* Added in 1.1.0 as well */\n/* from ssl/ssl_lib.c */\nsize_t SSL_get_server_random(const SSL *ssl, unsigned char *out, size_t outlen)\n{\n if (outlen == 0)\n return sizeof(ssl->s3->server_random);\n if (outlen > sizeof(ssl->s3->server_random))\n outlen = sizeof(ssl->s3->server_random);\n memcpy(out, ssl->s3->server_random, outlen);\n return outlen;\n}\n/* Added in 1.1.0 as well */\n/* from ssl/ssl_lib.c */\nsize_t SSL_SESSION_get_master_key(const SSL_SESSION *session,\n unsigned char *out, size_t outlen)\n{\n if (session->master_key_length < 0) {\n /* Should never happen */\n return 0;\n }\n if (outlen == 0)\n return session->master_key_length;\n if (outlen > (size_t)session->master_key_length)\n outlen = session->master_key_length;\n memcpy(out, session->master_key, outlen);\n return outlen;\n}\n/* from ssl/ssl_sess.c */\nint SSL_SESSION_has_ticket(const SSL_SESSION *s)\n{\n return (s->tlsext_ticklen > 0) ? 1 : 0;\n}\n/* from ssl/ssl_sess.c */\nunsigned long SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *s)\n{\n return s->tlsext_tick_lifetime_hint;\n}\n#endif\n\nstatic const long Cryptography_HAS_SECURE_RENEGOTIATION = 1;\n\n/* Cryptography now compiles out all SSLv2 bindings. 
This exists to allow\n * clients that use it to check for SSLv2 support to keep functioning as\n * expected.\n */\nstatic const long Cryptography_HAS_SSL2 = 0;\n\n#ifdef OPENSSL_NO_SSL3_METHOD\nstatic const long Cryptography_HAS_SSL3_METHOD = 0;\nSSL_METHOD* (*SSLv3_method)(void) = NULL;\nSSL_METHOD* (*SSLv3_client_method)(void) = NULL;\nSSL_METHOD* (*SSLv3_server_method)(void) = NULL;\n#else\nstatic const long Cryptography_HAS_SSL3_METHOD = 1;\n#endif\n\nstatic const long Cryptography_HAS_TLSEXT_HOSTNAME = 1;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_CB = 1;\nstatic const long Cryptography_HAS_STATUS_REQ_OCSP_RESP = 1;\nstatic const long Cryptography_HAS_TLSEXT_STATUS_REQ_TYPE = 1;\nstatic const long Cryptography_HAS_RELEASE_BUFFERS = 1;\nstatic const long Cryptography_HAS_OP_NO_COMPRESSION = 1;\nstatic const long Cryptography_HAS_TLSv1_1 = 1;\nstatic const long Cryptography_HAS_TLSv1_2 = 1;\nstatic const long Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING = 1;\nstatic const long Cryptography_HAS_SSL_OP_NO_TICKET = 1;\nstatic const long Cryptography_HAS_SSL_SET_SSL_CTX = 1;\nstatic const long Cryptography_HAS_NEXTPROTONEG = 1;\n\n/* SSL_get0_param was added in OpenSSL 1.0.2. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER\nX509_VERIFY_PARAM *(*SSL_get0_param)(SSL *) = NULL;\n#else\n#endif\n\n/* ALPN was added in OpenSSL 1.0.2. */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !CRYPTOGRAPHY_IS_LIBRESSL\nint (*SSL_CTX_set_alpn_protos)(SSL_CTX *,\n const unsigned char *,\n unsigned) = NULL;\nint (*SSL_set_alpn_protos)(SSL *, const unsigned char *, unsigned) = NULL;\nvoid (*SSL_CTX_set_alpn_select_cb)(SSL_CTX *,\n int (*) (SSL *,\n const unsigned char **,\n unsigned char *,\n const unsigned char *,\n unsigned int,\n void *),\n void *) = NULL;\nvoid (*SSL_get0_alpn_selected)(const SSL *,\n const unsigned char **,\n unsigned *) = NULL;\nstatic const long Cryptography_HAS_ALPN = 0;\n#else\nstatic const long Cryptography_HAS_ALPN = 1;\n#endif\n\n/* SSL_CTX_set_cert_cb was added in OpenSSL 1.0.2. 
*/\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\nvoid (*SSL_CTX_set_cert_cb)(SSL_CTX *, int (*)(SSL *, void *), void *) = NULL;\nvoid (*SSL_set_cert_cb)(SSL *, int (*)(SSL *, void *), void *) = NULL;\nstatic const long Cryptography_HAS_SET_CERT_CB = 0;\n#else\nstatic const long Cryptography_HAS_SET_CERT_CB = 1;\n#endif\n\n\n/* In OpenSSL 1.0.2i+ the handling of COMP_METHOD when OPENSSL_NO_COMP was\n changed and we no longer need to typedef void */\n#if (defined(OPENSSL_NO_COMP) && CRYPTOGRAPHY_OPENSSL_LESS_THAN_102I) || \\\n CRYPTOGRAPHY_IS_LIBRESSL\nstatic const long Cryptography_HAS_COMPRESSION = 0;\ntypedef void COMP_METHOD;\n#else\nstatic const long Cryptography_HAS_COMPRESSION = 1;\n#endif\n\n#if defined(SSL_CTRL_GET_SERVER_TMP_KEY)\nstatic const long Cryptography_HAS_GET_SERVER_TMP_KEY = 1;\n#else\nstatic const long Cryptography_HAS_GET_SERVER_TMP_KEY = 0;\nlong (*SSL_get_server_tmp_key)(SSL *, EVP_PKEY **) = NULL;\n#endif\n\nstatic const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE = 1;\n\nstatic const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS = 1;\n\n/* in OpenSSL 1.1.0 the SSL_ST values were renamed to TLS_ST and several were\n removed */\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110\nstatic const long Cryptography_HAS_SSL_ST = 1;\n#else\nstatic const long Cryptography_HAS_SSL_ST = 0;\nstatic const long SSL_ST_BEFORE = 0;\nstatic const long SSL_ST_OK = 0;\nstatic const long SSL_ST_INIT = 0;\nstatic const long SSL_ST_RENEGOTIATE = 0;\n#endif\n#if CRYPTOGRAPHY_OPENSSL_110_OR_GREATER\nstatic const long Cryptography_HAS_TLS_ST = 1;\n#else\nstatic const long Cryptography_HAS_TLS_ST = 0;\nstatic const long TLS_ST_BEFORE = 0;\nstatic const long TLS_ST_OK = 0;\n#endif\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\nstatic const long Cryptography_HAS_GENERIC_DTLS_METHOD = 0;\nconst SSL_METHOD *(*DTLS_method)(void) = NULL;\nconst SSL_METHOD *(*DTLS_server_method)(void) = NULL;\nconst SSL_METHOD *(*DTLS_client_method)(void) = NULL;\nstatic const long SSL_OP_NO_DTLSv1 = 0;\nstatic const long SSL_OP_NO_DTLSv1_2 = 0;\nlong (*DTLS_set_link_mtu)(SSL *, long) = NULL;\nlong (*DTLS_get_link_min_mtu)(SSL *) = NULL;\n#else\nstatic const long Cryptography_HAS_GENERIC_DTLS_METHOD = 1;\n#endif\n\nstatic const long Cryptography_HAS_DTLS = 1;\n/* Wrap DTLSv1_get_timeout to avoid cffi to handle a 'struct timeval'. 
*/\nlong Cryptography_DTLSv1_get_timeout(SSL *ssl, time_t *ptv_sec,\n long *ptv_usec) {\n struct timeval tv = { 0 };\n long r = DTLSv1_get_timeout(ssl, &tv);\n\n if (r == 1) {\n if (ptv_sec) {\n *ptv_sec = tv.tv_sec;\n }\n\n if (ptv_usec) {\n *ptv_usec = tv.tv_usec;\n }\n }\n\n return r;\n}\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102\nstatic const long Cryptography_HAS_SIGALGS = 0;\nconst int (*SSL_get_sigalgs)(SSL *, int, int *, int *, int *, unsigned char *,\n unsigned char *) = NULL;\nconst long (*SSL_CTX_set1_sigalgs_list)(SSL_CTX *, const char *) = NULL;\n#else\nstatic const long Cryptography_HAS_SIGALGS = 1;\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL || defined(OPENSSL_NO_PSK)\nstatic const long Cryptography_HAS_PSK = 0;\nint (*SSL_CTX_use_psk_identity_hint)(SSL_CTX *, const char *) = NULL;\nvoid (*SSL_CTX_set_psk_server_callback)(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n unsigned char *,\n unsigned int\n )) = NULL;\nvoid (*SSL_CTX_set_psk_client_callback)(SSL_CTX *,\n unsigned int (*)(\n SSL *,\n const char *,\n char *,\n unsigned int,\n unsigned char *,\n unsigned int\n )) = NULL;\n#else\nstatic const long Cryptography_HAS_PSK = 1;\n#endif\n\n/*\n * Custom extensions were added in 1.0.2. 1.1.1 is adding a more general\n * SSL_CTX_add_custom_ext function, but we're not binding that yet.\n */\n#if CRYPTOGRAPHY_OPENSSL_102_OR_GREATER\nstatic const long Cryptography_HAS_CUSTOM_EXT = 1;\n#else\nstatic const long Cryptography_HAS_CUSTOM_EXT = 0;\n\ntypedef int (*custom_ext_add_cb)(SSL *, unsigned int,\n const unsigned char **,\n size_t *, int *,\n void *);\n\ntypedef void (*custom_ext_free_cb)(SSL *, unsigned int,\n const unsigned char *,\n void *);\n\ntypedef int (*custom_ext_parse_cb)(SSL *, unsigned int,\n const unsigned char *,\n size_t, int *,\n void *);\n\nint (*SSL_CTX_add_client_custom_ext)(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *) = NULL;\n\nint (*SSL_CTX_add_server_custom_ext)(SSL_CTX *, unsigned int,\n custom_ext_add_cb,\n custom_ext_free_cb, void *,\n custom_ext_parse_cb,\n void *) = NULL;\n\nint (*SSL_extension_supported)(unsigned int) = NULL;\n#endif\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 && !CRYPTOGRAPHY_LIBRESSL_27_OR_GREATER\nint (*SSL_CIPHER_is_aead)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_cipher_nid)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_digest_nid)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_kx_nid)(const SSL_CIPHER *) = NULL;\nint (*SSL_CIPHER_get_auth_nid)(const SSL_CIPHER *) = NULL;\nstatic const long Cryptography_HAS_CIPHER_DETAILS = 0;\n#else\nstatic const long Cryptography_HAS_CIPHER_DETAILS = 1;\n#endif\n\n#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_111\nstatic const long Cryptography_HAS_TLSv1_3 = 0;\nstatic const long SSL_OP_NO_TLSv1_3 = 0;\nstatic const long SSL_VERIFY_POST_HANDSHAKE = 0;\nint (*SSL_CTX_set_ciphersuites)(SSL_CTX *, const char *) = NULL;\nint (*SSL_verify_client_post_handshake)(SSL *) = NULL;\nvoid (*SSL_CTX_set_post_handshake_auth)(SSL_CTX *, int) = NULL;\nvoid (*SSL_set_post_handshake_auth)(SSL *, int) = NULL;\nuint32_t (*SSL_SESSION_get_max_early_data)(const SSL_SESSION *) = NULL;\nint (*SSL_write_early_data)(SSL *, const void *, size_t, size_t *) = NULL;\nint (*SSL_read_early_data)(SSL *, void *, size_t, size_t *) = NULL;\nint (*SSL_CTX_set_max_early_data)(SSL_CTX *, uint32_t) = NULL;\n#else\nstatic const long Cryptography_HAS_TLSv1_3 = 1;\n#endif\n\"\"\"\n", "path": "src/_cffi_src/openssl/ssl.py"}]} |
gh_patches_debug_1323 | rasdani/github-patches | git_diff | google__jax-4847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Python 3.9 wheels for jaxlib
Sorry for the bother, but I'm having some problems with installing jaxlib in Python 3.9:
```
> pip install jaxlib==0.1.55
ERROR: Could not find a version that satisfies the requirement jaxlib==0.1.55 (from versions: 0.1, 0.1.1, 0.1.4)
ERROR: No matching distribution found for jaxlib==0.1.55
```
@hawkinsp
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build/build.py`
Content:
```
1 #!/usr/bin/python
2 #
3 # Copyright 2018 Google LLC
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # https://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 # Helper script for building JAX's libjax easily.
18
19
20 import argparse
21 import collections
22 import hashlib
23 import os
24 import platform
25 import re
26 import shutil
27 import stat
28 import subprocess
29 import sys
30 import urllib
31
32 # pylint: disable=g-import-not-at-top
33 if hasattr(urllib, "urlretrieve"):
34 urlretrieve = urllib.urlretrieve
35 else:
36 import urllib.request
37 urlretrieve = urllib.request.urlretrieve
38
39 if hasattr(shutil, "which"):
40 which = shutil.which
41 else:
42 from distutils.spawn import find_executable as which
43 # pylint: enable=g-import-not-at-top
44
45
46 def shell(cmd):
47 output = subprocess.check_output(cmd)
48 return output.decode("UTF-8").strip()
49
50
51 # Python
52
53 def get_python_bin_path(python_bin_path_flag):
54 """Returns the path to the Python interpreter to use."""
55 return python_bin_path_flag or sys.executable
56
57
58 def get_python_version(python_bin_path):
59 version_output = shell(
60 [python_bin_path, "-c",
61 "import sys; print(\"{}.{}\".format(sys.version_info[0], "
62 "sys.version_info[1]))"])
63 major, minor = map(int, version_output.split("."))
64 return major, minor
65
66 def check_python_version(python_version):
67 if python_version < (3, 6):
68 print("JAX requires Python 3.6 or newer.")
69 sys.exit(-1)
70
71
72 # Bazel
73
74 BAZEL_BASE_URI = "https://github.com/bazelbuild/bazel/releases/download/2.0.0/"
75 BazelPackage = collections.namedtuple("BazelPackage", ["file", "sha256"])
76 bazel_packages = {
77 "Linux":
78 BazelPackage(
79 file="bazel-2.0.0-linux-x86_64",
80 sha256=
81 "4df79462c6c3ecdeeee7af99fc269b52ab1aa4828ef3bc359c1837d3fafeeee7"),
82 "Darwin":
83 BazelPackage(
84 file="bazel-2.0.0-darwin-x86_64",
85 sha256=
86 "3eca4c96cfda97a9d5f8d3d0dec4155a5cc5ff339b10d3f35213c398bf13881e"),
87 }
88
89
90 def download_and_verify_bazel():
91 """Downloads a bazel binary from Github, verifying its SHA256 hash."""
92 package = bazel_packages.get(platform.system())
93 if package is None:
94 return None
95
96 if not os.access(package.file, os.X_OK):
97 uri = BAZEL_BASE_URI + package.file
98 sys.stdout.write("Downloading bazel from: {}\n".format(uri))
99
100 def progress(block_count, block_size, total_size):
101 if total_size <= 0:
102 total_size = 170**6
103 progress = (block_count * block_size) / total_size
104 num_chars = 40
105 progress_chars = int(num_chars * progress)
106 sys.stdout.write("{} [{}{}] {}%\r".format(
107 package.file, "#" * progress_chars,
108 "." * (num_chars - progress_chars), int(progress * 100.0)))
109
110 tmp_path, _ = urlretrieve(uri, None,
111 progress if sys.stdout.isatty() else None)
112 sys.stdout.write("\n")
113
114 # Verify that the downloaded Bazel binary has the expected SHA256.
115 with open(tmp_path, "rb") as downloaded_file:
116 contents = downloaded_file.read()
117
118 digest = hashlib.sha256(contents).hexdigest()
119 if digest != package.sha256:
120 print(
121 "Checksum mismatch for downloaded bazel binary (expected {}; got {})."
122 .format(package.sha256, digest))
123 sys.exit(-1)
124
125 # Write the file as the bazel file name.
126 with open(package.file, "wb") as out_file:
127 out_file.write(contents)
128
129 # Mark the file as executable.
130 st = os.stat(package.file)
131 os.chmod(package.file,
132 st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
133
134 return "./" + package.file
135
136
137 def get_bazel_path(bazel_path_flag):
138 """Returns the path to a Bazel binary, downloading Bazel if not found."""
139 if bazel_path_flag:
140 return bazel_path_flag
141
142 bazel = download_and_verify_bazel()
143 if bazel:
144 return bazel
145
146 bazel = which("bazel")
147 if bazel:
148 return bazel
149
150 print("Cannot find or download bazel. Please install bazel.")
151 sys.exit(-1)
152
153
154 def check_bazel_version(bazel_path, min_version, max_version):
155 """Checks Bazel's version is in the range [`min_version`, `max_version`)."""
156 version_output = shell([bazel_path, "--bazelrc=/dev/null", "version"])
157 match = re.search("Build label: *([0-9\\.]+)[^0-9\\.]", version_output)
158 if match is None:
159 print("Warning: bazel installation is not a release version. Make sure "
160 "bazel is at least {}".format(min_version))
161 return
162 version = match.group(1)
163 min_ints = [int(x) for x in min_version.split(".")]
164 actual_ints = [int(x) for x in match.group(1).split(".")]
165 if min_ints > actual_ints:
166 print("Outdated bazel revision (>= {} required, found {})".format(
167 min_version, version))
168 sys.exit(-1)
169 if max_version is not None:
170 max_ints = [int(x) for x in max_version.split(".")]
171 if actual_ints >= max_ints:
172 print("Please downgrade your bazel revision to build JAX (>= {} and < {}"
173 " required, found {})".format(min_version, max_version, version))
174 sys.exit(-1)
175
176
177 BAZELRC_TEMPLATE = """
178 # Flag to enable remote config
179 common --experimental_repo_remote_exec
180
181 build --repo_env PYTHON_BIN_PATH="{python_bin_path}"
182 build --python_path="{python_bin_path}"
183 build --repo_env TF_NEED_CUDA="{tf_need_cuda}"
184 build --action_env TF_CUDA_COMPUTE_CAPABILITIES="{cuda_compute_capabilities}"
185 build --distinct_host_configuration=false
186 build --copt=-Wno-sign-compare
187 build -c opt
188 build:opt --copt=-march=native
189 build:opt --host_copt=-march=native
190 build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1
191
192 # Sets the default Apple platform to macOS.
193 build --apple_platform_type=macos
194 build --macos_minimum_os=10.9
195
196 # Make Bazel print out all options from rc files.
197 build --announce_rc
198
199 build --define open_source_build=true
200
201 # Disable enabled-by-default TensorFlow features that we don't care about.
202 build --define=no_aws_support=true
203 build --define=no_gcp_support=true
204 build --define=no_hdfs_support=true
205 build --define=no_kafka_support=true
206 build --define=no_ignite_support=true
207 build --define=grpc_no_ares=true
208
209 build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
210 build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true
211
212 build --spawn_strategy=standalone
213 build --strategy=Genrule=standalone
214
215 build --cxxopt=-std=c++14
216 build --host_cxxopt=-std=c++14
217
218 # Suppress all warning messages.
219 build:short_logs --output_filter=DONT_MATCH_ANYTHING
220 """
221
222
223
224 def write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None, **kwargs):
225 with open("../.bazelrc", "w") as f:
226 f.write(BAZELRC_TEMPLATE.format(**kwargs))
227 if cuda_toolkit_path:
228 f.write("build --action_env CUDA_TOOLKIT_PATH=\"{cuda_toolkit_path}\"\n"
229 .format(cuda_toolkit_path=cuda_toolkit_path))
230 if cudnn_install_path:
231 f.write("build --action_env CUDNN_INSTALL_PATH=\"{cudnn_install_path}\"\n"
232 .format(cudnn_install_path=cudnn_install_path))
233
234
235 BANNER = r"""
236 _ _ __ __
237 | | / \ \ \/ /
238 _ | |/ _ \ \ /
239 | |_| / ___ \/ \
240 \___/_/ \/_/\_\
241
242 """
243
244 EPILOG = """
245
246 From the 'build' directory in the JAX repository, run
247 python build.py
248 or
249 python3 build.py
250 to download and build JAX's XLA (jaxlib) dependency.
251 """
252
253
254 def _parse_string_as_bool(s):
255 """Parses a string as a boolean argument."""
256 lower = s.lower()
257 if lower == "true":
258 return True
259 elif lower == "false":
260 return False
261 else:
262 raise ValueError("Expected either 'true' or 'false'; got {}".format(s))
263
264
265 def add_boolean_argument(parser, name, default=False, help_str=None):
266 """Creates a boolean flag."""
267 group = parser.add_mutually_exclusive_group()
268 group.add_argument(
269 "--" + name,
270 nargs="?",
271 default=default,
272 const=True,
273 type=_parse_string_as_bool,
274 help=help_str)
275 group.add_argument("--no" + name, dest=name, action="store_false")
276
277
278 def main():
279 parser = argparse.ArgumentParser(
280 description="Builds libjax from source.", epilog=EPILOG)
281 parser.add_argument(
282 "--bazel_path",
283 help="Path to the Bazel binary to use. The default is to find bazel via "
284 "the PATH; if none is found, downloads a fresh copy of bazel from "
285 "GitHub.")
286 parser.add_argument(
287 "--python_bin_path",
288 help="Path to Python binary to use. The default is the Python "
289 "interpreter used to run the build script.")
290 add_boolean_argument(
291 parser,
292 "enable_march_native",
293 default=False,
294 help_str="Generate code targeted to the current machine? This may "
295 "increase performance, but may generate code that does not run on "
296 "older machines.")
297 add_boolean_argument(
298 parser,
299 "enable_mkl_dnn",
300 default=True,
301 help_str="Should we build with MKL-DNN enabled?")
302 add_boolean_argument(
303 parser,
304 "enable_cuda",
305 help_str="Should we build with CUDA enabled? Requires CUDA and CuDNN.")
306 parser.add_argument(
307 "--cuda_path",
308 default=None,
309 help="Path to the CUDA toolkit.")
310 parser.add_argument(
311 "--cudnn_path",
312 default=None,
313 help="Path to CUDNN libraries.")
314 parser.add_argument(
315 "--cuda_compute_capabilities",
316 default="3.5,5.2,6.0,6.1,7.0",
317 help="A comma-separated list of CUDA compute capabilities to support.")
318 parser.add_argument(
319 "--bazel_startup_options",
320 action="append", default=[],
321 help="Additional startup options to pass to bazel.")
322 parser.add_argument(
323 "--bazel_options",
324 action="append", default=[],
325 help="Additional options to pass to bazel.")
326 args = parser.parse_args()
327
328 print(BANNER)
329 os.chdir(os.path.dirname(__file__ or args.prog) or '.')
330
331 # Find a working Bazel.
332 bazel_path = get_bazel_path(args.bazel_path)
333 check_bazel_version(bazel_path, min_version="2.0.0", max_version=None)
334 print("Bazel binary path: {}".format(bazel_path))
335
336 python_bin_path = get_python_bin_path(args.python_bin_path)
337 print("Python binary path: {}".format(python_bin_path))
338 python_version = get_python_version(python_bin_path)
339 print("Python version: {}".format(".".join(map(str, python_version))))
340 check_python_version(python_version)
341
342 print("MKL-DNN enabled: {}".format("yes" if args.enable_mkl_dnn else "no"))
343 print("-march=native: {}".format("yes" if args.enable_march_native else "no"))
344
345 cuda_toolkit_path = args.cuda_path
346 cudnn_install_path = args.cudnn_path
347 print("CUDA enabled: {}".format("yes" if args.enable_cuda else "no"))
348 if args.enable_cuda:
349 if cuda_toolkit_path:
350 print("CUDA toolkit path: {}".format(cuda_toolkit_path))
351 if cudnn_install_path:
352 print("CUDNN library path: {}".format(cudnn_install_path))
353 print("CUDA compute capabilities: {}".format(args.cuda_compute_capabilities))
354 write_bazelrc(
355 python_bin_path=python_bin_path,
356 tf_need_cuda=1 if args.enable_cuda else 0,
357 cuda_toolkit_path=cuda_toolkit_path,
358 cudnn_install_path=cudnn_install_path,
359 cuda_compute_capabilities=args.cuda_compute_capabilities)
360
361 print("\nBuilding XLA and installing it in the jaxlib source tree...")
362 config_args = args.bazel_options
363 config_args += ["--config=short_logs"]
364 if args.enable_march_native:
365 config_args += ["--config=opt"]
366 if args.enable_mkl_dnn:
367 config_args += ["--config=mkl_open_source_only"]
368 if args.enable_cuda:
369 config_args += ["--config=cuda"]
370 config_args += ["--define=xla_python_enable_gpu=true"]
371 command = ([bazel_path] + args.bazel_startup_options +
372 ["run", "--verbose_failures=true"] + config_args +
373 [":install_xla_in_source_tree", os.getcwd()])
374 print(" ".join(command))
375 shell(command)
376 shell([bazel_path, "shutdown"])
377
378
379 if __name__ == "__main__":
380 main()
381
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/build/build.py b/build/build.py
--- a/build/build.py
+++ b/build/build.py
@@ -179,6 +179,7 @@
common --experimental_repo_remote_exec
build --repo_env PYTHON_BIN_PATH="{python_bin_path}"
+build --action_env=PYENV_ROOT
build --python_path="{python_bin_path}"
build --repo_env TF_NEED_CUDA="{tf_need_cuda}"
build --action_env TF_CUDA_COMPUTE_CAPABILITIES="{cuda_compute_capabilities}"
| {"golden_diff": "diff --git a/build/build.py b/build/build.py\n--- a/build/build.py\n+++ b/build/build.py\n@@ -179,6 +179,7 @@\n common --experimental_repo_remote_exec\n \n build --repo_env PYTHON_BIN_PATH=\"{python_bin_path}\"\n+build --action_env=PYENV_ROOT\n build --python_path=\"{python_bin_path}\"\n build --repo_env TF_NEED_CUDA=\"{tf_need_cuda}\"\n build --action_env TF_CUDA_COMPUTE_CAPABILITIES=\"{cuda_compute_capabilities}\"\n", "issue": "Add Python 3.9 wheels for jaxlib\nSorry for the bother, but I'm having some problems with installing jaxlib in Python 3.9:\r\n```\r\n> pip install jaxlib==0.1.55\r\nERROR: Could not find a version that satisfies the requirement jaxlib==0.1.55 (from versions: 0.1, 0.1.1, 0.1.4)\r\nERROR: No matching distribution found for jaxlib==0.1.55\r\n```\r\n@hawkinsp \n", "before_files": [{"content": "#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Helper script for building JAX's libjax easily.\n\n\nimport argparse\nimport collections\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport urllib\n\n# pylint: disable=g-import-not-at-top\nif hasattr(urllib, \"urlretrieve\"):\n urlretrieve = urllib.urlretrieve\nelse:\n import urllib.request\n urlretrieve = urllib.request.urlretrieve\n\nif hasattr(shutil, \"which\"):\n which = shutil.which\nelse:\n from distutils.spawn import find_executable as which\n# pylint: enable=g-import-not-at-top\n\n\ndef shell(cmd):\n output = subprocess.check_output(cmd)\n return output.decode(\"UTF-8\").strip()\n\n\n# Python\n\ndef get_python_bin_path(python_bin_path_flag):\n \"\"\"Returns the path to the Python interpreter to use.\"\"\"\n return python_bin_path_flag or sys.executable\n\n\ndef get_python_version(python_bin_path):\n version_output = shell(\n [python_bin_path, \"-c\",\n \"import sys; print(\\\"{}.{}\\\".format(sys.version_info[0], \"\n \"sys.version_info[1]))\"])\n major, minor = map(int, version_output.split(\".\"))\n return major, minor\n\ndef check_python_version(python_version):\n if python_version < (3, 6):\n print(\"JAX requires Python 3.6 or newer.\")\n sys.exit(-1)\n\n\n# Bazel\n\nBAZEL_BASE_URI = \"https://github.com/bazelbuild/bazel/releases/download/2.0.0/\"\nBazelPackage = collections.namedtuple(\"BazelPackage\", [\"file\", \"sha256\"])\nbazel_packages = {\n \"Linux\":\n BazelPackage(\n file=\"bazel-2.0.0-linux-x86_64\",\n sha256=\n \"4df79462c6c3ecdeeee7af99fc269b52ab1aa4828ef3bc359c1837d3fafeeee7\"),\n \"Darwin\":\n BazelPackage(\n file=\"bazel-2.0.0-darwin-x86_64\",\n sha256=\n \"3eca4c96cfda97a9d5f8d3d0dec4155a5cc5ff339b10d3f35213c398bf13881e\"),\n}\n\n\ndef download_and_verify_bazel():\n \"\"\"Downloads a bazel binary from Github, verifying its SHA256 hash.\"\"\"\n package = bazel_packages.get(platform.system())\n if package is None:\n return None\n\n if not os.access(package.file, os.X_OK):\n uri = BAZEL_BASE_URI + package.file\n sys.stdout.write(\"Downloading bazel from: 
{}\\n\".format(uri))\n\n def progress(block_count, block_size, total_size):\n if total_size <= 0:\n total_size = 170**6\n progress = (block_count * block_size) / total_size\n num_chars = 40\n progress_chars = int(num_chars * progress)\n sys.stdout.write(\"{} [{}{}] {}%\\r\".format(\n package.file, \"#\" * progress_chars,\n \".\" * (num_chars - progress_chars), int(progress * 100.0)))\n\n tmp_path, _ = urlretrieve(uri, None,\n progress if sys.stdout.isatty() else None)\n sys.stdout.write(\"\\n\")\n\n # Verify that the downloaded Bazel binary has the expected SHA256.\n with open(tmp_path, \"rb\") as downloaded_file:\n contents = downloaded_file.read()\n\n digest = hashlib.sha256(contents).hexdigest()\n if digest != package.sha256:\n print(\n \"Checksum mismatch for downloaded bazel binary (expected {}; got {}).\"\n .format(package.sha256, digest))\n sys.exit(-1)\n\n # Write the file as the bazel file name.\n with open(package.file, \"wb\") as out_file:\n out_file.write(contents)\n\n # Mark the file as executable.\n st = os.stat(package.file)\n os.chmod(package.file,\n st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n return \"./\" + package.file\n\n\ndef get_bazel_path(bazel_path_flag):\n \"\"\"Returns the path to a Bazel binary, downloading Bazel if not found.\"\"\"\n if bazel_path_flag:\n return bazel_path_flag\n\n bazel = download_and_verify_bazel()\n if bazel:\n return bazel\n\n bazel = which(\"bazel\")\n if bazel:\n return bazel\n\n print(\"Cannot find or download bazel. Please install bazel.\")\n sys.exit(-1)\n\n\ndef check_bazel_version(bazel_path, min_version, max_version):\n \"\"\"Checks Bazel's version is in the range [`min_version`, `max_version`).\"\"\"\n version_output = shell([bazel_path, \"--bazelrc=/dev/null\", \"version\"])\n match = re.search(\"Build label: *([0-9\\\\.]+)[^0-9\\\\.]\", version_output)\n if match is None:\n print(\"Warning: bazel installation is not a release version. 
Make sure \"\n \"bazel is at least {}\".format(min_version))\n return\n version = match.group(1)\n min_ints = [int(x) for x in min_version.split(\".\")]\n actual_ints = [int(x) for x in match.group(1).split(\".\")]\n if min_ints > actual_ints:\n print(\"Outdated bazel revision (>= {} required, found {})\".format(\n min_version, version))\n sys.exit(-1)\n if max_version is not None:\n max_ints = [int(x) for x in max_version.split(\".\")]\n if actual_ints >= max_ints:\n print(\"Please downgrade your bazel revision to build JAX (>= {} and < {}\"\n \" required, found {})\".format(min_version, max_version, version))\n sys.exit(-1)\n\n\nBAZELRC_TEMPLATE = \"\"\"\n# Flag to enable remote config\ncommon --experimental_repo_remote_exec\n\nbuild --repo_env PYTHON_BIN_PATH=\"{python_bin_path}\"\nbuild --python_path=\"{python_bin_path}\"\nbuild --repo_env TF_NEED_CUDA=\"{tf_need_cuda}\"\nbuild --action_env TF_CUDA_COMPUTE_CAPABILITIES=\"{cuda_compute_capabilities}\"\nbuild --distinct_host_configuration=false\nbuild --copt=-Wno-sign-compare\nbuild -c opt\nbuild:opt --copt=-march=native\nbuild:opt --host_copt=-march=native\nbuild:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1\n\n# Sets the default Apple platform to macOS.\nbuild --apple_platform_type=macos\nbuild --macos_minimum_os=10.9\n\n# Make Bazel print out all options from rc files.\nbuild --announce_rc\n\nbuild --define open_source_build=true\n\n# Disable enabled-by-default TensorFlow features that we don't care about.\nbuild --define=no_aws_support=true\nbuild --define=no_gcp_support=true\nbuild --define=no_hdfs_support=true\nbuild --define=no_kafka_support=true\nbuild --define=no_ignite_support=true\nbuild --define=grpc_no_ares=true\n\nbuild:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\nbuild:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\n\nbuild --spawn_strategy=standalone\nbuild --strategy=Genrule=standalone\n\nbuild --cxxopt=-std=c++14\nbuild --host_cxxopt=-std=c++14\n\n# Suppress all warning messages.\nbuild:short_logs --output_filter=DONT_MATCH_ANYTHING\n\"\"\"\n\n\n\ndef write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None, **kwargs):\n with open(\"../.bazelrc\", \"w\") as f:\n f.write(BAZELRC_TEMPLATE.format(**kwargs))\n if cuda_toolkit_path:\n f.write(\"build --action_env CUDA_TOOLKIT_PATH=\\\"{cuda_toolkit_path}\\\"\\n\"\n .format(cuda_toolkit_path=cuda_toolkit_path))\n if cudnn_install_path:\n f.write(\"build --action_env CUDNN_INSTALL_PATH=\\\"{cudnn_install_path}\\\"\\n\"\n .format(cudnn_install_path=cudnn_install_path))\n\n\nBANNER = r\"\"\"\n _ _ __ __\n | | / \\ \\ \\/ /\n _ | |/ _ \\ \\ /\n| |_| / ___ \\/ \\\n \\___/_/ \\/_/\\_\\\n\n\"\"\"\n\nEPILOG = \"\"\"\n\nFrom the 'build' directory in the JAX repository, run\n python build.py\nor\n python3 build.py\nto download and build JAX's XLA (jaxlib) dependency.\n\"\"\"\n\n\ndef _parse_string_as_bool(s):\n \"\"\"Parses a string as a boolean argument.\"\"\"\n lower = s.lower()\n if lower == \"true\":\n return True\n elif lower == \"false\":\n return False\n else:\n raise ValueError(\"Expected either 'true' or 'false'; got {}\".format(s))\n\n\ndef add_boolean_argument(parser, name, default=False, help_str=None):\n \"\"\"Creates a boolean flag.\"\"\"\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--\" + name,\n nargs=\"?\",\n default=default,\n const=True,\n type=_parse_string_as_bool,\n help=help_str)\n group.add_argument(\"--no\" + name, dest=name, action=\"store_false\")\n\n\ndef main():\n parser 
= argparse.ArgumentParser(\n description=\"Builds libjax from source.\", epilog=EPILOG)\n parser.add_argument(\n \"--bazel_path\",\n help=\"Path to the Bazel binary to use. The default is to find bazel via \"\n \"the PATH; if none is found, downloads a fresh copy of bazel from \"\n \"GitHub.\")\n parser.add_argument(\n \"--python_bin_path\",\n help=\"Path to Python binary to use. The default is the Python \"\n \"interpreter used to run the build script.\")\n add_boolean_argument(\n parser,\n \"enable_march_native\",\n default=False,\n help_str=\"Generate code targeted to the current machine? This may \"\n \"increase performance, but may generate code that does not run on \"\n \"older machines.\")\n add_boolean_argument(\n parser,\n \"enable_mkl_dnn\",\n default=True,\n help_str=\"Should we build with MKL-DNN enabled?\")\n add_boolean_argument(\n parser,\n \"enable_cuda\",\n help_str=\"Should we build with CUDA enabled? Requires CUDA and CuDNN.\")\n parser.add_argument(\n \"--cuda_path\",\n default=None,\n help=\"Path to the CUDA toolkit.\")\n parser.add_argument(\n \"--cudnn_path\",\n default=None,\n help=\"Path to CUDNN libraries.\")\n parser.add_argument(\n \"--cuda_compute_capabilities\",\n default=\"3.5,5.2,6.0,6.1,7.0\",\n help=\"A comma-separated list of CUDA compute capabilities to support.\")\n parser.add_argument(\n \"--bazel_startup_options\",\n action=\"append\", default=[],\n help=\"Additional startup options to pass to bazel.\")\n parser.add_argument(\n \"--bazel_options\",\n action=\"append\", default=[],\n help=\"Additional options to pass to bazel.\")\n args = parser.parse_args()\n\n print(BANNER)\n os.chdir(os.path.dirname(__file__ or args.prog) or '.')\n\n # Find a working Bazel.\n bazel_path = get_bazel_path(args.bazel_path)\n check_bazel_version(bazel_path, min_version=\"2.0.0\", max_version=None)\n print(\"Bazel binary path: {}\".format(bazel_path))\n\n python_bin_path = get_python_bin_path(args.python_bin_path)\n print(\"Python binary path: {}\".format(python_bin_path))\n python_version = get_python_version(python_bin_path)\n print(\"Python version: {}\".format(\".\".join(map(str, python_version))))\n check_python_version(python_version)\n\n print(\"MKL-DNN enabled: {}\".format(\"yes\" if args.enable_mkl_dnn else \"no\"))\n print(\"-march=native: {}\".format(\"yes\" if args.enable_march_native else \"no\"))\n\n cuda_toolkit_path = args.cuda_path\n cudnn_install_path = args.cudnn_path\n print(\"CUDA enabled: {}\".format(\"yes\" if args.enable_cuda else \"no\"))\n if args.enable_cuda:\n if cuda_toolkit_path:\n print(\"CUDA toolkit path: {}\".format(cuda_toolkit_path))\n if cudnn_install_path:\n print(\"CUDNN library path: {}\".format(cudnn_install_path))\n print(\"CUDA compute capabilities: {}\".format(args.cuda_compute_capabilities))\n write_bazelrc(\n python_bin_path=python_bin_path,\n tf_need_cuda=1 if args.enable_cuda else 0,\n cuda_toolkit_path=cuda_toolkit_path,\n cudnn_install_path=cudnn_install_path,\n cuda_compute_capabilities=args.cuda_compute_capabilities)\n\n print(\"\\nBuilding XLA and installing it in the jaxlib source tree...\")\n config_args = args.bazel_options\n config_args += [\"--config=short_logs\"]\n if args.enable_march_native:\n config_args += [\"--config=opt\"]\n if args.enable_mkl_dnn:\n config_args += [\"--config=mkl_open_source_only\"]\n if args.enable_cuda:\n config_args += [\"--config=cuda\"]\n config_args += [\"--define=xla_python_enable_gpu=true\"]\n command = ([bazel_path] + args.bazel_startup_options +\n [\"run\", 
\"--verbose_failures=true\"] + config_args +\n [\":install_xla_in_source_tree\", os.getcwd()])\n print(\" \".join(command))\n shell(command)\n shell([bazel_path, \"shutdown\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "build/build.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Helper script for building JAX's libjax easily.\n\n\nimport argparse\nimport collections\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport urllib\n\n# pylint: disable=g-import-not-at-top\nif hasattr(urllib, \"urlretrieve\"):\n urlretrieve = urllib.urlretrieve\nelse:\n import urllib.request\n urlretrieve = urllib.request.urlretrieve\n\nif hasattr(shutil, \"which\"):\n which = shutil.which\nelse:\n from distutils.spawn import find_executable as which\n# pylint: enable=g-import-not-at-top\n\n\ndef shell(cmd):\n output = subprocess.check_output(cmd)\n return output.decode(\"UTF-8\").strip()\n\n\n# Python\n\ndef get_python_bin_path(python_bin_path_flag):\n \"\"\"Returns the path to the Python interpreter to use.\"\"\"\n return python_bin_path_flag or sys.executable\n\n\ndef get_python_version(python_bin_path):\n version_output = shell(\n [python_bin_path, \"-c\",\n \"import sys; print(\\\"{}.{}\\\".format(sys.version_info[0], \"\n \"sys.version_info[1]))\"])\n major, minor = map(int, version_output.split(\".\"))\n return major, minor\n\ndef check_python_version(python_version):\n if python_version < (3, 6):\n print(\"JAX requires Python 3.6 or newer.\")\n sys.exit(-1)\n\n\n# Bazel\n\nBAZEL_BASE_URI = \"https://github.com/bazelbuild/bazel/releases/download/2.0.0/\"\nBazelPackage = collections.namedtuple(\"BazelPackage\", [\"file\", \"sha256\"])\nbazel_packages = {\n \"Linux\":\n BazelPackage(\n file=\"bazel-2.0.0-linux-x86_64\",\n sha256=\n \"4df79462c6c3ecdeeee7af99fc269b52ab1aa4828ef3bc359c1837d3fafeeee7\"),\n \"Darwin\":\n BazelPackage(\n file=\"bazel-2.0.0-darwin-x86_64\",\n sha256=\n \"3eca4c96cfda97a9d5f8d3d0dec4155a5cc5ff339b10d3f35213c398bf13881e\"),\n}\n\n\ndef download_and_verify_bazel():\n \"\"\"Downloads a bazel binary from Github, verifying its SHA256 hash.\"\"\"\n package = bazel_packages.get(platform.system())\n if package is None:\n return None\n\n if not os.access(package.file, os.X_OK):\n uri = BAZEL_BASE_URI + package.file\n sys.stdout.write(\"Downloading bazel from: {}\\n\".format(uri))\n\n def progress(block_count, block_size, total_size):\n if total_size <= 0:\n total_size = 170**6\n progress = (block_count * block_size) / total_size\n num_chars = 40\n progress_chars = int(num_chars * progress)\n sys.stdout.write(\"{} [{}{}] {}%\\r\".format(\n package.file, \"#\" * progress_chars,\n \".\" * (num_chars - progress_chars), int(progress * 100.0)))\n\n tmp_path, _ = urlretrieve(uri, None,\n progress if sys.stdout.isatty() else None)\n sys.stdout.write(\"\\n\")\n\n # Verify that the downloaded Bazel binary has the expected 
SHA256.\n with open(tmp_path, \"rb\") as downloaded_file:\n contents = downloaded_file.read()\n\n digest = hashlib.sha256(contents).hexdigest()\n if digest != package.sha256:\n print(\n \"Checksum mismatch for downloaded bazel binary (expected {}; got {}).\"\n .format(package.sha256, digest))\n sys.exit(-1)\n\n # Write the file as the bazel file name.\n with open(package.file, \"wb\") as out_file:\n out_file.write(contents)\n\n # Mark the file as executable.\n st = os.stat(package.file)\n os.chmod(package.file,\n st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n return \"./\" + package.file\n\n\ndef get_bazel_path(bazel_path_flag):\n \"\"\"Returns the path to a Bazel binary, downloading Bazel if not found.\"\"\"\n if bazel_path_flag:\n return bazel_path_flag\n\n bazel = download_and_verify_bazel()\n if bazel:\n return bazel\n\n bazel = which(\"bazel\")\n if bazel:\n return bazel\n\n print(\"Cannot find or download bazel. Please install bazel.\")\n sys.exit(-1)\n\n\ndef check_bazel_version(bazel_path, min_version, max_version):\n \"\"\"Checks Bazel's version is in the range [`min_version`, `max_version`).\"\"\"\n version_output = shell([bazel_path, \"--bazelrc=/dev/null\", \"version\"])\n match = re.search(\"Build label: *([0-9\\\\.]+)[^0-9\\\\.]\", version_output)\n if match is None:\n print(\"Warning: bazel installation is not a release version. Make sure \"\n \"bazel is at least {}\".format(min_version))\n return\n version = match.group(1)\n min_ints = [int(x) for x in min_version.split(\".\")]\n actual_ints = [int(x) for x in match.group(1).split(\".\")]\n if min_ints > actual_ints:\n print(\"Outdated bazel revision (>= {} required, found {})\".format(\n min_version, version))\n sys.exit(-1)\n if max_version is not None:\n max_ints = [int(x) for x in max_version.split(\".\")]\n if actual_ints >= max_ints:\n print(\"Please downgrade your bazel revision to build JAX (>= {} and < {}\"\n \" required, found {})\".format(min_version, max_version, version))\n sys.exit(-1)\n\n\nBAZELRC_TEMPLATE = \"\"\"\n# Flag to enable remote config\ncommon --experimental_repo_remote_exec\n\nbuild --repo_env PYTHON_BIN_PATH=\"{python_bin_path}\"\nbuild --action_env=PYENV_ROOT\nbuild --python_path=\"{python_bin_path}\"\nbuild --repo_env TF_NEED_CUDA=\"{tf_need_cuda}\"\nbuild --action_env TF_CUDA_COMPUTE_CAPABILITIES=\"{cuda_compute_capabilities}\"\nbuild --distinct_host_configuration=false\nbuild --copt=-Wno-sign-compare\nbuild -c opt\nbuild:opt --copt=-march=native\nbuild:opt --host_copt=-march=native\nbuild:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1\n\n# Sets the default Apple platform to macOS.\nbuild --apple_platform_type=macos\nbuild --macos_minimum_os=10.9\n\n# Make Bazel print out all options from rc files.\nbuild --announce_rc\n\nbuild --define open_source_build=true\n\n# Disable enabled-by-default TensorFlow features that we don't care about.\nbuild --define=no_aws_support=true\nbuild --define=no_gcp_support=true\nbuild --define=no_hdfs_support=true\nbuild --define=no_kafka_support=true\nbuild --define=no_ignite_support=true\nbuild --define=grpc_no_ares=true\n\nbuild:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\nbuild:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\n\nbuild --spawn_strategy=standalone\nbuild --strategy=Genrule=standalone\n\nbuild --cxxopt=-std=c++14\nbuild --host_cxxopt=-std=c++14\n\n# Suppress all warning messages.\nbuild:short_logs --output_filter=DONT_MATCH_ANYTHING\n\"\"\"\n\n\n\ndef 
write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None, **kwargs):\n with open(\"../.bazelrc\", \"w\") as f:\n f.write(BAZELRC_TEMPLATE.format(**kwargs))\n if cuda_toolkit_path:\n f.write(\"build --action_env CUDA_TOOLKIT_PATH=\\\"{cuda_toolkit_path}\\\"\\n\"\n .format(cuda_toolkit_path=cuda_toolkit_path))\n if cudnn_install_path:\n f.write(\"build --action_env CUDNN_INSTALL_PATH=\\\"{cudnn_install_path}\\\"\\n\"\n .format(cudnn_install_path=cudnn_install_path))\n\n\nBANNER = r\"\"\"\n _ _ __ __\n | | / \\ \\ \\/ /\n _ | |/ _ \\ \\ /\n| |_| / ___ \\/ \\\n \\___/_/ \\/_/\\_\\\n\n\"\"\"\n\nEPILOG = \"\"\"\n\nFrom the 'build' directory in the JAX repository, run\n python build.py\nor\n python3 build.py\nto download and build JAX's XLA (jaxlib) dependency.\n\"\"\"\n\n\ndef _parse_string_as_bool(s):\n \"\"\"Parses a string as a boolean argument.\"\"\"\n lower = s.lower()\n if lower == \"true\":\n return True\n elif lower == \"false\":\n return False\n else:\n raise ValueError(\"Expected either 'true' or 'false'; got {}\".format(s))\n\n\ndef add_boolean_argument(parser, name, default=False, help_str=None):\n \"\"\"Creates a boolean flag.\"\"\"\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--\" + name,\n nargs=\"?\",\n default=default,\n const=True,\n type=_parse_string_as_bool,\n help=help_str)\n group.add_argument(\"--no\" + name, dest=name, action=\"store_false\")\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Builds libjax from source.\", epilog=EPILOG)\n parser.add_argument(\n \"--bazel_path\",\n help=\"Path to the Bazel binary to use. The default is to find bazel via \"\n \"the PATH; if none is found, downloads a fresh copy of bazel from \"\n \"GitHub.\")\n parser.add_argument(\n \"--python_bin_path\",\n help=\"Path to Python binary to use. The default is the Python \"\n \"interpreter used to run the build script.\")\n add_boolean_argument(\n parser,\n \"enable_march_native\",\n default=False,\n help_str=\"Generate code targeted to the current machine? This may \"\n \"increase performance, but may generate code that does not run on \"\n \"older machines.\")\n add_boolean_argument(\n parser,\n \"enable_mkl_dnn\",\n default=True,\n help_str=\"Should we build with MKL-DNN enabled?\")\n add_boolean_argument(\n parser,\n \"enable_cuda\",\n help_str=\"Should we build with CUDA enabled? 
Requires CUDA and CuDNN.\")\n parser.add_argument(\n \"--cuda_path\",\n default=None,\n help=\"Path to the CUDA toolkit.\")\n parser.add_argument(\n \"--cudnn_path\",\n default=None,\n help=\"Path to CUDNN libraries.\")\n parser.add_argument(\n \"--cuda_compute_capabilities\",\n default=\"3.5,5.2,6.0,6.1,7.0\",\n help=\"A comma-separated list of CUDA compute capabilities to support.\")\n parser.add_argument(\n \"--bazel_startup_options\",\n action=\"append\", default=[],\n help=\"Additional startup options to pass to bazel.\")\n parser.add_argument(\n \"--bazel_options\",\n action=\"append\", default=[],\n help=\"Additional options to pass to bazel.\")\n args = parser.parse_args()\n\n print(BANNER)\n os.chdir(os.path.dirname(__file__ or args.prog) or '.')\n\n # Find a working Bazel.\n bazel_path = get_bazel_path(args.bazel_path)\n check_bazel_version(bazel_path, min_version=\"2.0.0\", max_version=None)\n print(\"Bazel binary path: {}\".format(bazel_path))\n\n python_bin_path = get_python_bin_path(args.python_bin_path)\n print(\"Python binary path: {}\".format(python_bin_path))\n python_version = get_python_version(python_bin_path)\n print(\"Python version: {}\".format(\".\".join(map(str, python_version))))\n check_python_version(python_version)\n\n print(\"MKL-DNN enabled: {}\".format(\"yes\" if args.enable_mkl_dnn else \"no\"))\n print(\"-march=native: {}\".format(\"yes\" if args.enable_march_native else \"no\"))\n\n cuda_toolkit_path = args.cuda_path\n cudnn_install_path = args.cudnn_path\n print(\"CUDA enabled: {}\".format(\"yes\" if args.enable_cuda else \"no\"))\n if args.enable_cuda:\n if cuda_toolkit_path:\n print(\"CUDA toolkit path: {}\".format(cuda_toolkit_path))\n if cudnn_install_path:\n print(\"CUDNN library path: {}\".format(cudnn_install_path))\n print(\"CUDA compute capabilities: {}\".format(args.cuda_compute_capabilities))\n write_bazelrc(\n python_bin_path=python_bin_path,\n tf_need_cuda=1 if args.enable_cuda else 0,\n cuda_toolkit_path=cuda_toolkit_path,\n cudnn_install_path=cudnn_install_path,\n cuda_compute_capabilities=args.cuda_compute_capabilities)\n\n print(\"\\nBuilding XLA and installing it in the jaxlib source tree...\")\n config_args = args.bazel_options\n config_args += [\"--config=short_logs\"]\n if args.enable_march_native:\n config_args += [\"--config=opt\"]\n if args.enable_mkl_dnn:\n config_args += [\"--config=mkl_open_source_only\"]\n if args.enable_cuda:\n config_args += [\"--config=cuda\"]\n config_args += [\"--define=xla_python_enable_gpu=true\"]\n command = ([bazel_path] + args.bazel_startup_options +\n [\"run\", \"--verbose_failures=true\"] + config_args +\n [\":install_xla_in_source_tree\", os.getcwd()])\n print(\" \".join(command))\n shell(command)\n shell([bazel_path, \"shutdown\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "build/build.py"}]} |
gh_patches_debug_1324 | rasdani/github-patches | git_diff | freqtrade__freqtrade-3719 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cross_above problem with shift
<!--
Have you searched for similar issues before posting it?
Did you have a VERY good look at the [documentation](https://www.freqtrade.io/en/latest/) and are sure that the question is not explained there
Please do not use the question template to report bugs or to request new features.
-->
## Describe your environment
* Operating system: Ubuntu
* Python Version: Python 3.8.2 (`python -V`)
* CCXT version: ccxt==1.33.52 (`pip freeze | grep ccxt`)
* Freqtrade Version: freqtrade develop-21f4aba4 (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
## Your question
Hello,
I am having a problem with the `crossed_above` function in qtpylib. I am trying to optimize a strategy, but as soon as I run it I receive the following exception:
Traceback (most recent call last):
File "/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/externals/loky/process_executor.py", line 431, in _process_worker
r = call_item()
File "/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/externals/loky/process_executor.py", line 285, in __call__
return self.fn(*self.args, **self.kwargs)
File "/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/_parallel_backends.py", line 595, in __call__
return self.func(*args, **kwargs)
File "/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/parallel.py", line 252, in __call__
return [func(*args, **kwargs)
File "/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/parallel.py", line 252, in <listcomp>
return [func(*args, **kwargs)
File "/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/externals/loky/cloudpickle_wrapper.py", line 38, in __call__
return self._obj(*args, **kwargs)
File "/home/trademad/freqtrade/freqtrade/optimize/hyperopt.py", line 532, in generate_optimizer
backtesting_results = self.backtesting.backtest(
File "/home/trademad/freqtrade/freqtrade/optimize/backtesting.py", line 312, in backtest
data: Dict = self._get_ohlcv_as_lists(processed)
File "/home/trademad/freqtrade/freqtrade/optimize/backtesting.py", line 164, in _get_ohlcv_as_lists
self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair})[headers].copy()
File "/home/trademad/freqtrade/user_data/hyperopts/bbrsi_opt_v01.py", line 61, in populate_buy_trend
conditions.append(qtpylib.crossed_above(dataframe['rsi'], params['rsi-value']))
File "/home/trademad/freqtrade/freqtrade/vendor/qtpylib/indicators.py", line 243, in crossed_above
return crossed(series1, series2, "above")
File "/home/trademad/freqtrade/freqtrade/vendor/qtpylib/indicators.py", line 230, in crossed
series1.shift(1) <= series2.shift(1)))
AttributeError: 'numpy.int64' object has no attribute 'shift'
Could it be a problem with pandas in the latest version?
Thanks.
*Ask the question you have not been able to find an answer in our [Documentation](https://www.freqtrade.io/en/latest/)*
--- END ISSUE ---
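For context, here is a minimal reproduction sketch (not taken from the issue itself; names are illustrative). It assumes the hyperopt integer parameter `params['rsi-value']` arrives as a `numpy.int64`, which is typical for integer hyperopt dimensions, and shows why the type check in `crossed()` lets the bare scalar through:

```python
import numpy as np
import pandas as pd

rsi_value = np.int64(30)  # how an integer hyperopt parameter typically arrives

# The vendored check only recognises plain Python scalars and ndarrays,
# so the numpy scalar is never wrapped in a Series:
print(isinstance(rsi_value, (float, int, np.ndarray)))    # False on Python 3
print(isinstance(rsi_value, (np.integer, np.floating)))   # True

# crossed() then calls .shift(1) on the bare scalar, which is the reported error:
try:
    rsi_value.shift(1)
except AttributeError as exc:
    print(exc)  # 'numpy.int64' object has no attribute 'shift'
```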
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/vendor/qtpylib/indicators.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # QTPyLib: Quantitative Trading Python Library
5 # https://github.com/ranaroussi/qtpylib
6 #
7 # Copyright 2016-2018 Ran Aroussi
8 #
9 # Licensed under the Apache License, Version 2.0 (the "License");
10 # you may not use this file except in compliance with the License.
11 # You may obtain a copy of the License at
12 #
13 # http://www.apache.org/licenses/LICENSE-2.0
14 #
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS,
17 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 # See the License for the specific language governing permissions and
19 # limitations under the License.
20 #
21
22 import warnings
23 import sys
24 from datetime import datetime, timedelta
25
26 import numpy as np
27 import pandas as pd
28 from pandas.core.base import PandasObject
29
30 # =============================================
31 # check min, python version
32 if sys.version_info < (3, 4):
33 raise SystemError("QTPyLib requires Python version >= 3.4")
34
35 # =============================================
36 warnings.simplefilter(action="ignore", category=RuntimeWarning)
37
38 # =============================================
39
40
41 def numpy_rolling_window(data, window):
42 shape = data.shape[:-1] + (data.shape[-1] - window + 1, window)
43 strides = data.strides + (data.strides[-1],)
44 return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
45
46
47 def numpy_rolling_series(func):
48 def func_wrapper(data, window, as_source=False):
49 series = data.values if isinstance(data, pd.Series) else data
50
51 new_series = np.empty(len(series)) * np.nan
52 calculated = func(series, window)
53 new_series[-len(calculated):] = calculated
54
55 if as_source and isinstance(data, pd.Series):
56 return pd.Series(index=data.index, data=new_series)
57
58 return new_series
59
60 return func_wrapper
61
62
63 @numpy_rolling_series
64 def numpy_rolling_mean(data, window, as_source=False):
65 return np.mean(numpy_rolling_window(data, window), axis=-1)
66
67
68 @numpy_rolling_series
69 def numpy_rolling_std(data, window, as_source=False):
70 return np.std(numpy_rolling_window(data, window), axis=-1, ddof=1)
71
72
73 # ---------------------------------------------
74
75
76 def session(df, start='17:00', end='16:00'):
77 """ remove previous globex day from df """
78 if df.empty:
79 return df
80
81 # get start/end/now as decimals
82 int_start = list(map(int, start.split(':')))
83 int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001
84 int_end = list(map(int, end.split(':')))
85 int_end = int_end[0] + int_end[1] / 100
86 int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100)
87
88 # same-dat session?
89 is_same_day = int_end > int_start
90
91 # set pointers
92 curr = prev = df[-1:].index[0].strftime('%Y-%m-%d')
93
94 # globex/forex session
95 if not is_same_day:
96 prev = (datetime.strptime(curr, '%Y-%m-%d') -
97 timedelta(1)).strftime('%Y-%m-%d')
98
99 # slice
100 if int_now >= int_start:
101 df = df[df.index >= curr + ' ' + start]
102 else:
103 df = df[df.index >= prev + ' ' + start]
104
105 return df.copy()
106
107 # ---------------------------------------------
108
109
110 def heikinashi(bars):
111 bars = bars.copy()
112 bars['ha_close'] = (bars['open'] + bars['high'] +
113 bars['low'] + bars['close']) / 4
114
115 # ha open
116 bars.at[0, 'ha_open'] = (bars.at[0, 'open'] + bars.at[0, 'close']) / 2
117 for i in range(1, len(bars)):
118 bars.at[i, 'ha_open'] = (bars.at[i - 1, 'ha_open'] + bars.at[i - 1, 'ha_close']) / 2
119
120 bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)
121 bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)
122
123 return pd.DataFrame(index=bars.index,
124 data={'open': bars['ha_open'],
125 'high': bars['ha_high'],
126 'low': bars['ha_low'],
127 'close': bars['ha_close']})
128
129 # ---------------------------------------------
130
131
132 def tdi(series, rsi_lookback=13, rsi_smooth_len=2,
133 rsi_signal_len=7, bb_lookback=34, bb_std=1.6185):
134
135 rsi_data = rsi(series, rsi_lookback)
136 rsi_smooth = sma(rsi_data, rsi_smooth_len)
137 rsi_signal = sma(rsi_data, rsi_signal_len)
138
139 bb_series = bollinger_bands(rsi_data, bb_lookback, bb_std)
140
141 return pd.DataFrame(index=series.index, data={
142 "rsi": rsi_data,
143 "rsi_signal": rsi_signal,
144 "rsi_smooth": rsi_smooth,
145 "rsi_bb_upper": bb_series['upper'],
146 "rsi_bb_lower": bb_series['lower'],
147 "rsi_bb_mid": bb_series['mid']
148 })
149
150 # ---------------------------------------------
151
152
153 def awesome_oscillator(df, weighted=False, fast=5, slow=34):
154 midprice = (df['high'] + df['low']) / 2
155
156 if weighted:
157 ao = (midprice.ewm(fast).mean() - midprice.ewm(slow).mean()).values
158 else:
159 ao = numpy_rolling_mean(midprice, fast) - \
160 numpy_rolling_mean(midprice, slow)
161
162 return pd.Series(index=df.index, data=ao)
163
164
165 # ---------------------------------------------
166
167 def nans(length=1):
168 mtx = np.empty(length)
169 mtx[:] = np.nan
170 return mtx
171
172
173 # ---------------------------------------------
174
175 def typical_price(bars):
176 res = (bars['high'] + bars['low'] + bars['close']) / 3.
177 return pd.Series(index=bars.index, data=res)
178
179
180 # ---------------------------------------------
181
182 def mid_price(bars):
183 res = (bars['high'] + bars['low']) / 2.
184 return pd.Series(index=bars.index, data=res)
185
186
187 # ---------------------------------------------
188
189 def ibs(bars):
190 """ Internal bar strength """
191 res = np.round((bars['close'] - bars['low']) /
192 (bars['high'] - bars['low']), 2)
193 return pd.Series(index=bars.index, data=res)
194
195
196 # ---------------------------------------------
197
198 def true_range(bars):
199 return pd.DataFrame({
200 "hl": bars['high'] - bars['low'],
201 "hc": abs(bars['high'] - bars['close'].shift(1)),
202 "lc": abs(bars['low'] - bars['close'].shift(1))
203 }).max(axis=1)
204
205
206 # ---------------------------------------------
207
208 def atr(bars, window=14, exp=False):
209 tr = true_range(bars)
210
211 if exp:
212 res = rolling_weighted_mean(tr, window)
213 else:
214 res = rolling_mean(tr, window)
215
216 return pd.Series(res)
217
218
219 # ---------------------------------------------
220
221 def crossed(series1, series2, direction=None):
222 if isinstance(series1, np.ndarray):
223 series1 = pd.Series(series1)
224
225 if isinstance(series2, (float, int, np.ndarray)):
226 series2 = pd.Series(index=series1.index, data=series2)
227
228 if direction is None or direction == "above":
229 above = pd.Series((series1 > series2) & (
230 series1.shift(1) <= series2.shift(1)))
231
232 if direction is None or direction == "below":
233 below = pd.Series((series1 < series2) & (
234 series1.shift(1) >= series2.shift(1)))
235
236 if direction is None:
237 return above or below
238
239 return above if direction == "above" else below
240
241
242 def crossed_above(series1, series2):
243 return crossed(series1, series2, "above")
244
245
246 def crossed_below(series1, series2):
247 return crossed(series1, series2, "below")
248
249 # ---------------------------------------------
250
251
252 def rolling_std(series, window=200, min_periods=None):
253 min_periods = window if min_periods is None else min_periods
254 if min_periods == window and len(series) > window:
255 return numpy_rolling_std(series, window, True)
256 else:
257 try:
258 return series.rolling(window=window, min_periods=min_periods).std()
259 except Exception as e: # noqa: F841
260 return pd.Series(series).rolling(window=window, min_periods=min_periods).std()
261
262 # ---------------------------------------------
263
264
265 def rolling_mean(series, window=200, min_periods=None):
266 min_periods = window if min_periods is None else min_periods
267 if min_periods == window and len(series) > window:
268 return numpy_rolling_mean(series, window, True)
269 else:
270 try:
271 return series.rolling(window=window, min_periods=min_periods).mean()
272 except Exception as e: # noqa: F841
273 return pd.Series(series).rolling(window=window, min_periods=min_periods).mean()
274
275 # ---------------------------------------------
276
277
278 def rolling_min(series, window=14, min_periods=None):
279 min_periods = window if min_periods is None else min_periods
280 try:
281 return series.rolling(window=window, min_periods=min_periods).min()
282 except Exception as e: # noqa: F841
283 return pd.Series(series).rolling(window=window, min_periods=min_periods).min()
284
285
286 # ---------------------------------------------
287
288 def rolling_max(series, window=14, min_periods=None):
289 min_periods = window if min_periods is None else min_periods
290 try:
291 return series.rolling(window=window, min_periods=min_periods).max()
292 except Exception as e: # noqa: F841
293 return pd.Series(series).rolling(window=window, min_periods=min_periods).max()
294
295
296 # ---------------------------------------------
297
298 def rolling_weighted_mean(series, window=200, min_periods=None):
299 min_periods = window if min_periods is None else min_periods
300 try:
301 return series.ewm(span=window, min_periods=min_periods).mean()
302 except Exception as e: # noqa: F841
303 return pd.ewma(series, span=window, min_periods=min_periods)
304
305
306 # ---------------------------------------------
307
308 def hull_moving_average(series, window=200, min_periods=None):
309 min_periods = window if min_periods is None else min_periods
310 ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - \
311 rolling_weighted_mean(series, window, min_periods)
312 return rolling_weighted_mean(ma, np.sqrt(window), min_periods)
313
314
315 # ---------------------------------------------
316
317 def sma(series, window=200, min_periods=None):
318 return rolling_mean(series, window=window, min_periods=min_periods)
319
320
321 # ---------------------------------------------
322
323 def wma(series, window=200, min_periods=None):
324 return rolling_weighted_mean(series, window=window, min_periods=min_periods)
325
326
327 # ---------------------------------------------
328
329 def hma(series, window=200, min_periods=None):
330 return hull_moving_average(series, window=window, min_periods=min_periods)
331
332
333 # ---------------------------------------------
334
335 def vwap(bars):
336 """
337 calculate vwap of entire time series
338 (input can be pandas series or numpy array)
339 bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
340 """
341 typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values
342 volume = bars['volume'].values
343
344 return pd.Series(index=bars.index,
345 data=np.cumsum(volume * typical) / np.cumsum(volume))
346
347
348 # ---------------------------------------------
349
350 def rolling_vwap(bars, window=200, min_periods=None):
351 """
352 calculate vwap using moving window
353 (input can be pandas series or numpy array)
354 bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
355 """
356 min_periods = window if min_periods is None else min_periods
357
358 typical = ((bars['high'] + bars['low'] + bars['close']) / 3)
359 volume = bars['volume']
360
361 left = (volume * typical).rolling(window=window,
362 min_periods=min_periods).sum()
363 right = volume.rolling(window=window, min_periods=min_periods).sum()
364
365 return pd.Series(index=bars.index, data=(left / right)
366 ).replace([np.inf, -np.inf], float('NaN')).ffill()
367
368
369 # ---------------------------------------------
370
371 def rsi(series, window=14):
372 """
373 compute the n period relative strength indicator
374 """
375
376 # 100-(100/relative_strength)
377 deltas = np.diff(series)
378 seed = deltas[:window + 1]
379
380 # default values
381 ups = seed[seed > 0].sum() / window
382 downs = -seed[seed < 0].sum() / window
383 rsival = np.zeros_like(series)
384 rsival[:window] = 100. - 100. / (1. + ups / downs)
385
386 # period values
387 for i in range(window, len(series)):
388 delta = deltas[i - 1]
389 if delta > 0:
390 upval = delta
391 downval = 0
392 else:
393 upval = 0
394 downval = -delta
395
396 ups = (ups * (window - 1) + upval) / window
397 downs = (downs * (window - 1.) + downval) / window
398 rsival[i] = 100. - 100. / (1. + ups / downs)
399
400 # return rsival
401 return pd.Series(index=series.index, data=rsival)
402
403
404 # ---------------------------------------------
405
406 def macd(series, fast=3, slow=10, smooth=16):
407 """
408 compute the MACD (Moving Average Convergence/Divergence)
409 using a fast and slow exponential moving avg'
410 return value is emaslow, emafast, macd which are len(x) arrays
411 """
412 macd_line = rolling_weighted_mean(series, window=fast) - \
413 rolling_weighted_mean(series, window=slow)
414 signal = rolling_weighted_mean(macd_line, window=smooth)
415 histogram = macd_line - signal
416 # return macd_line, signal, histogram
417 return pd.DataFrame(index=series.index, data={
418 'macd': macd_line.values,
419 'signal': signal.values,
420 'histogram': histogram.values
421 })
422
423
424 # ---------------------------------------------
425
426 def bollinger_bands(series, window=20, stds=2):
427 ma = rolling_mean(series, window=window, min_periods=1)
428 std = rolling_std(series, window=window, min_periods=1)
429 upper = ma + std * stds
430 lower = ma - std * stds
431
432 return pd.DataFrame(index=series.index, data={
433 'upper': upper,
434 'mid': ma,
435 'lower': lower
436 })
437
438
439 # ---------------------------------------------
440
441 def weighted_bollinger_bands(series, window=20, stds=2):
442 ema = rolling_weighted_mean(series, window=window)
443 std = rolling_std(series, window=window)
444 upper = ema + std * stds
445 lower = ema - std * stds
446
447 return pd.DataFrame(index=series.index, data={
448 'upper': upper.values,
449 'mid': ema.values,
450 'lower': lower.values
451 })
452
453
454 # ---------------------------------------------
455
456 def returns(series):
457 try:
458 res = (series / series.shift(1) -
459 1).replace([np.inf, -np.inf], float('NaN'))
460 except Exception as e: # noqa: F841
461 res = nans(len(series))
462
463 return pd.Series(index=series.index, data=res)
464
465
466 # ---------------------------------------------
467
468 def log_returns(series):
469 try:
470 res = np.log(series / series.shift(1)
471 ).replace([np.inf, -np.inf], float('NaN'))
472 except Exception as e: # noqa: F841
473 res = nans(len(series))
474
475 return pd.Series(index=series.index, data=res)
476
477
478 # ---------------------------------------------
479
480 def implied_volatility(series, window=252):
481 try:
482 logret = np.log(series / series.shift(1)
483 ).replace([np.inf, -np.inf], float('NaN'))
484 res = numpy_rolling_std(logret, window) * np.sqrt(window)
485 except Exception as e: # noqa: F841
486 res = nans(len(series))
487
488 return pd.Series(index=series.index, data=res)
489
490
491 # ---------------------------------------------
492
493 def keltner_channel(bars, window=14, atrs=2):
494 typical_mean = rolling_mean(typical_price(bars), window)
495 atrval = atr(bars, window) * atrs
496
497 upper = typical_mean + atrval
498 lower = typical_mean - atrval
499
500 return pd.DataFrame(index=bars.index, data={
501 'upper': upper.values,
502 'mid': typical_mean.values,
503 'lower': lower.values
504 })
505
506
507 # ---------------------------------------------
508
509 def roc(series, window=14):
510 """
511 compute rate of change
512 """
513 res = (series - series.shift(window)) / series.shift(window)
514 return pd.Series(index=series.index, data=res)
515
516
517 # ---------------------------------------------
518
519 def cci(series, window=14):
520 """
521 compute commodity channel index
522 """
523 price = typical_price(series)
524 typical_mean = rolling_mean(price, window)
525 res = (price - typical_mean) / (.015 * np.std(typical_mean))
526 return pd.Series(index=series.index, data=res)
527
528
529 # ---------------------------------------------
530
531 def stoch(df, window=14, d=3, k=3, fast=False):
532 """
533 compute the n period relative strength indicator
534 http://excelta.blogspot.co.il/2013/09/stochastic-oscillator-technical.html
535 """
536
537 my_df = pd.DataFrame(index=df.index)
538
539 my_df['rolling_max'] = df['high'].rolling(window).max()
540 my_df['rolling_min'] = df['low'].rolling(window).min()
541
542 my_df['fast_k'] = (
543 100 * (df['close'] - my_df['rolling_min']) /
544 (my_df['rolling_max'] - my_df['rolling_min'])
545 )
546 my_df['fast_d'] = my_df['fast_k'].rolling(d).mean()
547
548 if fast:
549 return my_df.loc[:, ['fast_k', 'fast_d']]
550
551 my_df['slow_k'] = my_df['fast_k'].rolling(k).mean()
552 my_df['slow_d'] = my_df['slow_k'].rolling(d).mean()
553
554 return my_df.loc[:, ['slow_k', 'slow_d']]
555
556 # ---------------------------------------------
557
558
559 def zlma(series, window=20, min_periods=None, kind="ema"):
560 """
561 John Ehlers' Zero lag (exponential) moving average
562 https://en.wikipedia.org/wiki/Zero_lag_exponential_moving_average
563 """
564 min_periods = window if min_periods is None else min_periods
565
566 lag = (window - 1) // 2
567 series = 2 * series - series.shift(lag)
568 if kind in ['ewm', 'ema']:
569 return wma(series, lag, min_periods)
570 elif kind == "hma":
571 return hma(series, lag, min_periods)
572 return sma(series, lag, min_periods)
573
574
575 def zlema(series, window, min_periods=None):
576 return zlma(series, window, min_periods, kind="ema")
577
578
579 def zlsma(series, window, min_periods=None):
580 return zlma(series, window, min_periods, kind="sma")
581
582
583 def zlhma(series, window, min_periods=None):
584 return zlma(series, window, min_periods, kind="hma")
585
586 # ---------------------------------------------
587
588
589 def zscore(bars, window=20, stds=1, col='close'):
590 """ get zscore of price """
591 std = numpy_rolling_std(bars[col], window)
592 mean = numpy_rolling_mean(bars[col], window)
593 return (bars[col] - mean) / (std * stds)
594
595 # ---------------------------------------------
596
597
598 def pvt(bars):
599 """ Price Volume Trend """
600 trend = ((bars['close'] - bars['close'].shift(1)) /
601 bars['close'].shift(1)) * bars['volume']
602 return trend.cumsum()
603
604
605 def chopiness(bars, window=14):
606 atrsum = true_range(bars).rolling(window).sum()
607 highs = bars['high'].rolling(window).max()
608 lows = bars['low'].rolling(window).min()
609 return 100 * np.log10(atrsum / (highs - lows)) / np.log10(window)
610
611
612 # =============================================
613
614
615 PandasObject.session = session
616 PandasObject.atr = atr
617 PandasObject.bollinger_bands = bollinger_bands
618 PandasObject.cci = cci
619 PandasObject.crossed = crossed
620 PandasObject.crossed_above = crossed_above
621 PandasObject.crossed_below = crossed_below
622 PandasObject.heikinashi = heikinashi
623 PandasObject.hull_moving_average = hull_moving_average
624 PandasObject.ibs = ibs
625 PandasObject.implied_volatility = implied_volatility
626 PandasObject.keltner_channel = keltner_channel
627 PandasObject.log_returns = log_returns
628 PandasObject.macd = macd
629 PandasObject.returns = returns
630 PandasObject.roc = roc
631 PandasObject.rolling_max = rolling_max
632 PandasObject.rolling_min = rolling_min
633 PandasObject.rolling_mean = rolling_mean
634 PandasObject.rolling_std = rolling_std
635 PandasObject.rsi = rsi
636 PandasObject.stoch = stoch
637 PandasObject.zscore = zscore
638 PandasObject.pvt = pvt
639 PandasObject.chopiness = chopiness
640 PandasObject.tdi = tdi
641 PandasObject.true_range = true_range
642 PandasObject.mid_price = mid_price
643 PandasObject.typical_price = typical_price
644 PandasObject.vwap = vwap
645 PandasObject.rolling_vwap = rolling_vwap
646 PandasObject.weighted_bollinger_bands = weighted_bollinger_bands
647 PandasObject.rolling_weighted_mean = rolling_weighted_mean
648
649 PandasObject.sma = sma
650 PandasObject.wma = wma
651 PandasObject.ema = wma
652 PandasObject.hma = hma
653
654 PandasObject.zlsma = zlsma
655 PandasObject.zlwma = zlema
656 PandasObject.zlema = zlema
657 PandasObject.zlhma = zlhma
658 PandasObject.zlma = zlma
659
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/vendor/qtpylib/indicators.py b/freqtrade/vendor/qtpylib/indicators.py
--- a/freqtrade/vendor/qtpylib/indicators.py
+++ b/freqtrade/vendor/qtpylib/indicators.py
@@ -222,7 +222,7 @@
if isinstance(series1, np.ndarray):
series1 = pd.Series(series1)
- if isinstance(series2, (float, int, np.ndarray)):
+ if isinstance(series2, (float, int, np.ndarray, np.integer, np.floating)):
series2 = pd.Series(index=series1.index, data=series2)
if direction is None or direction == "above":
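As a quick sanity check of the widened type check — a hypothetical snippet, not part of the repository's test suite — the same shift-based cross detection can be reproduced directly with pandas once the numpy scalar is wrapped in a Series:

```python
import numpy as np
import pandas as pd

series1 = pd.Series([28.0, 29.5, 31.2, 33.0])  # made-up indicator values
threshold = np.int64(30)                       # numpy scalar, as produced by hyperopt

# Patched condition: numpy integer/floating scalars are now broadcast into a Series.
if isinstance(threshold, (float, int, np.ndarray, np.integer, np.floating)):
    series2 = pd.Series(index=series1.index, data=threshold)

cross_up = (series1 > series2) & (series1.shift(1) <= series2.shift(1))
print(cross_up.tolist())  # [False, False, True, False]
```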
| {"golden_diff": "diff --git a/freqtrade/vendor/qtpylib/indicators.py b/freqtrade/vendor/qtpylib/indicators.py\n--- a/freqtrade/vendor/qtpylib/indicators.py\n+++ b/freqtrade/vendor/qtpylib/indicators.py\n@@ -222,7 +222,7 @@\n if isinstance(series1, np.ndarray):\n series1 = pd.Series(series1)\n \n- if isinstance(series2, (float, int, np.ndarray)):\n+ if isinstance(series2, (float, int, np.ndarray, np.integer, np.floating)):\n series2 = pd.Series(index=series1.index, data=series2)\n \n if direction is None or direction == \"above\":\n", "issue": "cross_above problem with shift\n<!-- \r\nHave you searched for similar issues before posting it?\r\nDid you have a VERY good look at the [documentation](https://www.freqtrade.io/en/latest/) and are sure that the question is not explained there\r\n\r\nPlease do not use the question template to report bugs or to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: Ubuntu \r\n * Python Version: Python 3.8.2 (`python -V`)\r\n * CCXT version: ccxt==1.33.52 (`pip freeze | grep ccxt`)\r\n * Freqtrade Version: freqtrade develop-21f4aba4 (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)\r\n \r\n## Your question\r\n\r\nHello, \r\nI am having a problem with the cross_above function in qtpylib. I am trying to optimize one strategy but as soon as I run it I receive the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/externals/loky/process_executor.py\", line 431, in _process_worker\r\n r = call_item()\r\n File \"/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/externals/loky/process_executor.py\", line 285, in __call__\r\n return self.fn(*self.args, **self.kwargs)\r\n File \"/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/_parallel_backends.py\", line 595, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/parallel.py\", line 252, in __call__\r\n return [func(*args, **kwargs)\r\n File \"/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/parallel.py\", line 252, in <listcomp>\r\n return [func(*args, **kwargs)\r\n File \"/home/trademad/freqtrade/.env/lib/python3.8/site-packages/joblib/externals/loky/cloudpickle_wrapper.py\", line 38, in __call__\r\n return self._obj(*args, **kwargs)\r\n File \"/home/trademad/freqtrade/freqtrade/optimize/hyperopt.py\", line 532, in generate_optimizer\r\n backtesting_results = self.backtesting.backtest(\r\n File \"/home/trademad/freqtrade/freqtrade/optimize/backtesting.py\", line 312, in backtest\r\n data: Dict = self._get_ohlcv_as_lists(processed)\r\n File \"/home/trademad/freqtrade/freqtrade/optimize/backtesting.py\", line 164, in _get_ohlcv_as_lists\r\n self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair})[headers].copy()\r\n File \"/home/trademad/freqtrade/user_data/hyperopts/bbrsi_opt_v01.py\", line 61, in populate_buy_trend\r\n conditions.append(qtpylib.crossed_above(dataframe['rsi'], params['rsi-value']))\r\n File \"/home/trademad/freqtrade/freqtrade/vendor/qtpylib/indicators.py\", line 243, in crossed_above\r\n return crossed(series1, series2, \"above\")\r\n File \"/home/trademad/freqtrade/freqtrade/vendor/qtpylib/indicators.py\", line 230, in crossed\r\n series1.shift(1) <= series2.shift(1)))\r\nAttributeError: 'numpy.int64' object has no attribute 'shift'\r\n\r\nCould it be a problem with pandas in the latest 
version?\r\nthanks\r\n\r\n*Ask the question you have not been able to find an answer in our [Documentation](https://www.freqtrade.io/en/latest/)*\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# QTPyLib: Quantitative Trading Python Library\n# https://github.com/ranaroussi/qtpylib\n#\n# Copyright 2016-2018 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport warnings\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.base import PandasObject\n\n# =============================================\n# check min, python version\nif sys.version_info < (3, 4):\n raise SystemError(\"QTPyLib requires Python version >= 3.4\")\n\n# =============================================\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\n\n# =============================================\n\n\ndef numpy_rolling_window(data, window):\n shape = data.shape[:-1] + (data.shape[-1] - window + 1, window)\n strides = data.strides + (data.strides[-1],)\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)\n\n\ndef numpy_rolling_series(func):\n def func_wrapper(data, window, as_source=False):\n series = data.values if isinstance(data, pd.Series) else data\n\n new_series = np.empty(len(series)) * np.nan\n calculated = func(series, window)\n new_series[-len(calculated):] = calculated\n\n if as_source and isinstance(data, pd.Series):\n return pd.Series(index=data.index, data=new_series)\n\n return new_series\n\n return func_wrapper\n\n\n@numpy_rolling_series\ndef numpy_rolling_mean(data, window, as_source=False):\n return np.mean(numpy_rolling_window(data, window), axis=-1)\n\n\n@numpy_rolling_series\ndef numpy_rolling_std(data, window, as_source=False):\n return np.std(numpy_rolling_window(data, window), axis=-1, ddof=1)\n\n\n# ---------------------------------------------\n\n\ndef session(df, start='17:00', end='16:00'):\n \"\"\" remove previous globex day from df \"\"\"\n if df.empty:\n return df\n\n # get start/end/now as decimals\n int_start = list(map(int, start.split(':')))\n int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001\n int_end = list(map(int, end.split(':')))\n int_end = int_end[0] + int_end[1] / 100\n int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100)\n\n # same-dat session?\n is_same_day = int_end > int_start\n\n # set pointers\n curr = prev = df[-1:].index[0].strftime('%Y-%m-%d')\n\n # globex/forex session\n if not is_same_day:\n prev = (datetime.strptime(curr, '%Y-%m-%d') -\n timedelta(1)).strftime('%Y-%m-%d')\n\n # slice\n if int_now >= int_start:\n df = df[df.index >= curr + ' ' + start]\n else:\n df = df[df.index >= prev + ' ' + start]\n\n return df.copy()\n\n# ---------------------------------------------\n\n\ndef heikinashi(bars):\n bars = bars.copy()\n bars['ha_close'] = (bars['open'] + bars['high'] +\n bars['low'] + bars['close']) / 4\n\n # ha open\n bars.at[0, 'ha_open'] = (bars.at[0, 'open'] + bars.at[0, 
'close']) / 2\n for i in range(1, len(bars)):\n bars.at[i, 'ha_open'] = (bars.at[i - 1, 'ha_open'] + bars.at[i - 1, 'ha_close']) / 2\n\n bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)\n bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)\n\n return pd.DataFrame(index=bars.index,\n data={'open': bars['ha_open'],\n 'high': bars['ha_high'],\n 'low': bars['ha_low'],\n 'close': bars['ha_close']})\n\n# ---------------------------------------------\n\n\ndef tdi(series, rsi_lookback=13, rsi_smooth_len=2,\n rsi_signal_len=7, bb_lookback=34, bb_std=1.6185):\n\n rsi_data = rsi(series, rsi_lookback)\n rsi_smooth = sma(rsi_data, rsi_smooth_len)\n rsi_signal = sma(rsi_data, rsi_signal_len)\n\n bb_series = bollinger_bands(rsi_data, bb_lookback, bb_std)\n\n return pd.DataFrame(index=series.index, data={\n \"rsi\": rsi_data,\n \"rsi_signal\": rsi_signal,\n \"rsi_smooth\": rsi_smooth,\n \"rsi_bb_upper\": bb_series['upper'],\n \"rsi_bb_lower\": bb_series['lower'],\n \"rsi_bb_mid\": bb_series['mid']\n })\n\n# ---------------------------------------------\n\n\ndef awesome_oscillator(df, weighted=False, fast=5, slow=34):\n midprice = (df['high'] + df['low']) / 2\n\n if weighted:\n ao = (midprice.ewm(fast).mean() - midprice.ewm(slow).mean()).values\n else:\n ao = numpy_rolling_mean(midprice, fast) - \\\n numpy_rolling_mean(midprice, slow)\n\n return pd.Series(index=df.index, data=ao)\n\n\n# ---------------------------------------------\n\ndef nans(length=1):\n mtx = np.empty(length)\n mtx[:] = np.nan\n return mtx\n\n\n# ---------------------------------------------\n\ndef typical_price(bars):\n res = (bars['high'] + bars['low'] + bars['close']) / 3.\n return pd.Series(index=bars.index, data=res)\n\n\n# ---------------------------------------------\n\ndef mid_price(bars):\n res = (bars['high'] + bars['low']) / 2.\n return pd.Series(index=bars.index, data=res)\n\n\n# ---------------------------------------------\n\ndef ibs(bars):\n \"\"\" Internal bar strength \"\"\"\n res = np.round((bars['close'] - bars['low']) /\n (bars['high'] - bars['low']), 2)\n return pd.Series(index=bars.index, data=res)\n\n\n# ---------------------------------------------\n\ndef true_range(bars):\n return pd.DataFrame({\n \"hl\": bars['high'] - bars['low'],\n \"hc\": abs(bars['high'] - bars['close'].shift(1)),\n \"lc\": abs(bars['low'] - bars['close'].shift(1))\n }).max(axis=1)\n\n\n# ---------------------------------------------\n\ndef atr(bars, window=14, exp=False):\n tr = true_range(bars)\n\n if exp:\n res = rolling_weighted_mean(tr, window)\n else:\n res = rolling_mean(tr, window)\n\n return pd.Series(res)\n\n\n# ---------------------------------------------\n\ndef crossed(series1, series2, direction=None):\n if isinstance(series1, np.ndarray):\n series1 = pd.Series(series1)\n\n if isinstance(series2, (float, int, np.ndarray)):\n series2 = pd.Series(index=series1.index, data=series2)\n\n if direction is None or direction == \"above\":\n above = pd.Series((series1 > series2) & (\n series1.shift(1) <= series2.shift(1)))\n\n if direction is None or direction == \"below\":\n below = pd.Series((series1 < series2) & (\n series1.shift(1) >= series2.shift(1)))\n\n if direction is None:\n return above or below\n\n return above if direction == \"above\" else below\n\n\ndef crossed_above(series1, series2):\n return crossed(series1, series2, \"above\")\n\n\ndef crossed_below(series1, series2):\n return crossed(series1, series2, \"below\")\n\n# ---------------------------------------------\n\n\ndef 
rolling_std(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n if min_periods == window and len(series) > window:\n return numpy_rolling_std(series, window, True)\n else:\n try:\n return series.rolling(window=window, min_periods=min_periods).std()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).std()\n\n# ---------------------------------------------\n\n\ndef rolling_mean(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n if min_periods == window and len(series) > window:\n return numpy_rolling_mean(series, window, True)\n else:\n try:\n return series.rolling(window=window, min_periods=min_periods).mean()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).mean()\n\n# ---------------------------------------------\n\n\ndef rolling_min(series, window=14, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n try:\n return series.rolling(window=window, min_periods=min_periods).min()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).min()\n\n\n# ---------------------------------------------\n\ndef rolling_max(series, window=14, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n try:\n return series.rolling(window=window, min_periods=min_periods).max()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).max()\n\n\n# ---------------------------------------------\n\ndef rolling_weighted_mean(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n try:\n return series.ewm(span=window, min_periods=min_periods).mean()\n except Exception as e: # noqa: F841\n return pd.ewma(series, span=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef hull_moving_average(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - \\\n rolling_weighted_mean(series, window, min_periods)\n return rolling_weighted_mean(ma, np.sqrt(window), min_periods)\n\n\n# ---------------------------------------------\n\ndef sma(series, window=200, min_periods=None):\n return rolling_mean(series, window=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef wma(series, window=200, min_periods=None):\n return rolling_weighted_mean(series, window=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef hma(series, window=200, min_periods=None):\n return hull_moving_average(series, window=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef vwap(bars):\n \"\"\"\n calculate vwap of entire time series\n (input can be pandas series or numpy array)\n bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]\n \"\"\"\n typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values\n volume = bars['volume'].values\n\n return pd.Series(index=bars.index,\n data=np.cumsum(volume * typical) / np.cumsum(volume))\n\n\n# ---------------------------------------------\n\ndef rolling_vwap(bars, window=200, min_periods=None):\n \"\"\"\n calculate vwap using moving window\n (input can be pandas series or numpy array)\n bars are 
usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]\n \"\"\"\n min_periods = window if min_periods is None else min_periods\n\n typical = ((bars['high'] + bars['low'] + bars['close']) / 3)\n volume = bars['volume']\n\n left = (volume * typical).rolling(window=window,\n min_periods=min_periods).sum()\n right = volume.rolling(window=window, min_periods=min_periods).sum()\n\n return pd.Series(index=bars.index, data=(left / right)\n ).replace([np.inf, -np.inf], float('NaN')).ffill()\n\n\n# ---------------------------------------------\n\ndef rsi(series, window=14):\n \"\"\"\n compute the n period relative strength indicator\n \"\"\"\n\n # 100-(100/relative_strength)\n deltas = np.diff(series)\n seed = deltas[:window + 1]\n\n # default values\n ups = seed[seed > 0].sum() / window\n downs = -seed[seed < 0].sum() / window\n rsival = np.zeros_like(series)\n rsival[:window] = 100. - 100. / (1. + ups / downs)\n\n # period values\n for i in range(window, len(series)):\n delta = deltas[i - 1]\n if delta > 0:\n upval = delta\n downval = 0\n else:\n upval = 0\n downval = -delta\n\n ups = (ups * (window - 1) + upval) / window\n downs = (downs * (window - 1.) + downval) / window\n rsival[i] = 100. - 100. / (1. + ups / downs)\n\n # return rsival\n return pd.Series(index=series.index, data=rsival)\n\n\n# ---------------------------------------------\n\ndef macd(series, fast=3, slow=10, smooth=16):\n \"\"\"\n compute the MACD (Moving Average Convergence/Divergence)\n using a fast and slow exponential moving avg'\n return value is emaslow, emafast, macd which are len(x) arrays\n \"\"\"\n macd_line = rolling_weighted_mean(series, window=fast) - \\\n rolling_weighted_mean(series, window=slow)\n signal = rolling_weighted_mean(macd_line, window=smooth)\n histogram = macd_line - signal\n # return macd_line, signal, histogram\n return pd.DataFrame(index=series.index, data={\n 'macd': macd_line.values,\n 'signal': signal.values,\n 'histogram': histogram.values\n })\n\n\n# ---------------------------------------------\n\ndef bollinger_bands(series, window=20, stds=2):\n ma = rolling_mean(series, window=window, min_periods=1)\n std = rolling_std(series, window=window, min_periods=1)\n upper = ma + std * stds\n lower = ma - std * stds\n\n return pd.DataFrame(index=series.index, data={\n 'upper': upper,\n 'mid': ma,\n 'lower': lower\n })\n\n\n# ---------------------------------------------\n\ndef weighted_bollinger_bands(series, window=20, stds=2):\n ema = rolling_weighted_mean(series, window=window)\n std = rolling_std(series, window=window)\n upper = ema + std * stds\n lower = ema - std * stds\n\n return pd.DataFrame(index=series.index, data={\n 'upper': upper.values,\n 'mid': ema.values,\n 'lower': lower.values\n })\n\n\n# ---------------------------------------------\n\ndef returns(series):\n try:\n res = (series / series.shift(1) -\n 1).replace([np.inf, -np.inf], float('NaN'))\n except Exception as e: # noqa: F841\n res = nans(len(series))\n\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef log_returns(series):\n try:\n res = np.log(series / series.shift(1)\n ).replace([np.inf, -np.inf], float('NaN'))\n except Exception as e: # noqa: F841\n res = nans(len(series))\n\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef implied_volatility(series, window=252):\n try:\n logret = np.log(series / series.shift(1)\n ).replace([np.inf, -np.inf], float('NaN'))\n res = numpy_rolling_std(logret, window) * 
np.sqrt(window)\n except Exception as e: # noqa: F841\n res = nans(len(series))\n\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef keltner_channel(bars, window=14, atrs=2):\n typical_mean = rolling_mean(typical_price(bars), window)\n atrval = atr(bars, window) * atrs\n\n upper = typical_mean + atrval\n lower = typical_mean - atrval\n\n return pd.DataFrame(index=bars.index, data={\n 'upper': upper.values,\n 'mid': typical_mean.values,\n 'lower': lower.values\n })\n\n\n# ---------------------------------------------\n\ndef roc(series, window=14):\n \"\"\"\n compute rate of change\n \"\"\"\n res = (series - series.shift(window)) / series.shift(window)\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef cci(series, window=14):\n \"\"\"\n compute commodity channel index\n \"\"\"\n price = typical_price(series)\n typical_mean = rolling_mean(price, window)\n res = (price - typical_mean) / (.015 * np.std(typical_mean))\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef stoch(df, window=14, d=3, k=3, fast=False):\n \"\"\"\n compute the n period relative strength indicator\n http://excelta.blogspot.co.il/2013/09/stochastic-oscillator-technical.html\n \"\"\"\n\n my_df = pd.DataFrame(index=df.index)\n\n my_df['rolling_max'] = df['high'].rolling(window).max()\n my_df['rolling_min'] = df['low'].rolling(window).min()\n\n my_df['fast_k'] = (\n 100 * (df['close'] - my_df['rolling_min']) /\n (my_df['rolling_max'] - my_df['rolling_min'])\n )\n my_df['fast_d'] = my_df['fast_k'].rolling(d).mean()\n\n if fast:\n return my_df.loc[:, ['fast_k', 'fast_d']]\n\n my_df['slow_k'] = my_df['fast_k'].rolling(k).mean()\n my_df['slow_d'] = my_df['slow_k'].rolling(d).mean()\n\n return my_df.loc[:, ['slow_k', 'slow_d']]\n\n# ---------------------------------------------\n\n\ndef zlma(series, window=20, min_periods=None, kind=\"ema\"):\n \"\"\"\n John Ehlers' Zero lag (exponential) moving average\n https://en.wikipedia.org/wiki/Zero_lag_exponential_moving_average\n \"\"\"\n min_periods = window if min_periods is None else min_periods\n\n lag = (window - 1) // 2\n series = 2 * series - series.shift(lag)\n if kind in ['ewm', 'ema']:\n return wma(series, lag, min_periods)\n elif kind == \"hma\":\n return hma(series, lag, min_periods)\n return sma(series, lag, min_periods)\n\n\ndef zlema(series, window, min_periods=None):\n return zlma(series, window, min_periods, kind=\"ema\")\n\n\ndef zlsma(series, window, min_periods=None):\n return zlma(series, window, min_periods, kind=\"sma\")\n\n\ndef zlhma(series, window, min_periods=None):\n return zlma(series, window, min_periods, kind=\"hma\")\n\n# ---------------------------------------------\n\n\ndef zscore(bars, window=20, stds=1, col='close'):\n \"\"\" get zscore of price \"\"\"\n std = numpy_rolling_std(bars[col], window)\n mean = numpy_rolling_mean(bars[col], window)\n return (bars[col] - mean) / (std * stds)\n\n# ---------------------------------------------\n\n\ndef pvt(bars):\n \"\"\" Price Volume Trend \"\"\"\n trend = ((bars['close'] - bars['close'].shift(1)) /\n bars['close'].shift(1)) * bars['volume']\n return trend.cumsum()\n\n\ndef chopiness(bars, window=14):\n atrsum = true_range(bars).rolling(window).sum()\n highs = bars['high'].rolling(window).max()\n lows = bars['low'].rolling(window).min()\n return 100 * np.log10(atrsum / (highs - lows)) / np.log10(window)\n\n\n# 
=============================================\n\n\nPandasObject.session = session\nPandasObject.atr = atr\nPandasObject.bollinger_bands = bollinger_bands\nPandasObject.cci = cci\nPandasObject.crossed = crossed\nPandasObject.crossed_above = crossed_above\nPandasObject.crossed_below = crossed_below\nPandasObject.heikinashi = heikinashi\nPandasObject.hull_moving_average = hull_moving_average\nPandasObject.ibs = ibs\nPandasObject.implied_volatility = implied_volatility\nPandasObject.keltner_channel = keltner_channel\nPandasObject.log_returns = log_returns\nPandasObject.macd = macd\nPandasObject.returns = returns\nPandasObject.roc = roc\nPandasObject.rolling_max = rolling_max\nPandasObject.rolling_min = rolling_min\nPandasObject.rolling_mean = rolling_mean\nPandasObject.rolling_std = rolling_std\nPandasObject.rsi = rsi\nPandasObject.stoch = stoch\nPandasObject.zscore = zscore\nPandasObject.pvt = pvt\nPandasObject.chopiness = chopiness\nPandasObject.tdi = tdi\nPandasObject.true_range = true_range\nPandasObject.mid_price = mid_price\nPandasObject.typical_price = typical_price\nPandasObject.vwap = vwap\nPandasObject.rolling_vwap = rolling_vwap\nPandasObject.weighted_bollinger_bands = weighted_bollinger_bands\nPandasObject.rolling_weighted_mean = rolling_weighted_mean\n\nPandasObject.sma = sma\nPandasObject.wma = wma\nPandasObject.ema = wma\nPandasObject.hma = hma\n\nPandasObject.zlsma = zlsma\nPandasObject.zlwma = zlema\nPandasObject.zlema = zlema\nPandasObject.zlhma = zlhma\nPandasObject.zlma = zlma\n", "path": "freqtrade/vendor/qtpylib/indicators.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# QTPyLib: Quantitative Trading Python Library\n# https://github.com/ranaroussi/qtpylib\n#\n# Copyright 2016-2018 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport warnings\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.base import PandasObject\n\n# =============================================\n# check min, python version\nif sys.version_info < (3, 4):\n raise SystemError(\"QTPyLib requires Python version >= 3.4\")\n\n# =============================================\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\n\n# =============================================\n\n\ndef numpy_rolling_window(data, window):\n shape = data.shape[:-1] + (data.shape[-1] - window + 1, window)\n strides = data.strides + (data.strides[-1],)\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)\n\n\ndef numpy_rolling_series(func):\n def func_wrapper(data, window, as_source=False):\n series = data.values if isinstance(data, pd.Series) else data\n\n new_series = np.empty(len(series)) * np.nan\n calculated = func(series, window)\n new_series[-len(calculated):] = calculated\n\n if as_source and isinstance(data, pd.Series):\n return pd.Series(index=data.index, data=new_series)\n\n return new_series\n\n return func_wrapper\n\n\n@numpy_rolling_series\ndef 
numpy_rolling_mean(data, window, as_source=False):\n return np.mean(numpy_rolling_window(data, window), axis=-1)\n\n\n@numpy_rolling_series\ndef numpy_rolling_std(data, window, as_source=False):\n return np.std(numpy_rolling_window(data, window), axis=-1, ddof=1)\n\n\n# ---------------------------------------------\n\n\ndef session(df, start='17:00', end='16:00'):\n \"\"\" remove previous globex day from df \"\"\"\n if df.empty:\n return df\n\n # get start/end/now as decimals\n int_start = list(map(int, start.split(':')))\n int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001\n int_end = list(map(int, end.split(':')))\n int_end = int_end[0] + int_end[1] / 100\n int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100)\n\n # same-dat session?\n is_same_day = int_end > int_start\n\n # set pointers\n curr = prev = df[-1:].index[0].strftime('%Y-%m-%d')\n\n # globex/forex session\n if not is_same_day:\n prev = (datetime.strptime(curr, '%Y-%m-%d') -\n timedelta(1)).strftime('%Y-%m-%d')\n\n # slice\n if int_now >= int_start:\n df = df[df.index >= curr + ' ' + start]\n else:\n df = df[df.index >= prev + ' ' + start]\n\n return df.copy()\n\n# ---------------------------------------------\n\n\ndef heikinashi(bars):\n bars = bars.copy()\n bars['ha_close'] = (bars['open'] + bars['high'] +\n bars['low'] + bars['close']) / 4\n\n # ha open\n bars.at[0, 'ha_open'] = (bars.at[0, 'open'] + bars.at[0, 'close']) / 2\n for i in range(1, len(bars)):\n bars.at[i, 'ha_open'] = (bars.at[i - 1, 'ha_open'] + bars.at[i - 1, 'ha_close']) / 2\n\n bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)\n bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)\n\n return pd.DataFrame(index=bars.index,\n data={'open': bars['ha_open'],\n 'high': bars['ha_high'],\n 'low': bars['ha_low'],\n 'close': bars['ha_close']})\n\n# ---------------------------------------------\n\n\ndef tdi(series, rsi_lookback=13, rsi_smooth_len=2,\n rsi_signal_len=7, bb_lookback=34, bb_std=1.6185):\n\n rsi_data = rsi(series, rsi_lookback)\n rsi_smooth = sma(rsi_data, rsi_smooth_len)\n rsi_signal = sma(rsi_data, rsi_signal_len)\n\n bb_series = bollinger_bands(rsi_data, bb_lookback, bb_std)\n\n return pd.DataFrame(index=series.index, data={\n \"rsi\": rsi_data,\n \"rsi_signal\": rsi_signal,\n \"rsi_smooth\": rsi_smooth,\n \"rsi_bb_upper\": bb_series['upper'],\n \"rsi_bb_lower\": bb_series['lower'],\n \"rsi_bb_mid\": bb_series['mid']\n })\n\n# ---------------------------------------------\n\n\ndef awesome_oscillator(df, weighted=False, fast=5, slow=34):\n midprice = (df['high'] + df['low']) / 2\n\n if weighted:\n ao = (midprice.ewm(fast).mean() - midprice.ewm(slow).mean()).values\n else:\n ao = numpy_rolling_mean(midprice, fast) - \\\n numpy_rolling_mean(midprice, slow)\n\n return pd.Series(index=df.index, data=ao)\n\n\n# ---------------------------------------------\n\ndef nans(length=1):\n mtx = np.empty(length)\n mtx[:] = np.nan\n return mtx\n\n\n# ---------------------------------------------\n\ndef typical_price(bars):\n res = (bars['high'] + bars['low'] + bars['close']) / 3.\n return pd.Series(index=bars.index, data=res)\n\n\n# ---------------------------------------------\n\ndef mid_price(bars):\n res = (bars['high'] + bars['low']) / 2.\n return pd.Series(index=bars.index, data=res)\n\n\n# ---------------------------------------------\n\ndef ibs(bars):\n \"\"\" Internal bar strength \"\"\"\n res = np.round((bars['close'] - bars['low']) /\n (bars['high'] - bars['low']), 2)\n return 
pd.Series(index=bars.index, data=res)\n\n\n# ---------------------------------------------\n\ndef true_range(bars):\n return pd.DataFrame({\n \"hl\": bars['high'] - bars['low'],\n \"hc\": abs(bars['high'] - bars['close'].shift(1)),\n \"lc\": abs(bars['low'] - bars['close'].shift(1))\n }).max(axis=1)\n\n\n# ---------------------------------------------\n\ndef atr(bars, window=14, exp=False):\n tr = true_range(bars)\n\n if exp:\n res = rolling_weighted_mean(tr, window)\n else:\n res = rolling_mean(tr, window)\n\n return pd.Series(res)\n\n\n# ---------------------------------------------\n\ndef crossed(series1, series2, direction=None):\n if isinstance(series1, np.ndarray):\n series1 = pd.Series(series1)\n\n if isinstance(series2, (float, int, np.ndarray, np.integer, np.floating)):\n series2 = pd.Series(index=series1.index, data=series2)\n\n if direction is None or direction == \"above\":\n above = pd.Series((series1 > series2) & (\n series1.shift(1) <= series2.shift(1)))\n\n if direction is None or direction == \"below\":\n below = pd.Series((series1 < series2) & (\n series1.shift(1) >= series2.shift(1)))\n\n if direction is None:\n return above or below\n\n return above if direction == \"above\" else below\n\n\ndef crossed_above(series1, series2):\n return crossed(series1, series2, \"above\")\n\n\ndef crossed_below(series1, series2):\n return crossed(series1, series2, \"below\")\n\n# ---------------------------------------------\n\n\ndef rolling_std(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n if min_periods == window and len(series) > window:\n return numpy_rolling_std(series, window, True)\n else:\n try:\n return series.rolling(window=window, min_periods=min_periods).std()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).std()\n\n# ---------------------------------------------\n\n\ndef rolling_mean(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n if min_periods == window and len(series) > window:\n return numpy_rolling_mean(series, window, True)\n else:\n try:\n return series.rolling(window=window, min_periods=min_periods).mean()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).mean()\n\n# ---------------------------------------------\n\n\ndef rolling_min(series, window=14, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n try:\n return series.rolling(window=window, min_periods=min_periods).min()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).min()\n\n\n# ---------------------------------------------\n\ndef rolling_max(series, window=14, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n try:\n return series.rolling(window=window, min_periods=min_periods).max()\n except Exception as e: # noqa: F841\n return pd.Series(series).rolling(window=window, min_periods=min_periods).max()\n\n\n# ---------------------------------------------\n\ndef rolling_weighted_mean(series, window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n try:\n return series.ewm(span=window, min_periods=min_periods).mean()\n except Exception as e: # noqa: F841\n return pd.ewma(series, span=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef hull_moving_average(series, 
window=200, min_periods=None):\n min_periods = window if min_periods is None else min_periods\n ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - \\\n rolling_weighted_mean(series, window, min_periods)\n return rolling_weighted_mean(ma, np.sqrt(window), min_periods)\n\n\n# ---------------------------------------------\n\ndef sma(series, window=200, min_periods=None):\n return rolling_mean(series, window=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef wma(series, window=200, min_periods=None):\n return rolling_weighted_mean(series, window=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef hma(series, window=200, min_periods=None):\n return hull_moving_average(series, window=window, min_periods=min_periods)\n\n\n# ---------------------------------------------\n\ndef vwap(bars):\n \"\"\"\n calculate vwap of entire time series\n (input can be pandas series or numpy array)\n bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]\n \"\"\"\n typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values\n volume = bars['volume'].values\n\n return pd.Series(index=bars.index,\n data=np.cumsum(volume * typical) / np.cumsum(volume))\n\n\n# ---------------------------------------------\n\ndef rolling_vwap(bars, window=200, min_periods=None):\n \"\"\"\n calculate vwap using moving window\n (input can be pandas series or numpy array)\n bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]\n \"\"\"\n min_periods = window if min_periods is None else min_periods\n\n typical = ((bars['high'] + bars['low'] + bars['close']) / 3)\n volume = bars['volume']\n\n left = (volume * typical).rolling(window=window,\n min_periods=min_periods).sum()\n right = volume.rolling(window=window, min_periods=min_periods).sum()\n\n return pd.Series(index=bars.index, data=(left / right)\n ).replace([np.inf, -np.inf], float('NaN')).ffill()\n\n\n# ---------------------------------------------\n\ndef rsi(series, window=14):\n \"\"\"\n compute the n period relative strength indicator\n \"\"\"\n\n # 100-(100/relative_strength)\n deltas = np.diff(series)\n seed = deltas[:window + 1]\n\n # default values\n ups = seed[seed > 0].sum() / window\n downs = -seed[seed < 0].sum() / window\n rsival = np.zeros_like(series)\n rsival[:window] = 100. - 100. / (1. + ups / downs)\n\n # period values\n for i in range(window, len(series)):\n delta = deltas[i - 1]\n if delta > 0:\n upval = delta\n downval = 0\n else:\n upval = 0\n downval = -delta\n\n ups = (ups * (window - 1) + upval) / window\n downs = (downs * (window - 1.) + downval) / window\n rsival[i] = 100. - 100. / (1. 
+ ups / downs)\n\n # return rsival\n return pd.Series(index=series.index, data=rsival)\n\n\n# ---------------------------------------------\n\ndef macd(series, fast=3, slow=10, smooth=16):\n \"\"\"\n compute the MACD (Moving Average Convergence/Divergence)\n using a fast and slow exponential moving avg'\n return value is emaslow, emafast, macd which are len(x) arrays\n \"\"\"\n macd_line = rolling_weighted_mean(series, window=fast) - \\\n rolling_weighted_mean(series, window=slow)\n signal = rolling_weighted_mean(macd_line, window=smooth)\n histogram = macd_line - signal\n # return macd_line, signal, histogram\n return pd.DataFrame(index=series.index, data={\n 'macd': macd_line.values,\n 'signal': signal.values,\n 'histogram': histogram.values\n })\n\n\n# ---------------------------------------------\n\ndef bollinger_bands(series, window=20, stds=2):\n ma = rolling_mean(series, window=window, min_periods=1)\n std = rolling_std(series, window=window, min_periods=1)\n upper = ma + std * stds\n lower = ma - std * stds\n\n return pd.DataFrame(index=series.index, data={\n 'upper': upper,\n 'mid': ma,\n 'lower': lower\n })\n\n\n# ---------------------------------------------\n\ndef weighted_bollinger_bands(series, window=20, stds=2):\n ema = rolling_weighted_mean(series, window=window)\n std = rolling_std(series, window=window)\n upper = ema + std * stds\n lower = ema - std * stds\n\n return pd.DataFrame(index=series.index, data={\n 'upper': upper.values,\n 'mid': ema.values,\n 'lower': lower.values\n })\n\n\n# ---------------------------------------------\n\ndef returns(series):\n try:\n res = (series / series.shift(1) -\n 1).replace([np.inf, -np.inf], float('NaN'))\n except Exception as e: # noqa: F841\n res = nans(len(series))\n\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef log_returns(series):\n try:\n res = np.log(series / series.shift(1)\n ).replace([np.inf, -np.inf], float('NaN'))\n except Exception as e: # noqa: F841\n res = nans(len(series))\n\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef implied_volatility(series, window=252):\n try:\n logret = np.log(series / series.shift(1)\n ).replace([np.inf, -np.inf], float('NaN'))\n res = numpy_rolling_std(logret, window) * np.sqrt(window)\n except Exception as e: # noqa: F841\n res = nans(len(series))\n\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef keltner_channel(bars, window=14, atrs=2):\n typical_mean = rolling_mean(typical_price(bars), window)\n atrval = atr(bars, window) * atrs\n\n upper = typical_mean + atrval\n lower = typical_mean - atrval\n\n return pd.DataFrame(index=bars.index, data={\n 'upper': upper.values,\n 'mid': typical_mean.values,\n 'lower': lower.values\n })\n\n\n# ---------------------------------------------\n\ndef roc(series, window=14):\n \"\"\"\n compute rate of change\n \"\"\"\n res = (series - series.shift(window)) / series.shift(window)\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef cci(series, window=14):\n \"\"\"\n compute commodity channel index\n \"\"\"\n price = typical_price(series)\n typical_mean = rolling_mean(price, window)\n res = (price - typical_mean) / (.015 * np.std(typical_mean))\n return pd.Series(index=series.index, data=res)\n\n\n# ---------------------------------------------\n\ndef stoch(df, window=14, d=3, k=3, fast=False):\n \"\"\"\n compute the n 
period relative strength indicator\n http://excelta.blogspot.co.il/2013/09/stochastic-oscillator-technical.html\n \"\"\"\n\n my_df = pd.DataFrame(index=df.index)\n\n my_df['rolling_max'] = df['high'].rolling(window).max()\n my_df['rolling_min'] = df['low'].rolling(window).min()\n\n my_df['fast_k'] = (\n 100 * (df['close'] - my_df['rolling_min']) /\n (my_df['rolling_max'] - my_df['rolling_min'])\n )\n my_df['fast_d'] = my_df['fast_k'].rolling(d).mean()\n\n if fast:\n return my_df.loc[:, ['fast_k', 'fast_d']]\n\n my_df['slow_k'] = my_df['fast_k'].rolling(k).mean()\n my_df['slow_d'] = my_df['slow_k'].rolling(d).mean()\n\n return my_df.loc[:, ['slow_k', 'slow_d']]\n\n# ---------------------------------------------\n\n\ndef zlma(series, window=20, min_periods=None, kind=\"ema\"):\n \"\"\"\n John Ehlers' Zero lag (exponential) moving average\n https://en.wikipedia.org/wiki/Zero_lag_exponential_moving_average\n \"\"\"\n min_periods = window if min_periods is None else min_periods\n\n lag = (window - 1) // 2\n series = 2 * series - series.shift(lag)\n if kind in ['ewm', 'ema']:\n return wma(series, lag, min_periods)\n elif kind == \"hma\":\n return hma(series, lag, min_periods)\n return sma(series, lag, min_periods)\n\n\ndef zlema(series, window, min_periods=None):\n return zlma(series, window, min_periods, kind=\"ema\")\n\n\ndef zlsma(series, window, min_periods=None):\n return zlma(series, window, min_periods, kind=\"sma\")\n\n\ndef zlhma(series, window, min_periods=None):\n return zlma(series, window, min_periods, kind=\"hma\")\n\n# ---------------------------------------------\n\n\ndef zscore(bars, window=20, stds=1, col='close'):\n \"\"\" get zscore of price \"\"\"\n std = numpy_rolling_std(bars[col], window)\n mean = numpy_rolling_mean(bars[col], window)\n return (bars[col] - mean) / (std * stds)\n\n# ---------------------------------------------\n\n\ndef pvt(bars):\n \"\"\" Price Volume Trend \"\"\"\n trend = ((bars['close'] - bars['close'].shift(1)) /\n bars['close'].shift(1)) * bars['volume']\n return trend.cumsum()\n\n\ndef chopiness(bars, window=14):\n atrsum = true_range(bars).rolling(window).sum()\n highs = bars['high'].rolling(window).max()\n lows = bars['low'].rolling(window).min()\n return 100 * np.log10(atrsum / (highs - lows)) / np.log10(window)\n\n\n# =============================================\n\n\nPandasObject.session = session\nPandasObject.atr = atr\nPandasObject.bollinger_bands = bollinger_bands\nPandasObject.cci = cci\nPandasObject.crossed = crossed\nPandasObject.crossed_above = crossed_above\nPandasObject.crossed_below = crossed_below\nPandasObject.heikinashi = heikinashi\nPandasObject.hull_moving_average = hull_moving_average\nPandasObject.ibs = ibs\nPandasObject.implied_volatility = implied_volatility\nPandasObject.keltner_channel = keltner_channel\nPandasObject.log_returns = log_returns\nPandasObject.macd = macd\nPandasObject.returns = returns\nPandasObject.roc = roc\nPandasObject.rolling_max = rolling_max\nPandasObject.rolling_min = rolling_min\nPandasObject.rolling_mean = rolling_mean\nPandasObject.rolling_std = rolling_std\nPandasObject.rsi = rsi\nPandasObject.stoch = stoch\nPandasObject.zscore = zscore\nPandasObject.pvt = pvt\nPandasObject.chopiness = chopiness\nPandasObject.tdi = tdi\nPandasObject.true_range = true_range\nPandasObject.mid_price = mid_price\nPandasObject.typical_price = typical_price\nPandasObject.vwap = vwap\nPandasObject.rolling_vwap = rolling_vwap\nPandasObject.weighted_bollinger_bands = 
weighted_bollinger_bands\nPandasObject.rolling_weighted_mean = rolling_weighted_mean\n\nPandasObject.sma = sma\nPandasObject.wma = wma\nPandasObject.ema = wma\nPandasObject.hma = hma\n\nPandasObject.zlsma = zlsma\nPandasObject.zlwma = zlema\nPandasObject.zlema = zlema\nPandasObject.zlhma = zlhma\nPandasObject.zlma = zlma\n", "path": "freqtrade/vendor/qtpylib/indicators.py"}]} |
gh_patches_debug_1325 | rasdani/github-patches | git_diff | mozmeao__basket-184 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use MySQL Strict Mode
I noticed the following warning during DB migrations in a Jenkins deployment:
> WARNINGS:
>
> ?: (mysql.W002) MySQL Strict Mode is not set for database connection 'default'
>
> HINT: MySQL's Strict Mode fixes many data integrity problems in MySQL, such as data truncation upon insertion, by escalating warnings into errors. It is strongly recommended you activate it. See: https://docs.djangoproject.com/en/1.11/ref/databases/#mysql-sql-mode
Seems like a good idea to do that.
--- END ISSUE ---
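For reference, Django's MySQL backend can turn strict mode on per connection through `OPTIONS['init_command']`, which runs once for every new connection. A minimal sketch of that configuration (the database name below is a placeholder for illustration, not Basket's real settings):

```python
# Minimal sketch: enable MySQL strict mode for a Django connection.
# STRICT_TRANS_TABLES escalates data-truncation warnings into errors,
# which is what the mysql.W002 check asks for.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'example_db',  # placeholder name, assumed for illustration
        'OPTIONS': {
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
        },
    },
}
```

Because the mode is set per session at connect time, this avoids having to change the MySQL server's global configuration.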
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `basket/settings.py`
Content:
```
1 import os
2 import platform
3 import socket
4 import struct
5 import sys
6 from datetime import timedelta
7
8 import dj_database_url
9 import django_cache_url
10 from decouple import config, Csv, UndefinedValueError
11 from pathlib import Path
12
13 # Application version.
14 VERSION = (0, 1)
15
16 # ROOT path of the project. A pathlib.Path object.
17 ROOT_PATH = Path(__file__).resolve().parents[1]
18 ROOT = str(ROOT_PATH)
19
20
21 def path(*args):
22 return str(ROOT_PATH.joinpath(*args))
23
24
25 DEBUG = config('DEBUG', default=False, cast=bool)
26
27 ADMINS = (
28 # ('Your Name', '[email protected]'),
29 )
30
31 MANAGERS = ADMINS
32 # avoids a warning from django
33 TEST_RUNNER = 'django.test.runner.DiscoverRunner'
34
35 # DB read-only, API can still read-write to Salesforce
36 READ_ONLY_MODE = config('READ_ONLY_MODE', False, cast=bool)
37 # Disables the API and changes redirects
38 ADMIN_ONLY_MODE = config('ADMIN_ONLY_MODE', False, cast=bool)
39 BASKET_RW_URL = config('BASKET_RW_URL', default='https://prod-oregon-b.basket.moz.works')
40
41 REDIS_URL = config('REDIS_URL', None)
42 if REDIS_URL:
43 REDIS_URL = REDIS_URL.rstrip('/0')
44 # use redis for celery and cache
45 os.environ['CELERY_BROKER_URL'] = REDIS_URL + '/' + config('REDIS_CELERY_DB', '0')
46 os.environ['CACHE_URL'] = REDIS_URL + '/' + config('REDIS_CACHE_DB', '1')
47
48 # Production uses MySQL, but Sqlite should be sufficient for local development.
49 # Our CI server tests against MySQL. See travis.py in this directory
50 # for an example if you'd like to run MySQL locally, and add that to your
51 # local.py.
52 DATABASES = {
53 'default': config('DATABASE_URL',
54 default='sqlite:///basket.db',
55 cast=dj_database_url.parse),
56 }
57 if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
58 DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
59
60 CACHES = {
61 'default': config('CACHE_URL',
62 default='locmem://',
63 cast=django_cache_url.parse),
64 'bad_message_ids': {
65 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
66 'TIMEOUT': 12 * 60 * 60, # 12 hours
67 },
68 'email_block_list': {
69 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
70 'TIMEOUT': 60 * 60, # 1 hour
71 },
72 'product_details': {
73 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
74 },
75 }
76
77 if CACHES['default']['BACKEND'].startswith('django_redis'):
78 options = CACHES['default'].setdefault('OPTIONS', {})
79 options['PARSER_CLASS'] = 'redis.connection.HiredisParser'
80
81 default_email_backend = ('django.core.mail.backends.console.EmailBackend' if DEBUG else
82 'django.core.mail.backends.smtp.EmailBackend')
83 EMAIL_BACKEND = config('EMAIL_BACKEND', default=default_email_backend)
84 EMAIL_HOST = config('EMAIL_HOST', default='localhost')
85 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
86 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
87 EMAIL_SUBJECT_PREFIX = config('EMAIL_SUBJECT_PREFIX', default='[basket] ')
88 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
89 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
90
91 ALLOWED_HOSTS = config('ALLOWED_HOSTS',
92 default='.allizom.org, .moz.works, basket.mozmar.org, '
93 'basket.mozilla.com, basket.mozilla.org',
94 cast=Csv())
95 ALLOWED_CIDR_NETS = config('ALLOWED_CIDR_NETS', default='', cast=Csv())
96 ENFORCE_HOSTNAME = config('ENFORCE_HOSTNAME', default='', cast=Csv())
97 USE_X_FORWARDED_HOST = True
98
99 SESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', not DEBUG, cast=bool)
100 SESSION_ENGINE = config('SESSION_ENGINE', default='django.contrib.sessions.backends.cache')
101 CSRF_COOKIE_SECURE = config('CSRF_COOKIE_SECURE', not DEBUG, cast=bool)
102 DISABLE_ADMIN = config('DISABLE_ADMIN', READ_ONLY_MODE, cast=bool)
103 STORE_TASK_FAILURES = config('STORE_TASK_FAILURES', not READ_ONLY_MODE, cast=bool)
104 # if DISABLE_ADMIN is True redirect /admin/ to this URL
105 ADMIN_REDIRECT_URL = config('ADMIN_REDIRECT_URL',
106 'https://admin.basket.moz.works/admin/')
107
108 TIME_ZONE = 'UTC'
109 USE_TZ = True
110 SITE_ID = 1
111 USE_I18N = False
112
113 STATIC_ROOT = path('static')
114 STATIC_URL = '/static/'
115 if not DEBUG:
116 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
117
118 try:
119 # Make this unique, and don't share it with anybody.
120 SECRET_KEY = config('SECRET_KEY')
121 except UndefinedValueError:
122 raise UndefinedValueError('The SECRET_KEY environment varialbe is required. '
123 'Move env-dist to .env if you want the defaults.')
124
125 TEMPLATES = [
126 {
127 'BACKEND': 'django.template.backends.django.DjangoTemplates',
128 'DIRS': ['templates'],
129 'APP_DIRS': True,
130 'OPTIONS': {
131 'context_processors': [
132 'django.contrib.auth.context_processors.auth',
133 'django.template.context_processors.request',
134 'django.contrib.messages.context_processors.messages',
135 'basket.news.context_processors.settings',
136 ],
137 },
138 },
139 ]
140
141 MIDDLEWARE = (
142 'allow_cidr.middleware.AllowCIDRMiddleware',
143 'django.middleware.security.SecurityMiddleware',
144 'whitenoise.middleware.WhiteNoiseMiddleware',
145 'basket.news.middleware.EnforceHostnameMiddleware',
146 'basket.news.middleware.HostnameMiddleware',
147 'django.middleware.common.CommonMiddleware',
148 'corsheaders.middleware.CorsMiddleware',
149 'django.contrib.sessions.middleware.SessionMiddleware',
150 'django.middleware.csrf.CsrfViewMiddleware',
151 'django.contrib.auth.middleware.AuthenticationMiddleware',
152 'django.contrib.messages.middleware.MessageMiddleware',
153 'basket.news.middleware.GraphiteViewHitCountMiddleware',
154 'django_statsd.middleware.GraphiteRequestTimingMiddleware',
155 'django_statsd.middleware.GraphiteMiddleware',
156 'ratelimit.middleware.RatelimitMiddleware',
157 )
158
159 ROOT_URLCONF = 'basket.urls'
160
161 INSTALLED_APPS = (
162 'basket.news',
163 'basket.base',
164
165 'corsheaders',
166 'product_details',
167 'raven.contrib.django.raven_compat',
168 'django_extensions',
169 'mozilla_django_oidc',
170 'watchman',
171
172 'django.contrib.auth',
173 'django.contrib.contenttypes',
174 'django.contrib.sessions',
175 'django.contrib.sites',
176 'django.contrib.messages',
177 'django.contrib.admin',
178 'django.contrib.staticfiles',
179 )
180
181 # SecurityMiddleware settings
182 SECURE_HSTS_SECONDS = config('SECURE_HSTS_SECONDS', default='0', cast=int)
183 SECURE_HSTS_INCLUDE_SUBDOMAINS = False
184 SECURE_BROWSER_XSS_FILTER = config('SECURE_BROWSER_XSS_FILTER', default=True, cast=bool)
185 SECURE_CONTENT_TYPE_NOSNIFF = config('SECURE_CONTENT_TYPE_NOSNIFF', default=True, cast=bool)
186 SECURE_SSL_REDIRECT = config('SECURE_SSL_REDIRECT', default=False, cast=bool)
187 SECURE_REDIRECT_EXEMPT = [
188 r'^healthz/$',
189 r'^readiness/$',
190 ]
191 if config('USE_SECURE_PROXY_HEADER', default=SECURE_SSL_REDIRECT, cast=bool):
192 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
193
194 # watchman
195 WATCHMAN_DISABLE_APM = True
196 WATCHMAN_CHECKS = (
197 'watchman.checks.caches',
198 'watchman.checks.databases',
199 )
200
201 # legacy name
202 EXACTTARGET_USE_SANDBOX = config('EXACTTARGET_USE_SANDBOX', False, cast=bool)
203 USE_SANDBOX_BACKEND = config('USE_SANDBOX_BACKEND', EXACTTARGET_USE_SANDBOX, cast=bool)
204 ET_CLIENT_ID = config('ET_CLIENT_ID', None)
205 ET_CLIENT_SECRET = config('ET_CLIENT_SECRET', None)
206
207 if USE_SANDBOX_BACKEND:
208 auth_url = 'https://auth-test.exacttargetapis.com/v1/requestToken?legacy=1'
209 wsdl_loc = 'etframework.test.wsdl'
210 else:
211 auth_url = 'https://auth.exacttargetapis.com/v1/requestToken?legacy=1'
212 wsdl_loc = 'etframework.wsdl'
213
214 SFMC_DEBUG = config('SFMC_DEBUG', DEBUG, cast=bool)
215 SFMC_SETTINGS = {
216 'authenticationurl': auth_url,
217 'wsdl_file_local_loc': path('basket', 'news', 'backends', wsdl_loc),
218 }
219 if ET_CLIENT_ID and ET_CLIENT_SECRET:
220 SFMC_SETTINGS['clientid'] = ET_CLIENT_ID
221 SFMC_SETTINGS['clientsecret'] = ET_CLIENT_SECRET
222
223 # Salesforce.com
224 SFDC_SETTINGS = {
225 'username': config('SFDC_USERNAME', None),
226 'password': config('SFDC_PASSWORD', None),
227 'security_token': config('SFDC_SEC_TOKEN', None),
228 'sandbox': config('SFDC_USE_SANDBOX', USE_SANDBOX_BACKEND, cast=bool),
229 }
230 # default SFDC sessions timeout after 2 hours of inactivity. so they never timeout on
231 # prod. Let's make it every 4 hours by default.
232 SFDC_SESSION_TIMEOUT = config('SFDC_SESSION_TIMEOUT', 60 * 60 * 4, cast=int)
233 SFDC_REQUEST_TIMEOUT = config('SFDC_REQUEST_TIMEOUT', 30, cast=int)
234
235
236 CORS_ORIGIN_ALLOW_ALL = True
237 CORS_URLS_REGEX = r'^/(news/|subscribe)'
238
239 # view rate limiting
240 RATELIMIT_VIEW = 'basket.news.views.ratelimited'
241
242 KOMBU_FERNET_KEY = config('KOMBU_FERNET_KEY', None)
243 # for key rotation
244 KOMBU_FERNET_KEY_PREVIOUS = config('KOMBU_FERNET_KEY_PREVIOUS', None)
245 CELERY_TASK_ALWAYS_EAGER = config('CELERY_TASK_ALWAYS_EAGER', DEBUG, cast=bool)
246 CELERY_TASK_SERIALIZER = 'json'
247 CELERY_TASK_ACKS_LATE = config('CELERY_TASK_ACKS_LATE', False, cast=bool)
248 CELERY_TASK_REJECT_ON_WORKER_LOST = False
249 CELERY_ACCEPT_CONTENT = ['json']
250 CELERY_BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 245760} # 68.27 hours 11th retry
251 CELERY_BROKER_URL = config('CELERY_BROKER_URL', None)
252 CELERY_REDIS_MAX_CONNECTIONS = config('CELERY_REDIS_MAX_CONNECTIONS', 2, cast=int)
253 CELERY_WORKER_DISABLE_RATE_LIMITS = True
254 CELERY_TASK_IGNORE_RESULT = True
255 CELERY_WORKER_PREFETCH_MULTIPLIER = config('CELERY_WORKER_PREFETCH_MULTIPLIER', 1, cast=int)
256 CELERY_TASK_COMPRESSION = 'gzip'
257 CELERY_TASK_ROUTES = {
258 'basket.news.tasks.snitch': {'queue': 'snitch'},
259 }
260
261 # size in kb
262 CELERY_WORKER_MAX_MEMORY_PER_CHILD = config('CELERY_WORKER_MAX_MEMORY_PER_CHILD', 200000, cast=int)
263
264 SNITCH_ID = config('SNITCH_ID', None)
265
266 CELERY_BEAT_SCHEDULE = {}
267
268 if SNITCH_ID:
269 CELERY_BEAT_SCHEDULE['snitch'] = {
270 'task': 'basket.news.tasks.snitch',
271 'schedule': timedelta(minutes=5),
272 }
273
274 if not READ_ONLY_MODE:
275 CELERY_BEAT_SCHEDULE['common-voice'] = {
276 'task': 'basket.news.tasks.process_common_voice_batch',
277 'schedule': timedelta(hours=1),
278 }
279
280
281 # via http://stackoverflow.com/a/6556951/107114
282 def get_default_gateway_linux():
283 """Read the default gateway directly from /proc."""
284 try:
285 with open("/proc/net/route") as fh:
286 for line in fh:
287 fields = line.strip().split()
288 if fields[1] != '00000000' or not int(fields[3], 16) & 2:
289 continue
290
291 return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
292 except IOError:
293 return 'localhost'
294
295
296 HOSTNAME = platform.node()
297 CLUSTER_NAME = config('CLUSTER_NAME', default=None)
298 K8S_NAMESPACE = config('K8S_NAMESPACE', default=None)
299 K8S_POD_NAME = config('K8S_POD_NAME', default=None)
300
301 RAVEN_CONFIG = {
302 'dsn': config('SENTRY_DSN', None),
303 'site': '.'.join(x for x in [K8S_NAMESPACE, CLUSTER_NAME] if x),
304 'release': config('GIT_SHA', None),
305 }
306
307 STATSD_HOST = config('STATSD_HOST', get_default_gateway_linux())
308 STATSD_PORT = config('STATSD_PORT', 8125, cast=int)
309 STATSD_PREFIX = config('STATSD_PREFIX', K8S_NAMESPACE)
310 STATSD_CLIENT = config('STATSD_CLIENT', 'django_statsd.clients.null')
311
312 LOGGING = {
313 'version': 1,
314 'disable_existing_loggers': False,
315 'root': {
316 'level': config('DJANGO_LOG_LEVEL', default='WARNING'),
317 'handlers': ['console'],
318 },
319 'formatters': {
320 'verbose': {
321 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
322 },
323 },
324 'handlers': {
325 'console': {
326 'level': 'DEBUG',
327 'class': 'logging.StreamHandler',
328 'formatter': 'verbose'
329 }
330 },
331 'loggers': {
332 'django.db.backends': {
333 'level': 'ERROR',
334 'handlers': ['console'],
335 'propagate': False,
336 },
337 'suds.client': {
338 'level': 'ERROR',
339 'handlers': ['console'],
340 'propagate': False,
341 },
342 },
343 }
344
345 PROD_DETAILS_CACHE_NAME = 'product_details'
346 PROD_DETAILS_CACHE_TIMEOUT = None
347
348 RECOVER_MSG_LANGS = config('RECOVER_MSG_LANGS', 'en', cast=Csv())
349 # language codes that we support and send through to SFDC
350 # regardless of their existence in the DB
351 EXTRA_SUPPORTED_LANGS = config('EXTRA_SUPPORTED_LANGS', '', cast=Csv())
352
353 SYNC_KEY = config('SYNC_KEY', None)
354 TESTING_EMAIL_DOMAINS = config('TESTING_EMAIL_DOMAINS',
355 'restmail.net,restmail.lcip.org,example.com',
356 cast=Csv())
357
358 MAINTENANCE_MODE = config('MAINTENANCE_MODE', False, cast=bool)
359 QUEUE_BATCH_SIZE = config('QUEUE_BATCH_SIZE', 500, cast=int)
360 # can we read user data in maintenance mode
361 MAINTENANCE_READ_ONLY = config('MAINTENANCE_READ_ONLY', False, cast=bool)
362
363 TASK_LOCK_TIMEOUT = config('TASK_LOCK_TIMEOUT', 60, cast=int)
364 TASK_LOCKING_ENABLE = config('TASK_LOCKING_ENABLE', False, cast=bool)
365
366 DONATE_ACCESS_KEY_ID = config('DONATE_ACCESS_KEY_ID', default='')
367 DONATE_SECRET_ACCESS_KEY = config('DONATE_SECRET_ACCESS_KEY', default='')
368 DONATE_QUEUE_REGION = config('DONATE_QUEUE_REGION', default='')
369 DONATE_QUEUE_URL = config('DONATE_QUEUE_URL', default='')
370 DONATE_QUEUE_WAIT_TIME = config('DONATE_QUEUE_WAIT_TIME', cast=int, default=10)
371 # turn this on to consume the queue but ignore the messages
372 # needed so that donate.m.o can run continuous tests w/o filling the SFDC sandbox
373 DONATE_QUEUE_IGNORE_MODE = config('DONATE_QUEUE_IGNORE_MODE', cast=bool, default=False)
374 DONATE_OPP_RECORD_TYPE = config('DONATE_OPP_RECORD_TYPE', default='')
375 DONATE_CONTACT_RECORD_TYPE = config('DONATE_CONTACT_RECORD_TYPE', default='')
376 DONATE_SNITCH_ID = config('DONATE_SNITCH_ID', default='')
377 DONATE_NOTIFY_EMAIL = config('DONATE_NOTIFY_EMAIL', default='')
378 DONATE_UPDATE_FAIL_DE = config('DONATE_UPDATE_FAIL_DE', default='Donation_Diff')
379
380 FXA_EVENTS_QUEUE_ENABLE = config('FXA_EVENTS_QUEUE_ENABLE', cast=bool, default=False)
381 FXA_EVENTS_ACCESS_KEY_ID = config('FXA_EVENTS_ACCESS_KEY_ID', default='')
382 FXA_EVENTS_SECRET_ACCESS_KEY = config('FXA_EVENTS_SECRET_ACCESS_KEY', default='')
383 FXA_EVENTS_QUEUE_REGION = config('FXA_EVENTS_QUEUE_REGION', default='')
384 FXA_EVENTS_QUEUE_URL = config('FXA_EVENTS_QUEUE_URL', default='')
385 FXA_EVENTS_QUEUE_WAIT_TIME = config('FXA_EVENTS_QUEUE_WAIT_TIME', cast=int, default=10)
386 FXA_EVENTS_SNITCH_ID = config('FXA_EVENTS_SNITCH_ID', default='')
387 FXA_EVENTS_VERIFIED_SFDC_ENABLE = config('FXA_EVENTS_VERIFIED_SFDC_ENABLE', cast=bool, default=False)
388
389 FXA_ACCESS_KEY_ID = config('FXA_ACCESS_KEY_ID', default='')
390 FXA_SECRET_ACCESS_KEY = config('FXA_SECRET_ACCESS_KEY', default='')
391 FXA_S3_BUCKET = config('FXA_S3_BUCKET', default='')
392 FXA_SFMC_DE = config('FXA_SFMC_DE', default='FXA_Logins')
393 FXA_SNITCH_URL = config('FXA_SNITCH_URL', default='')
394 # stable, stage, or production
395 # https://github.com/mozilla/PyFxA/blob/master/fxa/constants.py
396 FXA_OAUTH_SERVER_ENV = config('FXA_OAUTH_SERVER_ENV', default='stable')
397 FXA_CLIENT_ID = config('FXA_CLIENT_ID', default='')
398 FXA_CLIENT_SECRET = config('FXA_CLIENT_SECRET', default='')
399 FXA_OAUTH_TOKEN_TTL = config('FXA_OAUTH_TOKEN_TTL', default=300, cast=int) # 5 minutes
400
401 FXA_EMAIL_PREFS_DOMAIN = config('FXA_EMAIL_PREFS_DOMAIN', default='www.mozilla.org')
402 FXA_REGISTER_NEWSLETTER = config('FXA_REGISTER_NEWSLETTER', default='firefox-accounts-journey')
403 FXA_REGISTER_SOURCE_URL = config('FXA_REGISTER_SOURCE_URL', default='https://accounts.firefox.com/')
404 # TODO move this to the DB
405 FXA_LOGIN_CAMPAIGNS = {
406 'fxa-embedded-form-moz': 'mozilla-welcome',
407 'fxa-embedded-form-fx': 'firefox-welcome',
408 'membership-idealo': 'member-idealo',
409 'membership-comm': 'member-comm',
410 'membership-tech': 'member-tech',
411 'membership-tk': 'member-tk',
412 }
413
414 SUBHUB_OPP_RECORD_TYPE = config('SUBHUB_OPP_RECORD_TYPE', default='')
415 SUBHUB_CC_EXPIRE_TRIGGER = config('SUBHUB_CC_EXPIRE_TRIGGER', default='en_subscription_services_cc_expired')
416
417 COMMON_VOICE_NEWSLETTER = config('COMMON_VOICE_NEWSLETTER', default='common-voice')
418 COMMON_VOICE_BATCH_UPDATES = config('COMMON_VOICE_BATCH_UPDATES', default=False, cast=bool)
419 COMMON_VOICE_BATCH_PROCESSING = config('COMMON_VOICE_BATCH_PROCESSING', default=False, cast=bool)
420 COMMON_VOICE_BATCH_CHUNK_SIZE = config('COMMON_VOICE_BATCH_CHUNK_SIZE', default=1000, cast=int)
421
422 OIDC_ENABLE = config('OIDC_ENABLE', default=False, cast=bool)
423 if OIDC_ENABLE:
424 AUTHENTICATION_BACKENDS = (
425 'basket.base.authentication.OIDCModelBackend',
426 )
427 OIDC_OP_AUTHORIZATION_ENDPOINT = config('OIDC_OP_AUTHORIZATION_ENDPOINT')
428 OIDC_OP_TOKEN_ENDPOINT = config('OIDC_OP_TOKEN_ENDPOINT')
429 OIDC_OP_USER_ENDPOINT = config('OIDC_OP_USER_ENDPOINT')
430
431 OIDC_RP_CLIENT_ID = config('OIDC_RP_CLIENT_ID')
432 OIDC_RP_CLIENT_SECRET = config('OIDC_RP_CLIENT_SECRET')
433 OIDC_CREATE_USER = config('OIDC_CREATE_USER', default=False, cast=bool)
434 MIDDLEWARE += ('basket.news.middleware.OIDCSessionRefreshMiddleware',)
435 LOGIN_REDIRECT_URL = '/admin/'
436
437 if sys.argv[0].endswith('py.test') or (len(sys.argv) > 1 and sys.argv[1] == 'test'):
438 # stuff that's absolutely required for a test run
439 CELERY_TASK_ALWAYS_EAGER = True
440 SFDC_SETTINGS.pop('username', None)
441 SFDC_SETTINGS.pop('password', None)
442 SFMC_SETTINGS.pop('clientid', None)
443 SFMC_SETTINGS.pop('clientsecret', None)
444 TESTING_EMAIL_DOMAINS = []
445
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/basket/settings.py b/basket/settings.py
--- a/basket/settings.py
+++ b/basket/settings.py
@@ -55,7 +55,10 @@
cast=dj_database_url.parse),
}
if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
- DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
+ DATABASES['default']['OPTIONS'] = {
+ 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
+ }
+
CACHES = {
'default': config('CACHE_URL',
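As a quick sanity check once a change like the diff above is deployed, the session's `sql_mode` can be read back over the configured connection; a sketch, assuming a working MySQL `default` database and a Django shell:

```python
# Illustrative check (e.g. from `python manage.py shell`): after the settings
# change, every new connection should report STRICT_TRANS_TABLES in sql_mode.
from django.db import connection

with connection.cursor() as cursor:
    cursor.execute("SELECT @@sql_mode")
    sql_mode = cursor.fetchone()[0]

print(sql_mode)
assert 'STRICT_TRANS_TABLES' in sql_mode
```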
| {"golden_diff": "diff --git a/basket/settings.py b/basket/settings.py\n--- a/basket/settings.py\n+++ b/basket/settings.py\n@@ -55,7 +55,10 @@\n cast=dj_database_url.parse),\n }\n if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':\n- DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}\n+ DATABASES['default']['OPTIONS'] = {\n+ 'init_command': \"SET sql_mode='STRICT_TRANS_TABLES'\",\n+ }\n+\n \n CACHES = {\n 'default': config('CACHE_URL',\n", "issue": "Use MySQL Strict Mode\nI noticed the following warning during DB migrations in a Jenkins deployment:\r\n\r\n\r\n> WARNINGS:\r\n> \r\n> ?: (mysql.W002) MySQL Strict Mode is not set for database connection 'default'\r\n> \r\n> \tHINT: MySQL's Strict Mode fixes many data integrity problems in MySQL, such as data truncation upon insertion, by escalating warnings into errors. It is strongly recommended you activate it. See: https://docs.djangoproject.com/en/1.11/ref/databases/#mysql-sql-mode\r\n\r\n\r\nSeems like a good idea to do that.\n", "before_files": [{"content": "import os\nimport platform\nimport socket\nimport struct\nimport sys\nfrom datetime import timedelta\n\nimport dj_database_url\nimport django_cache_url\nfrom decouple import config, Csv, UndefinedValueError\nfrom pathlib import Path\n\n# Application version.\nVERSION = (0, 1)\n\n# ROOT path of the project. A pathlib.Path object.\nROOT_PATH = Path(__file__).resolve().parents[1]\nROOT = str(ROOT_PATH)\n\n\ndef path(*args):\n return str(ROOT_PATH.joinpath(*args))\n\n\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n# avoids a warning from django\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# DB read-only, API can still read-write to Salesforce\nREAD_ONLY_MODE = config('READ_ONLY_MODE', False, cast=bool)\n# Disables the API and changes redirects\nADMIN_ONLY_MODE = config('ADMIN_ONLY_MODE', False, cast=bool)\nBASKET_RW_URL = config('BASKET_RW_URL', default='https://prod-oregon-b.basket.moz.works')\n\nREDIS_URL = config('REDIS_URL', None)\nif REDIS_URL:\n REDIS_URL = REDIS_URL.rstrip('/0')\n # use redis for celery and cache\n os.environ['CELERY_BROKER_URL'] = REDIS_URL + '/' + config('REDIS_CELERY_DB', '0')\n os.environ['CACHE_URL'] = REDIS_URL + '/' + config('REDIS_CACHE_DB', '1')\n\n# Production uses MySQL, but Sqlite should be sufficient for local development.\n# Our CI server tests against MySQL. 
See travis.py in this directory\n# for an example if you'd like to run MySQL locally, and add that to your\n# local.py.\nDATABASES = {\n 'default': config('DATABASE_URL',\n default='sqlite:///basket.db',\n cast=dj_database_url.parse),\n}\nif DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':\n DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}\n\nCACHES = {\n 'default': config('CACHE_URL',\n default='locmem://',\n cast=django_cache_url.parse),\n 'bad_message_ids': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'TIMEOUT': 12 * 60 * 60, # 12 hours\n },\n 'email_block_list': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'TIMEOUT': 60 * 60, # 1 hour\n },\n 'product_details': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nif CACHES['default']['BACKEND'].startswith('django_redis'):\n options = CACHES['default'].setdefault('OPTIONS', {})\n options['PARSER_CLASS'] = 'redis.connection.HiredisParser'\n\ndefault_email_backend = ('django.core.mail.backends.console.EmailBackend' if DEBUG else\n 'django.core.mail.backends.smtp.EmailBackend')\nEMAIL_BACKEND = config('EMAIL_BACKEND', default=default_email_backend)\nEMAIL_HOST = config('EMAIL_HOST', default='localhost')\nEMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\nEMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nEMAIL_SUBJECT_PREFIX = config('EMAIL_SUBJECT_PREFIX', default='[basket] ')\nEMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n\nALLOWED_HOSTS = config('ALLOWED_HOSTS',\n default='.allizom.org, .moz.works, basket.mozmar.org, '\n 'basket.mozilla.com, basket.mozilla.org',\n cast=Csv())\nALLOWED_CIDR_NETS = config('ALLOWED_CIDR_NETS', default='', cast=Csv())\nENFORCE_HOSTNAME = config('ENFORCE_HOSTNAME', default='', cast=Csv())\nUSE_X_FORWARDED_HOST = True\n\nSESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', not DEBUG, cast=bool)\nSESSION_ENGINE = config('SESSION_ENGINE', default='django.contrib.sessions.backends.cache')\nCSRF_COOKIE_SECURE = config('CSRF_COOKIE_SECURE', not DEBUG, cast=bool)\nDISABLE_ADMIN = config('DISABLE_ADMIN', READ_ONLY_MODE, cast=bool)\nSTORE_TASK_FAILURES = config('STORE_TASK_FAILURES', not READ_ONLY_MODE, cast=bool)\n# if DISABLE_ADMIN is True redirect /admin/ to this URL\nADMIN_REDIRECT_URL = config('ADMIN_REDIRECT_URL',\n 'https://admin.basket.moz.works/admin/')\n\nTIME_ZONE = 'UTC'\nUSE_TZ = True\nSITE_ID = 1\nUSE_I18N = False\n\nSTATIC_ROOT = path('static')\nSTATIC_URL = '/static/'\nif not DEBUG:\n STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\ntry:\n # Make this unique, and don't share it with anybody.\n SECRET_KEY = config('SECRET_KEY')\nexcept UndefinedValueError:\n raise UndefinedValueError('The SECRET_KEY environment varialbe is required. 
'\n 'Move env-dist to .env if you want the defaults.')\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'basket.news.context_processors.settings',\n ],\n },\n },\n]\n\nMIDDLEWARE = (\n 'allow_cidr.middleware.AllowCIDRMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'basket.news.middleware.EnforceHostnameMiddleware',\n 'basket.news.middleware.HostnameMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'basket.news.middleware.GraphiteViewHitCountMiddleware',\n 'django_statsd.middleware.GraphiteRequestTimingMiddleware',\n 'django_statsd.middleware.GraphiteMiddleware',\n 'ratelimit.middleware.RatelimitMiddleware',\n)\n\nROOT_URLCONF = 'basket.urls'\n\nINSTALLED_APPS = (\n 'basket.news',\n 'basket.base',\n\n 'corsheaders',\n 'product_details',\n 'raven.contrib.django.raven_compat',\n 'django_extensions',\n 'mozilla_django_oidc',\n 'watchman',\n\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n)\n\n# SecurityMiddleware settings\nSECURE_HSTS_SECONDS = config('SECURE_HSTS_SECONDS', default='0', cast=int)\nSECURE_HSTS_INCLUDE_SUBDOMAINS = False\nSECURE_BROWSER_XSS_FILTER = config('SECURE_BROWSER_XSS_FILTER', default=True, cast=bool)\nSECURE_CONTENT_TYPE_NOSNIFF = config('SECURE_CONTENT_TYPE_NOSNIFF', default=True, cast=bool)\nSECURE_SSL_REDIRECT = config('SECURE_SSL_REDIRECT', default=False, cast=bool)\nSECURE_REDIRECT_EXEMPT = [\n r'^healthz/$',\n r'^readiness/$',\n]\nif config('USE_SECURE_PROXY_HEADER', default=SECURE_SSL_REDIRECT, cast=bool):\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# watchman\nWATCHMAN_DISABLE_APM = True\nWATCHMAN_CHECKS = (\n 'watchman.checks.caches',\n 'watchman.checks.databases',\n)\n\n# legacy name\nEXACTTARGET_USE_SANDBOX = config('EXACTTARGET_USE_SANDBOX', False, cast=bool)\nUSE_SANDBOX_BACKEND = config('USE_SANDBOX_BACKEND', EXACTTARGET_USE_SANDBOX, cast=bool)\nET_CLIENT_ID = config('ET_CLIENT_ID', None)\nET_CLIENT_SECRET = config('ET_CLIENT_SECRET', None)\n\nif USE_SANDBOX_BACKEND:\n auth_url = 'https://auth-test.exacttargetapis.com/v1/requestToken?legacy=1'\n wsdl_loc = 'etframework.test.wsdl'\nelse:\n auth_url = 'https://auth.exacttargetapis.com/v1/requestToken?legacy=1'\n wsdl_loc = 'etframework.wsdl'\n\nSFMC_DEBUG = config('SFMC_DEBUG', DEBUG, cast=bool)\nSFMC_SETTINGS = {\n 'authenticationurl': auth_url,\n 'wsdl_file_local_loc': path('basket', 'news', 'backends', wsdl_loc),\n}\nif ET_CLIENT_ID and ET_CLIENT_SECRET:\n SFMC_SETTINGS['clientid'] = ET_CLIENT_ID\n SFMC_SETTINGS['clientsecret'] = ET_CLIENT_SECRET\n\n# Salesforce.com\nSFDC_SETTINGS = {\n 'username': config('SFDC_USERNAME', None),\n 'password': config('SFDC_PASSWORD', None),\n 'security_token': config('SFDC_SEC_TOKEN', None),\n 'sandbox': config('SFDC_USE_SANDBOX', USE_SANDBOX_BACKEND, cast=bool),\n}\n# default SFDC 
sessions timeout after 2 hours of inactivity. so they never timeout on\n# prod. Let's make it every 4 hours by default.\nSFDC_SESSION_TIMEOUT = config('SFDC_SESSION_TIMEOUT', 60 * 60 * 4, cast=int)\nSFDC_REQUEST_TIMEOUT = config('SFDC_REQUEST_TIMEOUT', 30, cast=int)\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/(news/|subscribe)'\n\n# view rate limiting\nRATELIMIT_VIEW = 'basket.news.views.ratelimited'\n\nKOMBU_FERNET_KEY = config('KOMBU_FERNET_KEY', None)\n# for key rotation\nKOMBU_FERNET_KEY_PREVIOUS = config('KOMBU_FERNET_KEY_PREVIOUS', None)\nCELERY_TASK_ALWAYS_EAGER = config('CELERY_TASK_ALWAYS_EAGER', DEBUG, cast=bool)\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_TASK_ACKS_LATE = config('CELERY_TASK_ACKS_LATE', False, cast=bool)\nCELERY_TASK_REJECT_ON_WORKER_LOST = False\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 245760} # 68.27 hours 11th retry\nCELERY_BROKER_URL = config('CELERY_BROKER_URL', None)\nCELERY_REDIS_MAX_CONNECTIONS = config('CELERY_REDIS_MAX_CONNECTIONS', 2, cast=int)\nCELERY_WORKER_DISABLE_RATE_LIMITS = True\nCELERY_TASK_IGNORE_RESULT = True\nCELERY_WORKER_PREFETCH_MULTIPLIER = config('CELERY_WORKER_PREFETCH_MULTIPLIER', 1, cast=int)\nCELERY_TASK_COMPRESSION = 'gzip'\nCELERY_TASK_ROUTES = {\n 'basket.news.tasks.snitch': {'queue': 'snitch'},\n}\n\n# size in kb\nCELERY_WORKER_MAX_MEMORY_PER_CHILD = config('CELERY_WORKER_MAX_MEMORY_PER_CHILD', 200000, cast=int)\n\nSNITCH_ID = config('SNITCH_ID', None)\n\nCELERY_BEAT_SCHEDULE = {}\n\nif SNITCH_ID:\n CELERY_BEAT_SCHEDULE['snitch'] = {\n 'task': 'basket.news.tasks.snitch',\n 'schedule': timedelta(minutes=5),\n }\n\nif not READ_ONLY_MODE:\n CELERY_BEAT_SCHEDULE['common-voice'] = {\n 'task': 'basket.news.tasks.process_common_voice_batch',\n 'schedule': timedelta(hours=1),\n }\n\n\n# via http://stackoverflow.com/a/6556951/107114\ndef get_default_gateway_linux():\n \"\"\"Read the default gateway directly from /proc.\"\"\"\n try:\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))\n except IOError:\n return 'localhost'\n\n\nHOSTNAME = platform.node()\nCLUSTER_NAME = config('CLUSTER_NAME', default=None)\nK8S_NAMESPACE = config('K8S_NAMESPACE', default=None)\nK8S_POD_NAME = config('K8S_POD_NAME', default=None)\n\nRAVEN_CONFIG = {\n 'dsn': config('SENTRY_DSN', None),\n 'site': '.'.join(x for x in [K8S_NAMESPACE, CLUSTER_NAME] if x),\n 'release': config('GIT_SHA', None),\n}\n\nSTATSD_HOST = config('STATSD_HOST', get_default_gateway_linux())\nSTATSD_PORT = config('STATSD_PORT', 8125, cast=int)\nSTATSD_PREFIX = config('STATSD_PREFIX', K8S_NAMESPACE)\nSTATSD_CLIENT = config('STATSD_CLIENT', 'django_statsd.clients.null')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': config('DJANGO_LOG_LEVEL', default='WARNING'),\n 'handlers': ['console'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'suds.client': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nPROD_DETAILS_CACHE_NAME = 'product_details'\nPROD_DETAILS_CACHE_TIMEOUT = 
None\n\nRECOVER_MSG_LANGS = config('RECOVER_MSG_LANGS', 'en', cast=Csv())\n# language codes that we support and send through to SFDC\n# regardless of their existence in the DB\nEXTRA_SUPPORTED_LANGS = config('EXTRA_SUPPORTED_LANGS', '', cast=Csv())\n\nSYNC_KEY = config('SYNC_KEY', None)\nTESTING_EMAIL_DOMAINS = config('TESTING_EMAIL_DOMAINS',\n 'restmail.net,restmail.lcip.org,example.com',\n cast=Csv())\n\nMAINTENANCE_MODE = config('MAINTENANCE_MODE', False, cast=bool)\nQUEUE_BATCH_SIZE = config('QUEUE_BATCH_SIZE', 500, cast=int)\n# can we read user data in maintenance mode\nMAINTENANCE_READ_ONLY = config('MAINTENANCE_READ_ONLY', False, cast=bool)\n\nTASK_LOCK_TIMEOUT = config('TASK_LOCK_TIMEOUT', 60, cast=int)\nTASK_LOCKING_ENABLE = config('TASK_LOCKING_ENABLE', False, cast=bool)\n\nDONATE_ACCESS_KEY_ID = config('DONATE_ACCESS_KEY_ID', default='')\nDONATE_SECRET_ACCESS_KEY = config('DONATE_SECRET_ACCESS_KEY', default='')\nDONATE_QUEUE_REGION = config('DONATE_QUEUE_REGION', default='')\nDONATE_QUEUE_URL = config('DONATE_QUEUE_URL', default='')\nDONATE_QUEUE_WAIT_TIME = config('DONATE_QUEUE_WAIT_TIME', cast=int, default=10)\n# turn this on to consume the queue but ignore the messages\n# needed so that donate.m.o can run continuous tests w/o filling the SFDC sandbox\nDONATE_QUEUE_IGNORE_MODE = config('DONATE_QUEUE_IGNORE_MODE', cast=bool, default=False)\nDONATE_OPP_RECORD_TYPE = config('DONATE_OPP_RECORD_TYPE', default='')\nDONATE_CONTACT_RECORD_TYPE = config('DONATE_CONTACT_RECORD_TYPE', default='')\nDONATE_SNITCH_ID = config('DONATE_SNITCH_ID', default='')\nDONATE_NOTIFY_EMAIL = config('DONATE_NOTIFY_EMAIL', default='')\nDONATE_UPDATE_FAIL_DE = config('DONATE_UPDATE_FAIL_DE', default='Donation_Diff')\n\nFXA_EVENTS_QUEUE_ENABLE = config('FXA_EVENTS_QUEUE_ENABLE', cast=bool, default=False)\nFXA_EVENTS_ACCESS_KEY_ID = config('FXA_EVENTS_ACCESS_KEY_ID', default='')\nFXA_EVENTS_SECRET_ACCESS_KEY = config('FXA_EVENTS_SECRET_ACCESS_KEY', default='')\nFXA_EVENTS_QUEUE_REGION = config('FXA_EVENTS_QUEUE_REGION', default='')\nFXA_EVENTS_QUEUE_URL = config('FXA_EVENTS_QUEUE_URL', default='')\nFXA_EVENTS_QUEUE_WAIT_TIME = config('FXA_EVENTS_QUEUE_WAIT_TIME', cast=int, default=10)\nFXA_EVENTS_SNITCH_ID = config('FXA_EVENTS_SNITCH_ID', default='')\nFXA_EVENTS_VERIFIED_SFDC_ENABLE = config('FXA_EVENTS_VERIFIED_SFDC_ENABLE', cast=bool, default=False)\n\nFXA_ACCESS_KEY_ID = config('FXA_ACCESS_KEY_ID', default='')\nFXA_SECRET_ACCESS_KEY = config('FXA_SECRET_ACCESS_KEY', default='')\nFXA_S3_BUCKET = config('FXA_S3_BUCKET', default='')\nFXA_SFMC_DE = config('FXA_SFMC_DE', default='FXA_Logins')\nFXA_SNITCH_URL = config('FXA_SNITCH_URL', default='')\n# stable, stage, or production\n# https://github.com/mozilla/PyFxA/blob/master/fxa/constants.py\nFXA_OAUTH_SERVER_ENV = config('FXA_OAUTH_SERVER_ENV', default='stable')\nFXA_CLIENT_ID = config('FXA_CLIENT_ID', default='')\nFXA_CLIENT_SECRET = config('FXA_CLIENT_SECRET', default='')\nFXA_OAUTH_TOKEN_TTL = config('FXA_OAUTH_TOKEN_TTL', default=300, cast=int) # 5 minutes\n\nFXA_EMAIL_PREFS_DOMAIN = config('FXA_EMAIL_PREFS_DOMAIN', default='www.mozilla.org')\nFXA_REGISTER_NEWSLETTER = config('FXA_REGISTER_NEWSLETTER', default='firefox-accounts-journey')\nFXA_REGISTER_SOURCE_URL = config('FXA_REGISTER_SOURCE_URL', default='https://accounts.firefox.com/')\n# TODO move this to the DB\nFXA_LOGIN_CAMPAIGNS = {\n 'fxa-embedded-form-moz': 'mozilla-welcome',\n 'fxa-embedded-form-fx': 'firefox-welcome',\n 'membership-idealo': 'member-idealo',\n 'membership-comm': 
'member-comm',\n 'membership-tech': 'member-tech',\n 'membership-tk': 'member-tk',\n}\n\nSUBHUB_OPP_RECORD_TYPE = config('SUBHUB_OPP_RECORD_TYPE', default='')\nSUBHUB_CC_EXPIRE_TRIGGER = config('SUBHUB_CC_EXPIRE_TRIGGER', default='en_subscription_services_cc_expired')\n\nCOMMON_VOICE_NEWSLETTER = config('COMMON_VOICE_NEWSLETTER', default='common-voice')\nCOMMON_VOICE_BATCH_UPDATES = config('COMMON_VOICE_BATCH_UPDATES', default=False, cast=bool)\nCOMMON_VOICE_BATCH_PROCESSING = config('COMMON_VOICE_BATCH_PROCESSING', default=False, cast=bool)\nCOMMON_VOICE_BATCH_CHUNK_SIZE = config('COMMON_VOICE_BATCH_CHUNK_SIZE', default=1000, cast=int)\n\nOIDC_ENABLE = config('OIDC_ENABLE', default=False, cast=bool)\nif OIDC_ENABLE:\n AUTHENTICATION_BACKENDS = (\n 'basket.base.authentication.OIDCModelBackend',\n )\n OIDC_OP_AUTHORIZATION_ENDPOINT = config('OIDC_OP_AUTHORIZATION_ENDPOINT')\n OIDC_OP_TOKEN_ENDPOINT = config('OIDC_OP_TOKEN_ENDPOINT')\n OIDC_OP_USER_ENDPOINT = config('OIDC_OP_USER_ENDPOINT')\n\n OIDC_RP_CLIENT_ID = config('OIDC_RP_CLIENT_ID')\n OIDC_RP_CLIENT_SECRET = config('OIDC_RP_CLIENT_SECRET')\n OIDC_CREATE_USER = config('OIDC_CREATE_USER', default=False, cast=bool)\n MIDDLEWARE += ('basket.news.middleware.OIDCSessionRefreshMiddleware',)\n LOGIN_REDIRECT_URL = '/admin/'\n\nif sys.argv[0].endswith('py.test') or (len(sys.argv) > 1 and sys.argv[1] == 'test'):\n # stuff that's absolutely required for a test run\n CELERY_TASK_ALWAYS_EAGER = True\n SFDC_SETTINGS.pop('username', None)\n SFDC_SETTINGS.pop('password', None)\n SFMC_SETTINGS.pop('clientid', None)\n SFMC_SETTINGS.pop('clientsecret', None)\n TESTING_EMAIL_DOMAINS = []\n", "path": "basket/settings.py"}], "after_files": [{"content": "import os\nimport platform\nimport socket\nimport struct\nimport sys\nfrom datetime import timedelta\n\nimport dj_database_url\nimport django_cache_url\nfrom decouple import config, Csv, UndefinedValueError\nfrom pathlib import Path\n\n# Application version.\nVERSION = (0, 1)\n\n# ROOT path of the project. A pathlib.Path object.\nROOT_PATH = Path(__file__).resolve().parents[1]\nROOT = str(ROOT_PATH)\n\n\ndef path(*args):\n return str(ROOT_PATH.joinpath(*args))\n\n\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n# avoids a warning from django\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# DB read-only, API can still read-write to Salesforce\nREAD_ONLY_MODE = config('READ_ONLY_MODE', False, cast=bool)\n# Disables the API and changes redirects\nADMIN_ONLY_MODE = config('ADMIN_ONLY_MODE', False, cast=bool)\nBASKET_RW_URL = config('BASKET_RW_URL', default='https://prod-oregon-b.basket.moz.works')\n\nREDIS_URL = config('REDIS_URL', None)\nif REDIS_URL:\n REDIS_URL = REDIS_URL.rstrip('/0')\n # use redis for celery and cache\n os.environ['CELERY_BROKER_URL'] = REDIS_URL + '/' + config('REDIS_CELERY_DB', '0')\n os.environ['CACHE_URL'] = REDIS_URL + '/' + config('REDIS_CACHE_DB', '1')\n\n# Production uses MySQL, but Sqlite should be sufficient for local development.\n# Our CI server tests against MySQL. 
See travis.py in this directory\n# for an example if you'd like to run MySQL locally, and add that to your\n# local.py.\nDATABASES = {\n 'default': config('DATABASE_URL',\n default='sqlite:///basket.db',\n cast=dj_database_url.parse),\n}\nif DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':\n DATABASES['default']['OPTIONS'] = {\n 'init_command': \"SET sql_mode='STRICT_TRANS_TABLES'\",\n }\n\n\nCACHES = {\n 'default': config('CACHE_URL',\n default='locmem://',\n cast=django_cache_url.parse),\n 'bad_message_ids': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'TIMEOUT': 12 * 60 * 60, # 12 hours\n },\n 'email_block_list': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'TIMEOUT': 60 * 60, # 1 hour\n },\n 'product_details': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nif CACHES['default']['BACKEND'].startswith('django_redis'):\n options = CACHES['default'].setdefault('OPTIONS', {})\n options['PARSER_CLASS'] = 'redis.connection.HiredisParser'\n\ndefault_email_backend = ('django.core.mail.backends.console.EmailBackend' if DEBUG else\n 'django.core.mail.backends.smtp.EmailBackend')\nEMAIL_BACKEND = config('EMAIL_BACKEND', default=default_email_backend)\nEMAIL_HOST = config('EMAIL_HOST', default='localhost')\nEMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\nEMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nEMAIL_SUBJECT_PREFIX = config('EMAIL_SUBJECT_PREFIX', default='[basket] ')\nEMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n\nALLOWED_HOSTS = config('ALLOWED_HOSTS',\n default='.allizom.org, .moz.works, basket.mozmar.org, '\n 'basket.mozilla.com, basket.mozilla.org',\n cast=Csv())\nALLOWED_CIDR_NETS = config('ALLOWED_CIDR_NETS', default='', cast=Csv())\nENFORCE_HOSTNAME = config('ENFORCE_HOSTNAME', default='', cast=Csv())\nUSE_X_FORWARDED_HOST = True\n\nSESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', not DEBUG, cast=bool)\nSESSION_ENGINE = config('SESSION_ENGINE', default='django.contrib.sessions.backends.cache')\nCSRF_COOKIE_SECURE = config('CSRF_COOKIE_SECURE', not DEBUG, cast=bool)\nDISABLE_ADMIN = config('DISABLE_ADMIN', READ_ONLY_MODE, cast=bool)\nSTORE_TASK_FAILURES = config('STORE_TASK_FAILURES', not READ_ONLY_MODE, cast=bool)\n# if DISABLE_ADMIN is True redirect /admin/ to this URL\nADMIN_REDIRECT_URL = config('ADMIN_REDIRECT_URL',\n 'https://admin.basket.moz.works/admin/')\n\nTIME_ZONE = 'UTC'\nUSE_TZ = True\nSITE_ID = 1\nUSE_I18N = False\n\nSTATIC_ROOT = path('static')\nSTATIC_URL = '/static/'\nif not DEBUG:\n STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\ntry:\n # Make this unique, and don't share it with anybody.\n SECRET_KEY = config('SECRET_KEY')\nexcept UndefinedValueError:\n raise UndefinedValueError('The SECRET_KEY environment varialbe is required. 
'\n 'Move env-dist to .env if you want the defaults.')\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'basket.news.context_processors.settings',\n ],\n },\n },\n]\n\nMIDDLEWARE = (\n 'allow_cidr.middleware.AllowCIDRMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'basket.news.middleware.EnforceHostnameMiddleware',\n 'basket.news.middleware.HostnameMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'basket.news.middleware.GraphiteViewHitCountMiddleware',\n 'django_statsd.middleware.GraphiteRequestTimingMiddleware',\n 'django_statsd.middleware.GraphiteMiddleware',\n 'ratelimit.middleware.RatelimitMiddleware',\n)\n\nROOT_URLCONF = 'basket.urls'\n\nINSTALLED_APPS = (\n 'basket.news',\n 'basket.base',\n\n 'corsheaders',\n 'product_details',\n 'raven.contrib.django.raven_compat',\n 'django_extensions',\n 'mozilla_django_oidc',\n 'watchman',\n\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n)\n\n# SecurityMiddleware settings\nSECURE_HSTS_SECONDS = config('SECURE_HSTS_SECONDS', default='0', cast=int)\nSECURE_HSTS_INCLUDE_SUBDOMAINS = False\nSECURE_BROWSER_XSS_FILTER = config('SECURE_BROWSER_XSS_FILTER', default=True, cast=bool)\nSECURE_CONTENT_TYPE_NOSNIFF = config('SECURE_CONTENT_TYPE_NOSNIFF', default=True, cast=bool)\nSECURE_SSL_REDIRECT = config('SECURE_SSL_REDIRECT', default=False, cast=bool)\nSECURE_REDIRECT_EXEMPT = [\n r'^healthz/$',\n r'^readiness/$',\n]\nif config('USE_SECURE_PROXY_HEADER', default=SECURE_SSL_REDIRECT, cast=bool):\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# watchman\nWATCHMAN_DISABLE_APM = True\nWATCHMAN_CHECKS = (\n 'watchman.checks.caches',\n 'watchman.checks.databases',\n)\n\n# legacy name\nEXACTTARGET_USE_SANDBOX = config('EXACTTARGET_USE_SANDBOX', False, cast=bool)\nUSE_SANDBOX_BACKEND = config('USE_SANDBOX_BACKEND', EXACTTARGET_USE_SANDBOX, cast=bool)\nET_CLIENT_ID = config('ET_CLIENT_ID', None)\nET_CLIENT_SECRET = config('ET_CLIENT_SECRET', None)\n\nif USE_SANDBOX_BACKEND:\n auth_url = 'https://auth-test.exacttargetapis.com/v1/requestToken?legacy=1'\n wsdl_loc = 'etframework.test.wsdl'\nelse:\n auth_url = 'https://auth.exacttargetapis.com/v1/requestToken?legacy=1'\n wsdl_loc = 'etframework.wsdl'\n\nSFMC_DEBUG = config('SFMC_DEBUG', DEBUG, cast=bool)\nSFMC_SETTINGS = {\n 'authenticationurl': auth_url,\n 'wsdl_file_local_loc': path('basket', 'news', 'backends', wsdl_loc),\n}\nif ET_CLIENT_ID and ET_CLIENT_SECRET:\n SFMC_SETTINGS['clientid'] = ET_CLIENT_ID\n SFMC_SETTINGS['clientsecret'] = ET_CLIENT_SECRET\n\n# Salesforce.com\nSFDC_SETTINGS = {\n 'username': config('SFDC_USERNAME', None),\n 'password': config('SFDC_PASSWORD', None),\n 'security_token': config('SFDC_SEC_TOKEN', None),\n 'sandbox': config('SFDC_USE_SANDBOX', USE_SANDBOX_BACKEND, cast=bool),\n}\n# default SFDC 
sessions timeout after 2 hours of inactivity. so they never timeout on\n# prod. Let's make it every 4 hours by default.\nSFDC_SESSION_TIMEOUT = config('SFDC_SESSION_TIMEOUT', 60 * 60 * 4, cast=int)\nSFDC_REQUEST_TIMEOUT = config('SFDC_REQUEST_TIMEOUT', 30, cast=int)\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/(news/|subscribe)'\n\n# view rate limiting\nRATELIMIT_VIEW = 'basket.news.views.ratelimited'\n\nKOMBU_FERNET_KEY = config('KOMBU_FERNET_KEY', None)\n# for key rotation\nKOMBU_FERNET_KEY_PREVIOUS = config('KOMBU_FERNET_KEY_PREVIOUS', None)\nCELERY_TASK_ALWAYS_EAGER = config('CELERY_TASK_ALWAYS_EAGER', DEBUG, cast=bool)\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_TASK_ACKS_LATE = config('CELERY_TASK_ACKS_LATE', False, cast=bool)\nCELERY_TASK_REJECT_ON_WORKER_LOST = False\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 245760} # 68.27 hours 11th retry\nCELERY_BROKER_URL = config('CELERY_BROKER_URL', None)\nCELERY_REDIS_MAX_CONNECTIONS = config('CELERY_REDIS_MAX_CONNECTIONS', 2, cast=int)\nCELERY_WORKER_DISABLE_RATE_LIMITS = True\nCELERY_TASK_IGNORE_RESULT = True\nCELERY_WORKER_PREFETCH_MULTIPLIER = config('CELERY_WORKER_PREFETCH_MULTIPLIER', 1, cast=int)\nCELERY_TASK_COMPRESSION = 'gzip'\nCELERY_TASK_ROUTES = {\n 'basket.news.tasks.snitch': {'queue': 'snitch'},\n}\n\n# size in kb\nCELERY_WORKER_MAX_MEMORY_PER_CHILD = config('CELERY_WORKER_MAX_MEMORY_PER_CHILD', 200000, cast=int)\n\nSNITCH_ID = config('SNITCH_ID', None)\n\nCELERY_BEAT_SCHEDULE = {}\n\nif SNITCH_ID:\n CELERY_BEAT_SCHEDULE['snitch'] = {\n 'task': 'basket.news.tasks.snitch',\n 'schedule': timedelta(minutes=5),\n }\n\nif not READ_ONLY_MODE:\n CELERY_BEAT_SCHEDULE['common-voice'] = {\n 'task': 'basket.news.tasks.process_common_voice_batch',\n 'schedule': timedelta(hours=1),\n }\n\n\n# via http://stackoverflow.com/a/6556951/107114\ndef get_default_gateway_linux():\n \"\"\"Read the default gateway directly from /proc.\"\"\"\n try:\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))\n except IOError:\n return 'localhost'\n\n\nHOSTNAME = platform.node()\nCLUSTER_NAME = config('CLUSTER_NAME', default=None)\nK8S_NAMESPACE = config('K8S_NAMESPACE', default=None)\nK8S_POD_NAME = config('K8S_POD_NAME', default=None)\n\nRAVEN_CONFIG = {\n 'dsn': config('SENTRY_DSN', None),\n 'site': '.'.join(x for x in [K8S_NAMESPACE, CLUSTER_NAME] if x),\n 'release': config('GIT_SHA', None),\n}\n\nSTATSD_HOST = config('STATSD_HOST', get_default_gateway_linux())\nSTATSD_PORT = config('STATSD_PORT', 8125, cast=int)\nSTATSD_PREFIX = config('STATSD_PREFIX', K8S_NAMESPACE)\nSTATSD_CLIENT = config('STATSD_CLIENT', 'django_statsd.clients.null')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': config('DJANGO_LOG_LEVEL', default='WARNING'),\n 'handlers': ['console'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'suds.client': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nPROD_DETAILS_CACHE_NAME = 'product_details'\nPROD_DETAILS_CACHE_TIMEOUT = 
None\n\nRECOVER_MSG_LANGS = config('RECOVER_MSG_LANGS', 'en', cast=Csv())\n# language codes that we support and send through to SFDC\n# regardless of their existence in the DB\nEXTRA_SUPPORTED_LANGS = config('EXTRA_SUPPORTED_LANGS', '', cast=Csv())\n\nSYNC_KEY = config('SYNC_KEY', None)\nTESTING_EMAIL_DOMAINS = config('TESTING_EMAIL_DOMAINS',\n 'restmail.net,restmail.lcip.org,example.com',\n cast=Csv())\n\nMAINTENANCE_MODE = config('MAINTENANCE_MODE', False, cast=bool)\nQUEUE_BATCH_SIZE = config('QUEUE_BATCH_SIZE', 500, cast=int)\n# can we read user data in maintenance mode\nMAINTENANCE_READ_ONLY = config('MAINTENANCE_READ_ONLY', False, cast=bool)\n\nTASK_LOCK_TIMEOUT = config('TASK_LOCK_TIMEOUT', 60, cast=int)\nTASK_LOCKING_ENABLE = config('TASK_LOCKING_ENABLE', False, cast=bool)\n\nDONATE_ACCESS_KEY_ID = config('DONATE_ACCESS_KEY_ID', default='')\nDONATE_SECRET_ACCESS_KEY = config('DONATE_SECRET_ACCESS_KEY', default='')\nDONATE_QUEUE_REGION = config('DONATE_QUEUE_REGION', default='')\nDONATE_QUEUE_URL = config('DONATE_QUEUE_URL', default='')\nDONATE_QUEUE_WAIT_TIME = config('DONATE_QUEUE_WAIT_TIME', cast=int, default=10)\n# turn this on to consume the queue but ignore the messages\n# needed so that donate.m.o can run continuous tests w/o filling the SFDC sandbox\nDONATE_QUEUE_IGNORE_MODE = config('DONATE_QUEUE_IGNORE_MODE', cast=bool, default=False)\nDONATE_OPP_RECORD_TYPE = config('DONATE_OPP_RECORD_TYPE', default='')\nDONATE_CONTACT_RECORD_TYPE = config('DONATE_CONTACT_RECORD_TYPE', default='')\nDONATE_SNITCH_ID = config('DONATE_SNITCH_ID', default='')\nDONATE_NOTIFY_EMAIL = config('DONATE_NOTIFY_EMAIL', default='')\nDONATE_UPDATE_FAIL_DE = config('DONATE_UPDATE_FAIL_DE', default='Donation_Diff')\n\nFXA_EVENTS_QUEUE_ENABLE = config('FXA_EVENTS_QUEUE_ENABLE', cast=bool, default=False)\nFXA_EVENTS_ACCESS_KEY_ID = config('FXA_EVENTS_ACCESS_KEY_ID', default='')\nFXA_EVENTS_SECRET_ACCESS_KEY = config('FXA_EVENTS_SECRET_ACCESS_KEY', default='')\nFXA_EVENTS_QUEUE_REGION = config('FXA_EVENTS_QUEUE_REGION', default='')\nFXA_EVENTS_QUEUE_URL = config('FXA_EVENTS_QUEUE_URL', default='')\nFXA_EVENTS_QUEUE_WAIT_TIME = config('FXA_EVENTS_QUEUE_WAIT_TIME', cast=int, default=10)\nFXA_EVENTS_SNITCH_ID = config('FXA_EVENTS_SNITCH_ID', default='')\nFXA_EVENTS_VERIFIED_SFDC_ENABLE = config('FXA_EVENTS_VERIFIED_SFDC_ENABLE', cast=bool, default=False)\n\nFXA_ACCESS_KEY_ID = config('FXA_ACCESS_KEY_ID', default='')\nFXA_SECRET_ACCESS_KEY = config('FXA_SECRET_ACCESS_KEY', default='')\nFXA_S3_BUCKET = config('FXA_S3_BUCKET', default='')\nFXA_SFMC_DE = config('FXA_SFMC_DE', default='FXA_Logins')\nFXA_SNITCH_URL = config('FXA_SNITCH_URL', default='')\n# stable, stage, or production\n# https://github.com/mozilla/PyFxA/blob/master/fxa/constants.py\nFXA_OAUTH_SERVER_ENV = config('FXA_OAUTH_SERVER_ENV', default='stable')\nFXA_CLIENT_ID = config('FXA_CLIENT_ID', default='')\nFXA_CLIENT_SECRET = config('FXA_CLIENT_SECRET', default='')\nFXA_OAUTH_TOKEN_TTL = config('FXA_OAUTH_TOKEN_TTL', default=300, cast=int) # 5 minutes\n\nFXA_EMAIL_PREFS_DOMAIN = config('FXA_EMAIL_PREFS_DOMAIN', default='www.mozilla.org')\nFXA_REGISTER_NEWSLETTER = config('FXA_REGISTER_NEWSLETTER', default='firefox-accounts-journey')\nFXA_REGISTER_SOURCE_URL = config('FXA_REGISTER_SOURCE_URL', default='https://accounts.firefox.com/')\n# TODO move this to the DB\nFXA_LOGIN_CAMPAIGNS = {\n 'fxa-embedded-form-moz': 'mozilla-welcome',\n 'fxa-embedded-form-fx': 'firefox-welcome',\n 'membership-idealo': 'member-idealo',\n 'membership-comm': 
'member-comm',\n 'membership-tech': 'member-tech',\n 'membership-tk': 'member-tk',\n}\n\nSUBHUB_OPP_RECORD_TYPE = config('SUBHUB_OPP_RECORD_TYPE', default='')\nSUBHUB_CC_EXPIRE_TRIGGER = config('SUBHUB_CC_EXPIRE_TRIGGER', default='en_subscription_services_cc_expired')\n\nCOMMON_VOICE_NEWSLETTER = config('COMMON_VOICE_NEWSLETTER', default='common-voice')\nCOMMON_VOICE_BATCH_UPDATES = config('COMMON_VOICE_BATCH_UPDATES', default=False, cast=bool)\nCOMMON_VOICE_BATCH_PROCESSING = config('COMMON_VOICE_BATCH_PROCESSING', default=False, cast=bool)\nCOMMON_VOICE_BATCH_CHUNK_SIZE = config('COMMON_VOICE_BATCH_CHUNK_SIZE', default=1000, cast=int)\n\nOIDC_ENABLE = config('OIDC_ENABLE', default=False, cast=bool)\nif OIDC_ENABLE:\n AUTHENTICATION_BACKENDS = (\n 'basket.base.authentication.OIDCModelBackend',\n )\n OIDC_OP_AUTHORIZATION_ENDPOINT = config('OIDC_OP_AUTHORIZATION_ENDPOINT')\n OIDC_OP_TOKEN_ENDPOINT = config('OIDC_OP_TOKEN_ENDPOINT')\n OIDC_OP_USER_ENDPOINT = config('OIDC_OP_USER_ENDPOINT')\n\n OIDC_RP_CLIENT_ID = config('OIDC_RP_CLIENT_ID')\n OIDC_RP_CLIENT_SECRET = config('OIDC_RP_CLIENT_SECRET')\n OIDC_CREATE_USER = config('OIDC_CREATE_USER', default=False, cast=bool)\n MIDDLEWARE += ('basket.news.middleware.OIDCSessionRefreshMiddleware',)\n LOGIN_REDIRECT_URL = '/admin/'\n\nif sys.argv[0].endswith('py.test') or (len(sys.argv) > 1 and sys.argv[1] == 'test'):\n # stuff that's absolutely required for a test run\n CELERY_TASK_ALWAYS_EAGER = True\n SFDC_SETTINGS.pop('username', None)\n SFDC_SETTINGS.pop('password', None)\n SFMC_SETTINGS.pop('clientid', None)\n SFMC_SETTINGS.pop('clientsecret', None)\n TESTING_EMAIL_DOMAINS = []\n", "path": "basket/settings.py"}]} |
gh_patches_debug_1326 | rasdani/github-patches | git_diff | microsoft__torchgeo-309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Open in Colab URL broken in 0.1.1
The latest 0.1.1 release broke the "Open in Colab" URL in our tutorials. Still trying to fix this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 # Configuration file for the Sphinx documentation builder.
5 #
6 # This file only contains a selection of the most common options. For a full
7 # list see the documentation:
8 # https://www.sphinx-doc.org/en/master/usage/configuration.html
9
10 # -- Path setup --------------------------------------------------------------
11
12 import os
13 import sys
14
15 import pytorch_sphinx_theme
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.insert(0, os.path.abspath(".."))
21
22 import torchgeo # noqa: E402
23
24 # -- Project information -----------------------------------------------------
25
26 project = "torchgeo"
27 copyright = "2021, Microsoft Corporation"
28 author = torchgeo.__author__
29 version = ".".join(torchgeo.__version__.split(".")[:2])
30 release = torchgeo.__version__
31
32
33 # -- General configuration ---------------------------------------------------
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 "sphinx.ext.autodoc",
40 "sphinx.ext.intersphinx",
41 "sphinx.ext.napoleon",
42 "sphinx.ext.todo",
43 "sphinx.ext.viewcode",
44 "nbsphinx",
45 ]
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = ["_build"]
51
52 # Sphinx 3.0+ required for:
53 # autodoc_typehints = "description"
54 needs_sphinx = "3.0"
55
56 nitpicky = True
57 nitpick_ignore = [
58 # https://github.com/sphinx-doc/sphinx/issues/8127
59 ("py:class", ".."),
60 # TODO: can't figure out why this isn't found
61 ("py:class", "LightningDataModule"),
62 # Undocumented class
63 ("py:class", "torchvision.models.resnet.ResNet"),
64 ]
65
66
67 # -- Options for HTML output -------------------------------------------------
68
69 # The theme to use for HTML and HTML Help pages. See the documentation for
70 # a list of builtin themes.
71 html_theme = "pytorch_sphinx_theme"
72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
73
74 # Theme options are theme-specific and customize the look and feel of a theme
75 # further. For a list of options available for each theme, see the
76 # documentation.
77 html_theme_options = {
78 "collapse_navigation": False,
79 "display_version": True,
80 "logo_only": True,
81 "pytorch_project": "docs",
82 "navigation_with_keys": True,
83 "analytics_id": "UA-209075005-1",
84 }
85
86 html_favicon = os.path.join("..", "logo", "favicon.ico")
87
88 html_static_path = ["_static"]
89 html_css_files = ["workaround.css"]
90
91 # -- Extension configuration -------------------------------------------------
92
93 # sphinx.ext.autodoc
94 autodoc_default_options = {
95 "members": True,
96 "special-members": True,
97 "show-inheritance": True,
98 }
99 autodoc_member_order = "bysource"
100 autodoc_typehints = "description"
101
102 # sphinx.ext.intersphinx
103 intersphinx_mapping = {
104 "matplotlib": ("https://matplotlib.org/stable/", None),
105 "python": ("https://docs.python.org/3", None),
106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None),
107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None),
108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None),
109 "torch": ("https://pytorch.org/docs/stable", None),
110 "torchvision": ("https://pytorch.org/vision/stable", None),
111 }
112
113 # nbsphinx
114 nbsphinx_execute = "never"
115 # TODO: branch/tag should change depending on which version of docs you look at
116 # TODO: width option of image directive is broken, see:
117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140
118 nbsphinx_prolog = """
119 {% set colab = "https://colab.research.google.com" %}
120 {% set repo = "microsoft/torchgeo" %}
121 {% set branch = "main" %}
122
123 .. image:: {{ colab }}/assets/colab-badge.svg
124 :class: colabbadge
125 :alt: Open in Colab
126 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
127 """
128
129 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
130 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599
131 # NOTE: This will likely break nbsphinx widgets
132 nbsphinx_requirejs_path = ""
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -118,7 +118,11 @@
nbsphinx_prolog = """
{% set colab = "https://colab.research.google.com" %}
{% set repo = "microsoft/torchgeo" %}
-{% set branch = "main" %}
+{% if "dev" in env.config.release %}
+ {% set branch = "main" %}
+{% else %}
+ {% set branch = "releases/v" ~ env.config.version %}
+{% endif %}
.. image:: {{ colab }}/assets/colab-badge.svg
:class: colabbadge
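
For reference, the patch above stops hard-coding `main` and instead derives the Colab branch from Sphinx's `release`/`version` strings. A minimal Python sketch of that selection rule (the helper name `colab_branch` is hypothetical, used only to illustrate the logic):

```python
def colab_branch(release: str, version: str) -> str:
    # Dev builds (e.g. "0.2.0.dev0") keep linking to "main"; tagged releases
    # link to the matching release branch, mirroring the Jinja condition above.
    if "dev" in release:
        return "main"
    return "releases/v" + version


# With release "0.1.1" and version "0.1" the badge targets "releases/v0.1",
# so the notebook URL points at the published docs rather than a missing path.
assert colab_branch("0.1.1", "0.1") == "releases/v0.1"
assert colab_branch("0.2.0.dev0", "0.2") == "main"
```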
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -118,7 +118,11 @@\n nbsphinx_prolog = \"\"\"\n {% set colab = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n-{% set branch = \"main\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases/v\" ~ env.config.version %}\n+{% endif %}\n \n .. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n", "issue": "Open in Colab URL broken in 0.1.1\nThe latest 0.1.1 release broke the \"Open in Colab\" URL in our tutorials. Still trying to fix this.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% set branch = \"main\" %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n{% endif %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]} |
gh_patches_debug_1327 | rasdani/github-patches | git_diff | psychopy__psychopy-4905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DotStim size parameter does not correspond to displayed size
The size attribute of a DotStim does not correspond to its actual displayed size. For example, below are two polygons each set to have a size of (2.5, 2.5) deg, and a DotStim notionally of the same size and units:

This is not a diameter/radius confusion, as even doubling the size of the polygons still does not match the extent of the DotStim field (see https://discourse.psychopy.org/t/dotstim-fieldsize-argument-has-unclear-units/29520/3).
When using other units, such as `height` rather than `deg`, the displayed size does not seem to lawfully correspond to the specified values (see the initial post in the Discourse thread above).
***This then raises questions about whether the specified dot speeds are correct, if the stimulus doesn't know its own angular extent.***
Below is another example, where all three stimuli were set to have sizes of (0.5, 0.5) in height units. In this case, the DotStim is now markedly smaller than the reference polygons:

--- END ISSUE ---
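A minimal sketch that reproduces the mismatch described in the issue (the window, monitor profile and dot parameters below are assumed values, not taken from the report):

```python
from psychopy import visual

win = visual.Window(units='deg', monitor='testMonitor')  # assumed monitor profile

# Reference outline and dot field are both nominally 2.5 x 2.5 deg.
outline = visual.Rect(win, width=2.5, height=2.5, lineColor='red')
dots = visual.DotStim(win, nDots=200, fieldShape='sqr', fieldSize=(2.5, 2.5),
                      dotSize=4, speed=0.02, coherence=1.0)

for _ in range(180):  # the drawn dot field does not match the red outline
    dots.draw()
    outline.draw()
    win.flip()
win.close()
```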
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/visual/dot.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """This stimulus class defines a field of dots with an update rule that
5 determines how they change on every call to the .draw() method.
6 """
7
8 # Part of the PsychoPy library
9 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
10 # Distributed under the terms of the GNU General Public License (GPL).
11
12 # Bugfix by Andrew Schofield.
13 # Replaces out of bounds but still live dots at opposite edge of aperture
14 # instead of randomly within the field. This stops the concentration of dots at
15 # one side of field when lifetime is long.
16 # Update the dot direction immediately for 'walk' as otherwise when the
17 # coherence varies some signal dots will inherit the random directions of
18 # previous walking dots.
19 # Provide a visible wrapper function to refresh all the dot locations so that
20 # the whole field can be more easily refreshed between trials.
21
22 # Ensure setting pyglet.options['debug_gl'] to False is done prior to any
23 # other calls to pyglet or pyglet submodules, otherwise it may not get picked
24 # up by the pyglet GL engine and have no effect.
25 # Shaders will work but require OpenGL2.0 drivers AND PyOpenGL3.0+
26 import pyglet
27 pyglet.options['debug_gl'] = False
28 import ctypes
29 GL = pyglet.gl
30
31 import psychopy # so we can get the __path__
32 from psychopy import logging
33
34 # tools must only be imported *after* event or MovieStim breaks on win32
35 # (JWP has no idea why!)
36 from psychopy.tools.attributetools import attributeSetter, setAttribute
37 from psychopy.tools.arraytools import val2array
38 from psychopy.visual.basevisual import (BaseVisualStim, ColorMixin,
39 ContainerMixin, WindowMixin)
40 from psychopy.layout import Size
41
42 import numpy as np
43
44 # some constants
45 _piOver2 = np.pi / 2.
46 _piOver180 = np.pi / 180.
47 _2pi = 2 * np.pi
48
49
50 class DotStim(BaseVisualStim, ColorMixin, ContainerMixin):
51 """This stimulus class defines a field of dots with an update rule that
52 determines how they change on every call to the .draw() method.
53
54 This single class can be used to generate a wide variety of dot motion
55 types. For a review of possible types and their pros and cons see Scase,
56 Braddick & Raymond (1996). All six possible motions they describe can be
57 generated with appropriate choices of the `signalDots` (which determines
58 whether signal dots are the 'same' or 'different' on each frame),
59 `noiseDots` (which determines the locations of the noise dots on each frame)
60 and the `dotLife` (which determines for how many frames the dot will
61 continue before being regenerated).
62
63 The default settings (as of v1.70.00) is for the noise dots to have
64 identical velocity but random direction and signal dots remain the 'same'
65 (once a signal dot, always a signal dot).
66
67 For further detail about the different configurations see :ref:`dots` in the
68 Builder Components section of the documentation.
69
70 If further customisation is required, then the DotStim should be subclassed
71 and its _update_dotsXY and _newDotsXY methods overridden.
72
73 The maximum number of dots that can be drawn is limited by system
74 performance.
75
76 Attributes
77 ----------
78 fieldShape : str
79 *'sqr'* or 'circle'. Defines the envelope used to present the dots. If
80 changed while drawing, dots outside new envelope will be respawned.
81 dotSize : float
82 Dot size specified in pixels (overridden if `element` is specified).
83 :ref:`operations <attrib-operations>` are supported.
84 dotLife : int
85 Number of frames each dot lives for (-1=infinite). Dot lives are
86 initiated randomly from a uniform distribution from 0 to dotLife. If
87 changed while drawing, the lives of all dots will be randomly initiated
88 again.
89 signalDots : str
90 If 'same' then the signal and noise dots are constant. If 'different'
91 then the choice of which is signal and which is noise gets randomised on
92 each frame. This corresponds to Scase et al's (1996) categories of RDK.
93 noiseDots : str
94 Determines the behaviour of the noise dots, taken directly from Scase et
95 al's (1996) categories. For 'position', noise dots take a random
96 position every frame. For 'direction' noise dots follow a random, but
97 constant direction. For 'walk' noise dots vary their direction every
98 frame, but keep a constant speed.
99 element : object
100 This can be any object that has a ``.draw()`` method and a
101 ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!! DotStim
102 assumes that the element uses pixels as units. ``None`` defaults to
103 dots.
104 fieldPos : array_like
105 Specifying the location of the centre of the stimulus using a
106 :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more
107 documentation/examples on how to set position.
108 :ref:`operations <attrib-operations>` are supported.
109 fieldSize : array_like
110 Specifying the size of the field of dots using a
111 :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more
112 documentation/examples on how to set position.
113 :ref:`operations <attrib-operations>` are supported.
114 coherence : float
115 Change the coherence (%) of the DotStim. This will be rounded according
116 to the number of dots in the stimulus.
117 dir : float
118 Direction of the coherent dots in degrees. :ref:`operations
119 <attrib-operations>` are supported.
120 speed : float
121 Speed of the dots (in *units*/frame). :ref:`operations
122 <attrib-operations>` are supported.
123
124 """
125 def __init__(self,
126 win,
127 units='',
128 nDots=1,
129 coherence=0.5,
130 fieldPos=(0.0, 0.0),
131 fieldSize=(1.0, 1.0),
132 fieldShape='sqr',
133 fieldAnchor="center",
134 dotSize=2.0,
135 dotLife=3,
136 dir=0.0,
137 speed=0.5,
138 rgb=None,
139 color=(1.0, 1.0, 1.0),
140 colorSpace='rgb',
141 opacity=None,
142 contrast=1.0,
143 depth=0,
144 element=None,
145 signalDots='same',
146 noiseDots='direction',
147 name=None,
148 autoLog=None):
149 """
150 Parameters
151 ----------
152 win : window.Window
153 Window this stimulus is associated with.
154 units : str
155 Units to use.
156 nDots : int
157 Number of dots to present in the field.
158 coherence : float
159 Proportion of dots which are coherent. This value can be set using
160 the `coherence` property after initialization.
161 fieldPos : array_like
162 (x,y) or [x,y] position of the field. This value can be set using
163 the `fieldPos` property after initialization.
164 fieldSize : array_like, int or float
165 (x,y) or [x,y] or single value (applied to both dimensions). Sizes
166 can be negative and can extend beyond the window. This value can be
167 set using the `fieldSize` property after initialization.
168 fieldShape : str
169 Defines the envelope used to present the dots. If changed while
170 drawing by setting the `fieldShape` property, dots outside new
171 envelope will be respawned., valid values are 'square', 'sqr' or
172 'circle'.
173 dotSize : array_like or float
174 Size of the dots. If given an array, the sizes of individual dots
175 will be set. The array must have length `nDots`. If a single value
176 is given, all dots will be set to the same size.
177 dotLife : int
178 Lifetime of a dot in frames. Dot lives are initiated randomly from a
179 uniform distribution from 0 to dotLife. If changed while drawing,
180 the lives of all dots will be randomly initiated again. A value of
181 -1 results in dots having an infinite lifetime. This value can be
182 set using the `dotLife` property after initialization.
183 dir : float
184 Direction of the coherent dots in degrees. At 0 degrees, coherent
185 dots will move from left to right. Increasing the angle will rotate
186 the direction counter-clockwise. This value can be set using the
187 `dir` property after initialization.
188 speed : float
189 Speed of the dots (in *units* per frame). This value can be set
190 using the `speed` property after initialization.
191 rgb : array_like, optional
192 Color of the dots in form (r, g, b) or [r, g, b]. **Deprecated**,
193 use `color` instead.
194 color : array_like or str
195 Color of the dots in form (r, g, b) or [r, g, b].
196 colorSpace : str
197 Colorspace to use.
198 opacity : float
199 Opacity of the dots from 0.0 to 1.0.
200 contrast : float
201 Contrast of the dots 0.0 to 1.0. This value is simply multiplied by
202 the `color` value.
203 depth : float
204 **Deprecated**, depth is now controlled simply by drawing order.
205 element : object
206 This can be any object that has a ``.draw()`` method and a
207 ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!!
208 DotStim assumes that the element uses pixels as units.
209 ``None`` defaults to dots.
210 signalDots : str
211 If 'same' then the signal and noise dots are constant. If different
212 then the choice of which is signal and which is noise gets
213 randomised on each frame. This corresponds to Scase et al's (1996)
214 categories of RDK. This value can be set using the `signalDots`
215 property after initialization.
216 noiseDots : str
217 Determines the behaviour of the noise dots, taken directly from
218 Scase et al's (1996) categories. For 'position', noise dots take a
219 random position every frame. For 'direction' noise dots follow a
220 random, but constant direction. For 'walk' noise dots vary their
221 direction every frame, but keep a constant speed. This value can be
222 set using the `noiseDots` property after initialization.
223 name : str, optional
224 Optional name to use for logging.
225 autoLog : bool
226 Enable automatic logging.
227
228 """
229 # what local vars are defined (these are the init params) for use by
230 # __repr__
231 self._initParams = __builtins__['dir']()
232 self._initParams.remove('self')
233
234 super(DotStim, self).__init__(win, units=units, name=name,
235 autoLog=False) # set at end of init
236
237 self.nDots = nDots
238 # pos and size are ambiguous for dots so DotStim explicitly has
239 # fieldPos = pos, fieldSize=size and then dotSize as additional param
240 self.fieldPos = fieldPos # self.pos is also set here
241 self.fieldSize = val2array(fieldSize, False) # self.size is also set
242 if type(dotSize) in (tuple, list):
243 self.dotSize = np.array(dotSize)
244 else:
245 self.dotSize = dotSize
246 if self.win.useRetina:
247 self.dotSize *= 2 # double dot size to make up for 1/2-size pixels
248 self.fieldShape = fieldShape
249 self.__dict__['dir'] = dir
250 self.speed = speed
251 self.element = element
252 self.dotLife = dotLife
253 self.signalDots = signalDots
254
255 if rgb != None:
256 logging.warning("Use of rgb arguments to stimuli are deprecated."
257 " Please use color and colorSpace args instead")
258 self.colorSpace = 'rgba'
259 self.color = rgb
260 else:
261 self.colorSpace = colorSpace
262 self.color = color
263 self.opacity = opacity
264 self.contrast = float(contrast)
265 self.depth = depth
266
267 # initialise the dots themselves - give them all random dir and then
268 # fix the first n in the array to have the direction specified
269 self.coherence = coherence # using the attributeSetter
270 self.noiseDots = noiseDots
271
272 # initialise a random array of X,Y
273 self.vertices = self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots)
274 # all dots have the same speed
275 self._dotsSpeed = np.ones(self.nDots, dtype=float) * self.speed
276 # abs() means we can ignore the -1 case (no life)
277 self._dotsLife = np.abs(dotLife) * np.random.rand(self.nDots)
278 # pre-allocate array for flagging dead dots
279 self._deadDots = np.zeros(self.nDots, dtype=bool)
280 # set directions (only used when self.noiseDots='direction')
281 self._dotsDir = np.random.rand(self.nDots) * _2pi
282 self._dotsDir[self._signalDots] = self.dir * _piOver180
283
284 self._update_dotsXY()
285
286 self.anchor = fieldAnchor
287
288 # set autoLog now that params have been initialised
289 wantLog = autoLog is None and self.win.autoLog
290 self.__dict__['autoLog'] = autoLog or wantLog
291 if self.autoLog:
292 logging.exp("Created %s = %s" % (self.name, str(self)))
293
294 def set(self, attrib, val, op='', log=None):
295 """DEPRECATED: DotStim.set() is obsolete and may not be supported
296 in future versions of PsychoPy. Use the specific method for each
297 parameter instead (e.g. setFieldPos(), setCoherence()...).
298 """
299 self._set(attrib, val, op, log=log)
300
301 @attributeSetter
302 def fieldShape(self, fieldShape):
303 """*'sqr'* or 'circle'. Defines the envelope used to present the dots.
304 If changed while drawing, dots outside new envelope will be respawned.
305 """
306 self.__dict__['fieldShape'] = fieldShape
307
308 @property
309 def anchor(self):
310 return WindowMixin.anchor.fget(self)
311
312 @anchor.setter
313 def anchor(self, value):
314 WindowMixin.anchor.fset(self, value)
315
316 def setAnchor(self, value, log=None):
317 setAttribute(self, 'anchor', value, log)
318
319 @property
320 def dotSize(self):
321 """Float specified in pixels (overridden if `element` is specified).
322 :ref:`operations <attrib-operations>` are supported."""
323 if hasattr(self, "_dotSize"):
324 return getattr(self._dotSize, 'pix')[0]
325
326 @dotSize.setter
327 def dotSize(self, value):
328 self._dotSize = Size(value, units='pix', win=self.win)
329
330 @attributeSetter
331 def dotLife(self, dotLife):
332 """Int. Number of frames each dot lives for (-1=infinite).
333 Dot lives are initiated randomly from a uniform distribution
334 from 0 to dotLife. If changed while drawing, the lives of all
335 dots will be randomly initiated again.
336
337 :ref:`operations <attrib-operations>` are supported.
338 """
339 self.__dict__['dotLife'] = dotLife
340 self._dotsLife = abs(self.dotLife) * np.random.rand(self.nDots)
341
342 @attributeSetter
343 def signalDots(self, signalDots):
344 """str - 'same' or *'different'*
345 If 'same' then the signal and noise dots are constant. If different
346 then the choice of which is signal and which is noise gets
347 randomised on each frame. This corresponds to Scase et al's (1996)
348 categories of RDK.
349 """
350 self.__dict__['signalDots'] = signalDots
351
352 @attributeSetter
353 def noiseDots(self, noiseDots):
354 """str - *'direction'*, 'position' or 'walk'
355 Determines the behaviour of the noise dots, taken directly from
356 Scase et al's (1996) categories. For 'position', noise dots take a
357 random position every frame. For 'direction' noise dots follow a
358 random, but constant direction. For 'walk' noise dots vary their
359 direction every frame, but keep a constant speed.
360 """
361 self.__dict__['noiseDots'] = noiseDots
362 self.coherence = self.coherence # update using attributeSetter
363
364 @attributeSetter
365 def element(self, element):
366 """*None* or a visual stimulus object
367 This can be any object that has a ``.draw()`` method and a
368 ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!!
369 DotStim assumes that the element uses pixels as units.
370 ``None`` defaults to dots.
371
372 See `ElementArrayStim` for a faster implementation of this idea.
373 """
374 self.__dict__['element'] = element
375
376 @attributeSetter
377 def fieldPos(self, pos):
378 """Specifying the location of the centre of the stimulus
379 using a :ref:`x,y-pair <attrib-xy>`.
380 See e.g. :class:`.ShapeStim` for more documentation / examples
381 on how to set position.
382
383 :ref:`operations <attrib-operations>` are supported.
384 """
385 # Isn't there a way to use BaseVisualStim.pos.__doc__ as docstring
386 # here?
387 self.pos = pos # using BaseVisualStim. we'll store this as both
388 self.__dict__['fieldPos'] = self.pos
389
390 def setFieldPos(self, val, op='', log=None):
391 """Usually you can use 'stim.attribute = value' syntax instead, but use
392 this method if you need to suppress the log message.
393 """
394 setAttribute(self, 'fieldPos', val, log, op) # calls attributeSetter
395
396 def setPos(self, newPos=None, operation='', units=None, log=None):
397 """Obsolete - users should use setFieldPos instead of setPos
398 """
399 logging.error("User called DotStim.setPos(pos). "
400 "Use DotStim.SetFieldPos(pos) instead.")
401
402 def setFieldSize(self, val, op='', log=None):
403 """Usually you can use 'stim.attribute = value' syntax instead, but use
404 this method if you need to suppress the log message.
405 """
406 setAttribute(self, 'fieldSize', val, log, op) # calls attributeSetter
407
408 @attributeSetter
409 def fieldSize(self, size):
410 """Specifying the size of the field of dots using a
411 :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more
412 documentation/examples on how to set position.
413
414 :ref:`operations <attrib-operations>` are supported.
415 """
416 # Isn't there a way to use BaseVisualStim.pos.__doc__ as docstring
417 # here?
418 self.size = size # using BaseVisualStim. we'll store this as both
419 self.__dict__['fieldSize'] = self.size
420
421 @attributeSetter
422 def coherence(self, coherence):
423 """Scalar between 0 and 1.
424
425 Change the coherence (%) of the DotStim. This will be rounded according
426 to the number of dots in the stimulus.
427
428 :ref:`operations <attrib-operations>` are supported.
429 """
430 if not 0 <= coherence <= 1:
431 raise ValueError('DotStim.coherence must be between 0 and 1')
432
433 _cohDots = coherence * self.nDots
434
435 self.__dict__['coherence'] = round(_cohDots) /self.nDots
436 self._signalDots = np.zeros(self.nDots, dtype=bool)
437 self._signalDots[0:int(self.coherence * self.nDots)] = True
438 # for 'direction' method we need to update the direction of the number
439 # of signal dots immediately, but for other methods it will be done
440 # during updateXY
441
442 # NB - AJS Actually you need to do this for 'walk' also
443         # otherwise would-be signal dots adopt random directions when they become
444         # signal dots in later trials
445 if self.noiseDots in ('direction', 'position', 'walk'):
446 self._dotsDir = np.random.rand(self.nDots) * _2pi
447 self._dotsDir[self._signalDots] = self.dir * _piOver180
448
449 def setFieldCoherence(self, val, op='', log=None):
450 """Usually you can use 'stim.attribute = value' syntax instead, but use
451 this method if you need to suppress the log message.
452 """
453 setAttribute(self, 'coherence', val, log, op) # calls attributeSetter
454
455 @attributeSetter
456 def dir(self, dir):
457 """float (degrees). direction of the coherent dots. :ref:`operations
458 <attrib-operations>` are supported.
459 """
460 # check which dots are signal before setting new dir
461 signalDots = self._dotsDir == (self.dir * _piOver180)
462 self.__dict__['dir'] = dir
463
464 # dots currently moving in the signal direction also need to update
465 # their direction
466 self._dotsDir[signalDots] = self.dir * _piOver180
467
468 def setDir(self, val, op='', log=None):
469 """Usually you can use 'stim.attribute = value' syntax instead, but use
470 this method if you need to suppress the log message.
471 """
472 setAttribute(self, 'dir', val, log, op)
473
474 @attributeSetter
475 def speed(self, speed):
476 """float. speed of the dots (in *units*/frame). :ref:`operations
477 <attrib-operations>` are supported.
478 """
479 self.__dict__['speed'] = speed
480
481 def setSpeed(self, val, op='', log=None):
482 """Usually you can use 'stim.attribute = value' syntax instead, but use
483 this method if you need to suppress the log message.
484
485 """
486 setAttribute(self, 'speed', val, log, op)
487
488 def draw(self, win=None):
489 """Draw the stimulus in its relevant window. You must call this method
490 after every MyWin.flip() if you want the stimulus to appear on that
491 frame and then update the screen again.
492
493 Parameters
494 ----------
495 win : window.Window, optional
496 Window to draw dots to. If `None`, dots will be drawn to the parent
497 window.
498
499 """
500 if win is None:
501 win = self.win
502 self._selectWindow(win)
503
504 self._update_dotsXY()
505
506 GL.glPushMatrix() # push before drawing, pop after
507
508 # draw the dots
509 if self.element is None:
510 win.setScale('pix')
511 GL.glPointSize(self.dotSize)
512
513 # load Null textures into multitexteureARB - they modulate with
514 # glColor
515 GL.glActiveTexture(GL.GL_TEXTURE0)
516 GL.glEnable(GL.GL_TEXTURE_2D)
517 GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
518 GL.glActiveTexture(GL.GL_TEXTURE1)
519 GL.glEnable(GL.GL_TEXTURE_2D)
520 GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
521
522 CPCD = ctypes.POINTER(ctypes.c_double)
523 GL.glVertexPointer(2, GL.GL_DOUBLE, 0,
524 self.verticesPix.ctypes.data_as(CPCD))
525 GL.glColor4f(*self._foreColor.render('rgba1'))
526 GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
527 GL.glDrawArrays(GL.GL_POINTS, 0, self.nDots)
528 GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
529 else:
530 # we don't want to do the screen scaling twice so for each dot
531 # subtract the screen centre
532 initialDepth = self.element.depth
533 for pointN in range(0, self.nDots):
534 _p = self.verticesPix[pointN, :] + self.fieldPos
535 self.element.setPos(_p)
536 self.element.draw()
537 # reset depth before going to next frame
538 self.element.setDepth(initialDepth)
539 GL.glPopMatrix()
540
541 def _newDotsXY(self, nDots):
542 """Returns a uniform spread of dots, according to the `fieldShape` and
543 `fieldSize`.
544
545 Parameters
546 ----------
547 nDots : int
548 Number of dots to sample.
549
550 Returns
551 -------
552 ndarray
553 Nx2 array of X and Y positions of dots.
554
555 Examples
556 --------
557 Create a new array of dot positions::
558
559 dots = self._newDots(nDots)
560
561 """
562 if self.fieldShape == 'circle':
563 length = np.sqrt(np.random.uniform(0, 1, (nDots,)))
564 angle = np.random.uniform(0., _2pi, (nDots,))
565
566 newDots = np.zeros((nDots, 2))
567 newDots[:, 0] = length * np.cos(angle)
568 newDots[:, 1] = length * np.sin(angle)
569
570 newDots *= self.fieldSize * .5
571 else:
572 newDots = np.random.uniform(-0.5, 0.5, size = (nDots, 2)) * self.fieldSize
573
574 return newDots
575
576 def refreshDots(self):
577 """Callable user function to choose a new set of dots."""
578 self.vertices = self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots)
579
580 # Don't allocate another array if the new number of dots is equal to
581 # the last.
582 if self.nDots != len(self._deadDots):
583 self._deadDots = np.zeros(self.nDots, dtype=bool)
584
585 def _update_dotsXY(self):
586 """The user shouldn't call this - its gets done within draw().
587 """
588 # Find dead dots, update positions, get new positions for
589 # dead and out-of-bounds
590 # renew dead dots
591 if self.dotLife > 0: # if less than zero ignore it
592 # decrement. Then dots to be reborn will be negative
593 self._dotsLife -= 1
594 self._deadDots[:] = (self._dotsLife <= 0)
595 self._dotsLife[self._deadDots] = self.dotLife
596 else:
597 self._deadDots[:] = False
598
599 # update XY based on speed and dir
600 # NB self._dotsDir is in radians, but self.dir is in degs
601 # update which are the noise/signal dots
602 if self.signalDots == 'different':
603 # **up to version 1.70.00 this was the other way around,
604 # not in keeping with Scase et al**
605 # noise and signal dots change identity constantly
606 np.random.shuffle(self._dotsDir)
607 # and then update _signalDots from that
608 self._signalDots = (self._dotsDir == (self.dir * _piOver180))
609
610 # update the locations of signal and noise; 0 radians=East!
611 reshape = np.reshape
612 if self.noiseDots == 'walk':
613 # noise dots are ~self._signalDots
614 sig = np.random.rand(np.sum(~self._signalDots))
615 self._dotsDir[~self._signalDots] = sig * _2pi
616 # then update all positions from dir*speed
617 cosDots = reshape(np.cos(self._dotsDir), (self.nDots,))
618 sinDots = reshape(np.sin(self._dotsDir), (self.nDots,))
619 self._verticesBase[:, 0] += self.speed * cosDots
620 self._verticesBase[:, 1] += self.speed * sinDots
621 elif self.noiseDots == 'direction':
622 # simply use the stored directions to update position
623 cosDots = reshape(np.cos(self._dotsDir), (self.nDots,))
624 sinDots = reshape(np.sin(self._dotsDir), (self.nDots,))
625 self._verticesBase[:, 0] += self.speed * cosDots
626 self._verticesBase[:, 1] += self.speed * sinDots
627 elif self.noiseDots == 'position':
628 # update signal dots
629 sd = self._signalDots
630 sdSum = self._signalDots.sum()
631 cosDots = reshape(np.cos(self._dotsDir[sd]), (sdSum,))
632 sinDots = reshape(np.sin(self._dotsDir[sd]), (sdSum,))
633 self._verticesBase[sd, 0] += self.speed * cosDots
634 self._verticesBase[sd, 1] += self.speed * sinDots
635 # update noise dots
636 self._deadDots[:] = self._deadDots + (~self._signalDots)
637
638 # handle boundaries of the field
639 if self.fieldShape in (None, 'square', 'sqr'):
640 out0 = (np.abs(self._verticesBase[:, 0]) > .5 * self.fieldSize[0])
641 out1 = (np.abs(self._verticesBase[:, 1]) > .5 * self.fieldSize[1])
642 outofbounds = out0 + out1
643 else:
644 # transform to a normalised circle (radius = 1 all around)
645 # then to polar coords to check
646 # the normalised XY position (where radius should be < 1)
647 normXY = self._verticesBase / .5 / self.fieldSize
648 # add out-of-bounds to those that need replacing
649 outofbounds = np.hypot(normXY[:, 0], normXY[:, 1]) > 1.
650
651 # update any dead dots
652 nDead = self._deadDots.sum()
653 if nDead:
654 self._verticesBase[self._deadDots, :] = self._newDotsXY(nDead)
655
656 # Reposition any dots that have gone out of bounds. Net effect is to
657 # place dot one step inside the boundary on the other side of the
658 # aperture.
659 nOutOfBounds = outofbounds.sum()
660 if nOutOfBounds:
661 self._verticesBase[outofbounds, :] = self._newDotsXY(nOutOfBounds)
662
663 self.vertices = self._verticesBase
664
665 # update the pixel XY coordinates in pixels (using _BaseVisual class)
666 self._updateVertices()
667
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/visual/dot.py b/psychopy/visual/dot.py
--- a/psychopy/visual/dot.py
+++ b/psychopy/visual/dot.py
@@ -660,7 +660,7 @@
if nOutOfBounds:
self._verticesBase[outofbounds, :] = self._newDotsXY(nOutOfBounds)
- self.vertices = self._verticesBase
+ self.vertices = self._verticesBase / self.fieldSize
# update the pixel XY coordinates in pixels (using _BaseVisual class)
self._updateVertices()
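
Why the one-line change works: `_verticesBase` is kept in field units (spanning roughly ±`fieldSize`/2), while the shared `_updateVertices()` machinery expects `self.vertices` in normalised units (±0.5) and rescales them by the stimulus size when computing pixel coordinates. Without the division, the field-unit coordinates are multiplied by `fieldSize` a second time, which would explain the displayed extent growing for large field sizes (2.5 deg looks bigger) and shrinking for small ones (0.5 height looks smaller). A standalone NumPy sketch of the restored invariant (not PsychoPy code):

```python
import numpy as np

fieldSize = np.array([2.5, 2.5])          # field size in stimulus units
verticesBase = np.array([[1.25, -1.25],   # a dot at a field corner
                         [0.0, 0.0]])     # a dot at the field centre

vertices = verticesBase / fieldSize       # normalised to the +/-0.5 range
rescaled = vertices * fieldSize           # what the base class re-applies via `size`
assert np.allclose(rescaled, verticesBase)  # displayed extent now equals fieldSize
```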
| {"golden_diff": "diff --git a/psychopy/visual/dot.py b/psychopy/visual/dot.py\n--- a/psychopy/visual/dot.py\n+++ b/psychopy/visual/dot.py\n@@ -660,7 +660,7 @@\n if nOutOfBounds:\n self._verticesBase[outofbounds, :] = self._newDotsXY(nOutOfBounds)\n \n- self.vertices = self._verticesBase\n+ self.vertices = self._verticesBase / self.fieldSize\n \n # update the pixel XY coordinates in pixels (using _BaseVisual class)\n self._updateVertices()\n", "issue": "DotStim size parameter does not correspond to displayed size\nThe size attribute of a DotStim does not correspond to its actual displayed size. For example, below are two polygons each set to have a size of (2.5, 2.5) deg, and a DotStim notionally of the same size and units:\r\n\r\n\r\n\r\nThis is not a diameter/radius confusion, as doubling the size of the polygons still does not match the extend of the DotStim field (see https://discourse.psychopy.org/t/dotstim-fieldsize-argument-has-unclear-units/29520/3 )\r\n\r\nWhen using other units, such as `height` rather than `deg`, the displayed size does not seem to lawfully correspond to the specified values (see the initial post in the Discourse thread above).\r\n\r\n***This then raises questions about whether the specified dot speeds are correct, if the stimulus doesn't know its own angular extent.***\r\n\r\nBelow is another example, where all three stimuli were set to have sizes of (0.5, 0.5) in height units. In this case, the DotStim is now markedly smaller than the reference polygons:\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"This stimulus class defines a field of dots with an update rule that\ndetermines how they change on every call to the .draw() method.\n\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Bugfix by Andrew Schofield.\n# Replaces out of bounds but still live dots at opposite edge of aperture \n# instead of randomly within the field. 
This stops the concentration of dots at \n# one side of field when lifetime is long.\n# Update the dot direction immediately for 'walk' as otherwise when the \n# coherence varies some signal dots will inherit the random directions of \n# previous walking dots.\n# Provide a visible wrapper function to refresh all the dot locations so that \n# the whole field can be more easily refreshed between trials.\n\n# Ensure setting pyglet.options['debug_gl'] to False is done prior to any\n# other calls to pyglet or pyglet submodules, otherwise it may not get picked\n# up by the pyglet GL engine and have no effect.\n# Shaders will work but require OpenGL2.0 drivers AND PyOpenGL3.0+\nimport pyglet\npyglet.options['debug_gl'] = False\nimport ctypes\nGL = pyglet.gl\n\nimport psychopy # so we can get the __path__\nfrom psychopy import logging\n\n# tools must only be imported *after* event or MovieStim breaks on win32\n# (JWP has no idea why!)\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\nfrom psychopy.tools.arraytools import val2array\nfrom psychopy.visual.basevisual import (BaseVisualStim, ColorMixin,\n ContainerMixin, WindowMixin)\nfrom psychopy.layout import Size\n\nimport numpy as np\n\n# some constants\n_piOver2 = np.pi / 2.\n_piOver180 = np.pi / 180.\n_2pi = 2 * np.pi\n\n\nclass DotStim(BaseVisualStim, ColorMixin, ContainerMixin):\n \"\"\"This stimulus class defines a field of dots with an update rule that\n determines how they change on every call to the .draw() method.\n\n This single class can be used to generate a wide variety of dot motion\n types. For a review of possible types and their pros and cons see Scase,\n Braddick & Raymond (1996). All six possible motions they describe can be\n generated with appropriate choices of the `signalDots` (which determines\n whether signal dots are the 'same' or 'different' on each frame),\n `noiseDots` (which determines the locations of the noise dots on each frame)\n and the `dotLife` (which determines for how many frames the dot will\n continue before being regenerated).\n\n The default settings (as of v1.70.00) is for the noise dots to have\n identical velocity but random direction and signal dots remain the 'same'\n (once a signal dot, always a signal dot).\n\n For further detail about the different configurations see :ref:`dots` in the\n Builder Components section of the documentation.\n\n If further customisation is required, then the DotStim should be subclassed\n and its _update_dotsXY and _newDotsXY methods overridden.\n\n The maximum number of dots that can be drawn is limited by system\n performance.\n\n Attributes\n ----------\n fieldShape : str\n *'sqr'* or 'circle'. Defines the envelope used to present the dots. If\n changed while drawing, dots outside new envelope will be respawned.\n dotSize : float\n Dot size specified in pixels (overridden if `element` is specified).\n :ref:`operations <attrib-operations>` are supported.\n dotLife : int\n Number of frames each dot lives for (-1=infinite). Dot lives are\n initiated randomly from a uniform distribution from 0 to dotLife. If\n changed while drawing, the lives of all dots will be randomly initiated\n again.\n signalDots : str\n If 'same' then the signal and noise dots are constant. If 'different'\n then the choice of which is signal and which is noise gets randomised on\n each frame. This corresponds to Scase et al's (1996) categories of RDK.\n noiseDots : str\n Determines the behaviour of the noise dots, taken directly from Scase et\n al's (1996) categories. 
For 'position', noise dots take a random\n position every frame. For 'direction' noise dots follow a random, but\n constant direction. For 'walk' noise dots vary their direction every\n frame, but keep a constant speed.\n element : object\n This can be any object that has a ``.draw()`` method and a\n ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!! DotStim\n assumes that the element uses pixels as units. ``None`` defaults to\n dots.\n fieldPos : array_like\n Specifying the location of the centre of the stimulus using a\n :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more\n documentation/examples on how to set position.\n :ref:`operations <attrib-operations>` are supported.\n fieldSize : array_like\n Specifying the size of the field of dots using a\n :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more\n documentation/examples on how to set position.\n :ref:`operations <attrib-operations>` are supported.\n coherence : float\n Change the coherence (%) of the DotStim. This will be rounded according\n to the number of dots in the stimulus.\n dir : float\n Direction of the coherent dots in degrees. :ref:`operations\n <attrib-operations>` are supported.\n speed : float\n Speed of the dots (in *units*/frame). :ref:`operations\n <attrib-operations>` are supported.\n\n \"\"\"\n def __init__(self,\n win,\n units='',\n nDots=1,\n coherence=0.5,\n fieldPos=(0.0, 0.0),\n fieldSize=(1.0, 1.0),\n fieldShape='sqr',\n fieldAnchor=\"center\",\n dotSize=2.0,\n dotLife=3,\n dir=0.0,\n speed=0.5,\n rgb=None,\n color=(1.0, 1.0, 1.0),\n colorSpace='rgb',\n opacity=None,\n contrast=1.0,\n depth=0,\n element=None,\n signalDots='same',\n noiseDots='direction',\n name=None,\n autoLog=None):\n \"\"\"\n Parameters\n ----------\n win : window.Window\n Window this stimulus is associated with.\n units : str\n Units to use.\n nDots : int\n Number of dots to present in the field.\n coherence : float\n Proportion of dots which are coherent. This value can be set using\n the `coherence` property after initialization.\n fieldPos : array_like\n (x,y) or [x,y] position of the field. This value can be set using\n the `fieldPos` property after initialization.\n fieldSize : array_like, int or float\n (x,y) or [x,y] or single value (applied to both dimensions). Sizes\n can be negative and can extend beyond the window. This value can be\n set using the `fieldSize` property after initialization.\n fieldShape : str\n Defines the envelope used to present the dots. If changed while\n drawing by setting the `fieldShape` property, dots outside new\n envelope will be respawned., valid values are 'square', 'sqr' or\n 'circle'.\n dotSize : array_like or float\n Size of the dots. If given an array, the sizes of individual dots\n will be set. The array must have length `nDots`. If a single value\n is given, all dots will be set to the same size.\n dotLife : int\n Lifetime of a dot in frames. Dot lives are initiated randomly from a\n uniform distribution from 0 to dotLife. If changed while drawing,\n the lives of all dots will be randomly initiated again. A value of\n -1 results in dots having an infinite lifetime. This value can be\n set using the `dotLife` property after initialization.\n dir : float\n Direction of the coherent dots in degrees. At 0 degrees, coherent\n dots will move from left to right. Increasing the angle will rotate\n the direction counter-clockwise. This value can be set using the\n `dir` property after initialization.\n speed : float\n Speed of the dots (in *units* per frame). 
This value can be set\n using the `speed` property after initialization.\n rgb : array_like, optional\n Color of the dots in form (r, g, b) or [r, g, b]. **Deprecated**,\n use `color` instead.\n color : array_like or str\n Color of the dots in form (r, g, b) or [r, g, b].\n colorSpace : str\n Colorspace to use.\n opacity : float\n Opacity of the dots from 0.0 to 1.0.\n contrast : float\n Contrast of the dots 0.0 to 1.0. This value is simply multiplied by\n the `color` value.\n depth : float\n **Deprecated**, depth is now controlled simply by drawing order.\n element : object\n This can be any object that has a ``.draw()`` method and a\n ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!!\n DotStim assumes that the element uses pixels as units.\n ``None`` defaults to dots.\n signalDots : str\n If 'same' then the signal and noise dots are constant. If different\n then the choice of which is signal and which is noise gets\n randomised on each frame. This corresponds to Scase et al's (1996)\n categories of RDK. This value can be set using the `signalDots`\n property after initialization.\n noiseDots : str\n Determines the behaviour of the noise dots, taken directly from\n Scase et al's (1996) categories. For 'position', noise dots take a\n random position every frame. For 'direction' noise dots follow a\n random, but constant direction. For 'walk' noise dots vary their\n direction every frame, but keep a constant speed. This value can be\n set using the `noiseDots` property after initialization.\n name : str, optional\n Optional name to use for logging.\n autoLog : bool\n Enable automatic logging.\n\n \"\"\"\n # what local vars are defined (these are the init params) for use by\n # __repr__\n self._initParams = __builtins__['dir']()\n self._initParams.remove('self')\n\n super(DotStim, self).__init__(win, units=units, name=name,\n autoLog=False) # set at end of init\n\n self.nDots = nDots\n # pos and size are ambiguous for dots so DotStim explicitly has\n # fieldPos = pos, fieldSize=size and then dotSize as additional param\n self.fieldPos = fieldPos # self.pos is also set here\n self.fieldSize = val2array(fieldSize, False) # self.size is also set\n if type(dotSize) in (tuple, list):\n self.dotSize = np.array(dotSize)\n else:\n self.dotSize = dotSize\n if self.win.useRetina:\n self.dotSize *= 2 # double dot size to make up for 1/2-size pixels\n self.fieldShape = fieldShape\n self.__dict__['dir'] = dir\n self.speed = speed\n self.element = element\n self.dotLife = dotLife\n self.signalDots = signalDots\n\n if rgb != None:\n logging.warning(\"Use of rgb arguments to stimuli are deprecated.\"\n \" Please use color and colorSpace args instead\")\n self.colorSpace = 'rgba'\n self.color = rgb\n else:\n self.colorSpace = colorSpace\n self.color = color\n self.opacity = opacity\n self.contrast = float(contrast)\n self.depth = depth\n\n # initialise the dots themselves - give them all random dir and then\n # fix the first n in the array to have the direction specified\n self.coherence = coherence # using the attributeSetter\n self.noiseDots = noiseDots\n\n # initialise a random array of X,Y\n self.vertices = self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots)\n # all dots have the same speed\n self._dotsSpeed = np.ones(self.nDots, dtype=float) * self.speed\n # abs() means we can ignore the -1 case (no life)\n self._dotsLife = np.abs(dotLife) * np.random.rand(self.nDots)\n # pre-allocate array for flagging dead dots\n self._deadDots = np.zeros(self.nDots, dtype=bool)\n # set 
directions (only used when self.noiseDots='direction')\n self._dotsDir = np.random.rand(self.nDots) * _2pi\n self._dotsDir[self._signalDots] = self.dir * _piOver180\n\n self._update_dotsXY()\n\n self.anchor = fieldAnchor\n\n # set autoLog now that params have been initialised\n wantLog = autoLog is None and self.win.autoLog\n self.__dict__['autoLog'] = autoLog or wantLog\n if self.autoLog:\n logging.exp(\"Created %s = %s\" % (self.name, str(self)))\n\n def set(self, attrib, val, op='', log=None):\n \"\"\"DEPRECATED: DotStim.set() is obsolete and may not be supported\n in future versions of PsychoPy. Use the specific method for each\n parameter instead (e.g. setFieldPos(), setCoherence()...).\n \"\"\"\n self._set(attrib, val, op, log=log)\n\n @attributeSetter\n def fieldShape(self, fieldShape):\n \"\"\"*'sqr'* or 'circle'. Defines the envelope used to present the dots.\n If changed while drawing, dots outside new envelope will be respawned.\n \"\"\"\n self.__dict__['fieldShape'] = fieldShape\n\n @property\n def anchor(self):\n return WindowMixin.anchor.fget(self)\n\n @anchor.setter\n def anchor(self, value):\n WindowMixin.anchor.fset(self, value)\n\n def setAnchor(self, value, log=None):\n setAttribute(self, 'anchor', value, log)\n\n @property\n def dotSize(self):\n \"\"\"Float specified in pixels (overridden if `element` is specified).\n :ref:`operations <attrib-operations>` are supported.\"\"\"\n if hasattr(self, \"_dotSize\"):\n return getattr(self._dotSize, 'pix')[0]\n\n @dotSize.setter\n def dotSize(self, value):\n self._dotSize = Size(value, units='pix', win=self.win)\n\n @attributeSetter\n def dotLife(self, dotLife):\n \"\"\"Int. Number of frames each dot lives for (-1=infinite).\n Dot lives are initiated randomly from a uniform distribution\n from 0 to dotLife. If changed while drawing, the lives of all\n dots will be randomly initiated again.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n self.__dict__['dotLife'] = dotLife\n self._dotsLife = abs(self.dotLife) * np.random.rand(self.nDots)\n\n @attributeSetter\n def signalDots(self, signalDots):\n \"\"\"str - 'same' or *'different'*\n If 'same' then the signal and noise dots are constant. If different\n then the choice of which is signal and which is noise gets\n randomised on each frame. This corresponds to Scase et al's (1996)\n categories of RDK.\n \"\"\"\n self.__dict__['signalDots'] = signalDots\n\n @attributeSetter\n def noiseDots(self, noiseDots):\n \"\"\"str - *'direction'*, 'position' or 'walk'\n Determines the behaviour of the noise dots, taken directly from\n Scase et al's (1996) categories. For 'position', noise dots take a\n random position every frame. For 'direction' noise dots follow a\n random, but constant direction. For 'walk' noise dots vary their\n direction every frame, but keep a constant speed.\n \"\"\"\n self.__dict__['noiseDots'] = noiseDots\n self.coherence = self.coherence # update using attributeSetter\n\n @attributeSetter\n def element(self, element):\n \"\"\"*None* or a visual stimulus object\n This can be any object that has a ``.draw()`` method and a\n ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!!\n DotStim assumes that the element uses pixels as units.\n ``None`` defaults to dots.\n\n See `ElementArrayStim` for a faster implementation of this idea.\n \"\"\"\n self.__dict__['element'] = element\n\n @attributeSetter\n def fieldPos(self, pos):\n \"\"\"Specifying the location of the centre of the stimulus\n using a :ref:`x,y-pair <attrib-xy>`.\n See e.g. 
:class:`.ShapeStim` for more documentation / examples\n on how to set position.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n # Isn't there a way to use BaseVisualStim.pos.__doc__ as docstring\n # here?\n self.pos = pos # using BaseVisualStim. we'll store this as both\n self.__dict__['fieldPos'] = self.pos\n\n def setFieldPos(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'fieldPos', val, log, op) # calls attributeSetter\n\n def setPos(self, newPos=None, operation='', units=None, log=None):\n \"\"\"Obsolete - users should use setFieldPos instead of setPos\n \"\"\"\n logging.error(\"User called DotStim.setPos(pos). \"\n \"Use DotStim.SetFieldPos(pos) instead.\")\n\n def setFieldSize(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'fieldSize', val, log, op) # calls attributeSetter\n\n @attributeSetter\n def fieldSize(self, size):\n \"\"\"Specifying the size of the field of dots using a\n :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more \n documentation/examples on how to set position.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n # Isn't there a way to use BaseVisualStim.pos.__doc__ as docstring\n # here?\n self.size = size # using BaseVisualStim. we'll store this as both\n self.__dict__['fieldSize'] = self.size\n\n @attributeSetter\n def coherence(self, coherence):\n \"\"\"Scalar between 0 and 1.\n\n Change the coherence (%) of the DotStim. This will be rounded according \n to the number of dots in the stimulus.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n if not 0 <= coherence <= 1:\n raise ValueError('DotStim.coherence must be between 0 and 1')\n\n _cohDots = coherence * self.nDots\n\n self.__dict__['coherence'] = round(_cohDots) /self.nDots\n self._signalDots = np.zeros(self.nDots, dtype=bool)\n self._signalDots[0:int(self.coherence * self.nDots)] = True\n # for 'direction' method we need to update the direction of the number\n # of signal dots immediately, but for other methods it will be done\n # during updateXY\n\n # NB - AJS Actually you need to do this for 'walk' also\n # otherwise would be signal dots adopt random directions when the become\n # sinal dots in later trails\n if self.noiseDots in ('direction', 'position', 'walk'):\n self._dotsDir = np.random.rand(self.nDots) * _2pi\n self._dotsDir[self._signalDots] = self.dir * _piOver180\n\n def setFieldCoherence(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'coherence', val, log, op) # calls attributeSetter\n\n @attributeSetter\n def dir(self, dir):\n \"\"\"float (degrees). direction of the coherent dots. 
:ref:`operations \n <attrib-operations>` are supported.\n \"\"\"\n # check which dots are signal before setting new dir\n signalDots = self._dotsDir == (self.dir * _piOver180)\n self.__dict__['dir'] = dir\n\n # dots currently moving in the signal direction also need to update\n # their direction\n self._dotsDir[signalDots] = self.dir * _piOver180\n\n def setDir(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'dir', val, log, op)\n\n @attributeSetter\n def speed(self, speed):\n \"\"\"float. speed of the dots (in *units*/frame). :ref:`operations \n <attrib-operations>` are supported.\n \"\"\"\n self.__dict__['speed'] = speed\n\n def setSpeed(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \n \"\"\"\n setAttribute(self, 'speed', val, log, op)\n\n def draw(self, win=None):\n \"\"\"Draw the stimulus in its relevant window. You must call this method\n after every MyWin.flip() if you want the stimulus to appear on that\n frame and then update the screen again.\n\n Parameters\n ----------\n win : window.Window, optional\n Window to draw dots to. If `None`, dots will be drawn to the parent\n window.\n\n \"\"\"\n if win is None:\n win = self.win\n self._selectWindow(win)\n\n self._update_dotsXY()\n\n GL.glPushMatrix() # push before drawing, pop after\n\n # draw the dots\n if self.element is None:\n win.setScale('pix')\n GL.glPointSize(self.dotSize)\n\n # load Null textures into multitexteureARB - they modulate with\n # glColor\n GL.glActiveTexture(GL.GL_TEXTURE0)\n GL.glEnable(GL.GL_TEXTURE_2D)\n GL.glBindTexture(GL.GL_TEXTURE_2D, 0)\n GL.glActiveTexture(GL.GL_TEXTURE1)\n GL.glEnable(GL.GL_TEXTURE_2D)\n GL.glBindTexture(GL.GL_TEXTURE_2D, 0)\n\n CPCD = ctypes.POINTER(ctypes.c_double)\n GL.glVertexPointer(2, GL.GL_DOUBLE, 0,\n self.verticesPix.ctypes.data_as(CPCD))\n GL.glColor4f(*self._foreColor.render('rgba1'))\n GL.glEnableClientState(GL.GL_VERTEX_ARRAY)\n GL.glDrawArrays(GL.GL_POINTS, 0, self.nDots)\n GL.glDisableClientState(GL.GL_VERTEX_ARRAY)\n else:\n # we don't want to do the screen scaling twice so for each dot\n # subtract the screen centre\n initialDepth = self.element.depth\n for pointN in range(0, self.nDots):\n _p = self.verticesPix[pointN, :] + self.fieldPos\n self.element.setPos(_p)\n self.element.draw()\n # reset depth before going to next frame\n self.element.setDepth(initialDepth)\n GL.glPopMatrix()\n\n def _newDotsXY(self, nDots):\n \"\"\"Returns a uniform spread of dots, according to the `fieldShape` and\n `fieldSize`.\n\n Parameters\n ----------\n nDots : int\n Number of dots to sample.\n\n Returns\n -------\n ndarray\n Nx2 array of X and Y positions of dots.\n\n Examples\n --------\n Create a new array of dot positions::\n\n dots = self._newDots(nDots)\n\n \"\"\"\n if self.fieldShape == 'circle':\n length = np.sqrt(np.random.uniform(0, 1, (nDots,)))\n angle = np.random.uniform(0., _2pi, (nDots,))\n\n newDots = np.zeros((nDots, 2))\n newDots[:, 0] = length * np.cos(angle)\n newDots[:, 1] = length * np.sin(angle)\n\n newDots *= self.fieldSize * .5\n else:\n newDots = np.random.uniform(-0.5, 0.5, size = (nDots, 2)) * self.fieldSize\n\n return newDots\n\n def refreshDots(self):\n \"\"\"Callable user function to choose a new set of dots.\"\"\"\n self.vertices = self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots)\n\n 
# Don't allocate another array if the new number of dots is equal to\n # the last.\n if self.nDots != len(self._deadDots):\n self._deadDots = np.zeros(self.nDots, dtype=bool)\n\n def _update_dotsXY(self):\n \"\"\"The user shouldn't call this - its gets done within draw().\n \"\"\"\n # Find dead dots, update positions, get new positions for\n # dead and out-of-bounds\n # renew dead dots\n if self.dotLife > 0: # if less than zero ignore it\n # decrement. Then dots to be reborn will be negative\n self._dotsLife -= 1\n self._deadDots[:] = (self._dotsLife <= 0)\n self._dotsLife[self._deadDots] = self.dotLife\n else:\n self._deadDots[:] = False\n\n # update XY based on speed and dir\n # NB self._dotsDir is in radians, but self.dir is in degs\n # update which are the noise/signal dots\n if self.signalDots == 'different':\n # **up to version 1.70.00 this was the other way around,\n # not in keeping with Scase et al**\n # noise and signal dots change identity constantly\n np.random.shuffle(self._dotsDir)\n # and then update _signalDots from that\n self._signalDots = (self._dotsDir == (self.dir * _piOver180))\n\n # update the locations of signal and noise; 0 radians=East!\n reshape = np.reshape\n if self.noiseDots == 'walk':\n # noise dots are ~self._signalDots\n sig = np.random.rand(np.sum(~self._signalDots))\n self._dotsDir[~self._signalDots] = sig * _2pi\n # then update all positions from dir*speed\n cosDots = reshape(np.cos(self._dotsDir), (self.nDots,))\n sinDots = reshape(np.sin(self._dotsDir), (self.nDots,))\n self._verticesBase[:, 0] += self.speed * cosDots\n self._verticesBase[:, 1] += self.speed * sinDots\n elif self.noiseDots == 'direction':\n # simply use the stored directions to update position\n cosDots = reshape(np.cos(self._dotsDir), (self.nDots,))\n sinDots = reshape(np.sin(self._dotsDir), (self.nDots,))\n self._verticesBase[:, 0] += self.speed * cosDots\n self._verticesBase[:, 1] += self.speed * sinDots\n elif self.noiseDots == 'position':\n # update signal dots\n sd = self._signalDots\n sdSum = self._signalDots.sum()\n cosDots = reshape(np.cos(self._dotsDir[sd]), (sdSum,))\n sinDots = reshape(np.sin(self._dotsDir[sd]), (sdSum,))\n self._verticesBase[sd, 0] += self.speed * cosDots\n self._verticesBase[sd, 1] += self.speed * sinDots\n # update noise dots\n self._deadDots[:] = self._deadDots + (~self._signalDots)\n\n # handle boundaries of the field\n if self.fieldShape in (None, 'square', 'sqr'):\n out0 = (np.abs(self._verticesBase[:, 0]) > .5 * self.fieldSize[0])\n out1 = (np.abs(self._verticesBase[:, 1]) > .5 * self.fieldSize[1])\n outofbounds = out0 + out1\n else:\n # transform to a normalised circle (radius = 1 all around)\n # then to polar coords to check\n # the normalised XY position (where radius should be < 1)\n normXY = self._verticesBase / .5 / self.fieldSize\n # add out-of-bounds to those that need replacing\n outofbounds = np.hypot(normXY[:, 0], normXY[:, 1]) > 1.\n\n # update any dead dots\n nDead = self._deadDots.sum()\n if nDead:\n self._verticesBase[self._deadDots, :] = self._newDotsXY(nDead)\n\n # Reposition any dots that have gone out of bounds. 
Net effect is to\n # place dot one step inside the boundary on the other side of the\n # aperture.\n nOutOfBounds = outofbounds.sum()\n if nOutOfBounds:\n self._verticesBase[outofbounds, :] = self._newDotsXY(nOutOfBounds)\n\n self.vertices = self._verticesBase\n\n # update the pixel XY coordinates in pixels (using _BaseVisual class)\n self._updateVertices()\n", "path": "psychopy/visual/dot.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"This stimulus class defines a field of dots with an update rule that\ndetermines how they change on every call to the .draw() method.\n\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Bugfix by Andrew Schofield.\n# Replaces out of bounds but still live dots at opposite edge of aperture \n# instead of randomly within the field. This stops the concentration of dots at \n# one side of field when lifetime is long.\n# Update the dot direction immediately for 'walk' as otherwise when the \n# coherence varies some signal dots will inherit the random directions of \n# previous walking dots.\n# Provide a visible wrapper function to refresh all the dot locations so that \n# the whole field can be more easily refreshed between trials.\n\n# Ensure setting pyglet.options['debug_gl'] to False is done prior to any\n# other calls to pyglet or pyglet submodules, otherwise it may not get picked\n# up by the pyglet GL engine and have no effect.\n# Shaders will work but require OpenGL2.0 drivers AND PyOpenGL3.0+\nimport pyglet\npyglet.options['debug_gl'] = False\nimport ctypes\nGL = pyglet.gl\n\nimport psychopy # so we can get the __path__\nfrom psychopy import logging\n\n# tools must only be imported *after* event or MovieStim breaks on win32\n# (JWP has no idea why!)\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\nfrom psychopy.tools.arraytools import val2array\nfrom psychopy.visual.basevisual import (BaseVisualStim, ColorMixin,\n ContainerMixin, WindowMixin)\nfrom psychopy.layout import Size\n\nimport numpy as np\n\n# some constants\n_piOver2 = np.pi / 2.\n_piOver180 = np.pi / 180.\n_2pi = 2 * np.pi\n\n\nclass DotStim(BaseVisualStim, ColorMixin, ContainerMixin):\n \"\"\"This stimulus class defines a field of dots with an update rule that\n determines how they change on every call to the .draw() method.\n\n This single class can be used to generate a wide variety of dot motion\n types. For a review of possible types and their pros and cons see Scase,\n Braddick & Raymond (1996). 
All six possible motions they describe can be\n generated with appropriate choices of the `signalDots` (which determines\n whether signal dots are the 'same' or 'different' on each frame),\n `noiseDots` (which determines the locations of the noise dots on each frame)\n and the `dotLife` (which determines for how many frames the dot will\n continue before being regenerated).\n\n The default settings (as of v1.70.00) is for the noise dots to have\n identical velocity but random direction and signal dots remain the 'same'\n (once a signal dot, always a signal dot).\n\n For further detail about the different configurations see :ref:`dots` in the\n Builder Components section of the documentation.\n\n If further customisation is required, then the DotStim should be subclassed\n and its _update_dotsXY and _newDotsXY methods overridden.\n\n The maximum number of dots that can be drawn is limited by system\n performance.\n\n Attributes\n ----------\n fieldShape : str\n *'sqr'* or 'circle'. Defines the envelope used to present the dots. If\n changed while drawing, dots outside new envelope will be respawned.\n dotSize : float\n Dot size specified in pixels (overridden if `element` is specified).\n :ref:`operations <attrib-operations>` are supported.\n dotLife : int\n Number of frames each dot lives for (-1=infinite). Dot lives are\n initiated randomly from a uniform distribution from 0 to dotLife. If\n changed while drawing, the lives of all dots will be randomly initiated\n again.\n signalDots : str\n If 'same' then the signal and noise dots are constant. If 'different'\n then the choice of which is signal and which is noise gets randomised on\n each frame. This corresponds to Scase et al's (1996) categories of RDK.\n noiseDots : str\n Determines the behaviour of the noise dots, taken directly from Scase et\n al's (1996) categories. For 'position', noise dots take a random\n position every frame. For 'direction' noise dots follow a random, but\n constant direction. For 'walk' noise dots vary their direction every\n frame, but keep a constant speed.\n element : object\n This can be any object that has a ``.draw()`` method and a\n ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!! DotStim\n assumes that the element uses pixels as units. ``None`` defaults to\n dots.\n fieldPos : array_like\n Specifying the location of the centre of the stimulus using a\n :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more\n documentation/examples on how to set position.\n :ref:`operations <attrib-operations>` are supported.\n fieldSize : array_like\n Specifying the size of the field of dots using a\n :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more\n documentation/examples on how to set position.\n :ref:`operations <attrib-operations>` are supported.\n coherence : float\n Change the coherence (%) of the DotStim. This will be rounded according\n to the number of dots in the stimulus.\n dir : float\n Direction of the coherent dots in degrees. :ref:`operations\n <attrib-operations>` are supported.\n speed : float\n Speed of the dots (in *units*/frame). 
:ref:`operations\n <attrib-operations>` are supported.\n\n \"\"\"\n def __init__(self,\n win,\n units='',\n nDots=1,\n coherence=0.5,\n fieldPos=(0.0, 0.0),\n fieldSize=(1.0, 1.0),\n fieldShape='sqr',\n fieldAnchor=\"center\",\n dotSize=2.0,\n dotLife=3,\n dir=0.0,\n speed=0.5,\n rgb=None,\n color=(1.0, 1.0, 1.0),\n colorSpace='rgb',\n opacity=None,\n contrast=1.0,\n depth=0,\n element=None,\n signalDots='same',\n noiseDots='direction',\n name=None,\n autoLog=None):\n \"\"\"\n Parameters\n ----------\n win : window.Window\n Window this stimulus is associated with.\n units : str\n Units to use.\n nDots : int\n Number of dots to present in the field.\n coherence : float\n Proportion of dots which are coherent. This value can be set using\n the `coherence` property after initialization.\n fieldPos : array_like\n (x,y) or [x,y] position of the field. This value can be set using\n the `fieldPos` property after initialization.\n fieldSize : array_like, int or float\n (x,y) or [x,y] or single value (applied to both dimensions). Sizes\n can be negative and can extend beyond the window. This value can be\n set using the `fieldSize` property after initialization.\n fieldShape : str\n Defines the envelope used to present the dots. If changed while\n drawing by setting the `fieldShape` property, dots outside new\n envelope will be respawned., valid values are 'square', 'sqr' or\n 'circle'.\n dotSize : array_like or float\n Size of the dots. If given an array, the sizes of individual dots\n will be set. The array must have length `nDots`. If a single value\n is given, all dots will be set to the same size.\n dotLife : int\n Lifetime of a dot in frames. Dot lives are initiated randomly from a\n uniform distribution from 0 to dotLife. If changed while drawing,\n the lives of all dots will be randomly initiated again. A value of\n -1 results in dots having an infinite lifetime. This value can be\n set using the `dotLife` property after initialization.\n dir : float\n Direction of the coherent dots in degrees. At 0 degrees, coherent\n dots will move from left to right. Increasing the angle will rotate\n the direction counter-clockwise. This value can be set using the\n `dir` property after initialization.\n speed : float\n Speed of the dots (in *units* per frame). This value can be set\n using the `speed` property after initialization.\n rgb : array_like, optional\n Color of the dots in form (r, g, b) or [r, g, b]. **Deprecated**,\n use `color` instead.\n color : array_like or str\n Color of the dots in form (r, g, b) or [r, g, b].\n colorSpace : str\n Colorspace to use.\n opacity : float\n Opacity of the dots from 0.0 to 1.0.\n contrast : float\n Contrast of the dots 0.0 to 1.0. This value is simply multiplied by\n the `color` value.\n depth : float\n **Deprecated**, depth is now controlled simply by drawing order.\n element : object\n This can be any object that has a ``.draw()`` method and a\n ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!!\n DotStim assumes that the element uses pixels as units.\n ``None`` defaults to dots.\n signalDots : str\n If 'same' then the signal and noise dots are constant. If different\n then the choice of which is signal and which is noise gets\n randomised on each frame. This corresponds to Scase et al's (1996)\n categories of RDK. This value can be set using the `signalDots`\n property after initialization.\n noiseDots : str\n Determines the behaviour of the noise dots, taken directly from\n Scase et al's (1996) categories. 
For 'position', noise dots take a\n random position every frame. For 'direction' noise dots follow a\n random, but constant direction. For 'walk' noise dots vary their\n direction every frame, but keep a constant speed. This value can be\n set using the `noiseDots` property after initialization.\n name : str, optional\n Optional name to use for logging.\n autoLog : bool\n Enable automatic logging.\n\n \"\"\"\n # what local vars are defined (these are the init params) for use by\n # __repr__\n self._initParams = __builtins__['dir']()\n self._initParams.remove('self')\n\n super(DotStim, self).__init__(win, units=units, name=name,\n autoLog=False) # set at end of init\n\n self.nDots = nDots\n # pos and size are ambiguous for dots so DotStim explicitly has\n # fieldPos = pos, fieldSize=size and then dotSize as additional param\n self.fieldPos = fieldPos # self.pos is also set here\n self.fieldSize = val2array(fieldSize, False) # self.size is also set\n if type(dotSize) in (tuple, list):\n self.dotSize = np.array(dotSize)\n else:\n self.dotSize = dotSize\n if self.win.useRetina:\n self.dotSize *= 2 # double dot size to make up for 1/2-size pixels\n self.fieldShape = fieldShape\n self.__dict__['dir'] = dir\n self.speed = speed\n self.element = element\n self.dotLife = dotLife\n self.signalDots = signalDots\n\n if rgb != None:\n logging.warning(\"Use of rgb arguments to stimuli are deprecated.\"\n \" Please use color and colorSpace args instead\")\n self.colorSpace = 'rgba'\n self.color = rgb\n else:\n self.colorSpace = colorSpace\n self.color = color\n self.opacity = opacity\n self.contrast = float(contrast)\n self.depth = depth\n\n # initialise the dots themselves - give them all random dir and then\n # fix the first n in the array to have the direction specified\n self.coherence = coherence # using the attributeSetter\n self.noiseDots = noiseDots\n\n # initialise a random array of X,Y\n self.vertices = self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots)\n # all dots have the same speed\n self._dotsSpeed = np.ones(self.nDots, dtype=float) * self.speed\n # abs() means we can ignore the -1 case (no life)\n self._dotsLife = np.abs(dotLife) * np.random.rand(self.nDots)\n # pre-allocate array for flagging dead dots\n self._deadDots = np.zeros(self.nDots, dtype=bool)\n # set directions (only used when self.noiseDots='direction')\n self._dotsDir = np.random.rand(self.nDots) * _2pi\n self._dotsDir[self._signalDots] = self.dir * _piOver180\n\n self._update_dotsXY()\n\n self.anchor = fieldAnchor\n\n # set autoLog now that params have been initialised\n wantLog = autoLog is None and self.win.autoLog\n self.__dict__['autoLog'] = autoLog or wantLog\n if self.autoLog:\n logging.exp(\"Created %s = %s\" % (self.name, str(self)))\n\n def set(self, attrib, val, op='', log=None):\n \"\"\"DEPRECATED: DotStim.set() is obsolete and may not be supported\n in future versions of PsychoPy. Use the specific method for each\n parameter instead (e.g. setFieldPos(), setCoherence()...).\n \"\"\"\n self._set(attrib, val, op, log=log)\n\n @attributeSetter\n def fieldShape(self, fieldShape):\n \"\"\"*'sqr'* or 'circle'. 
Defines the envelope used to present the dots.\n If changed while drawing, dots outside new envelope will be respawned.\n \"\"\"\n self.__dict__['fieldShape'] = fieldShape\n\n @property\n def anchor(self):\n return WindowMixin.anchor.fget(self)\n\n @anchor.setter\n def anchor(self, value):\n WindowMixin.anchor.fset(self, value)\n\n def setAnchor(self, value, log=None):\n setAttribute(self, 'anchor', value, log)\n\n @property\n def dotSize(self):\n \"\"\"Float specified in pixels (overridden if `element` is specified).\n :ref:`operations <attrib-operations>` are supported.\"\"\"\n if hasattr(self, \"_dotSize\"):\n return getattr(self._dotSize, 'pix')[0]\n\n @dotSize.setter\n def dotSize(self, value):\n self._dotSize = Size(value, units='pix', win=self.win)\n\n @attributeSetter\n def dotLife(self, dotLife):\n \"\"\"Int. Number of frames each dot lives for (-1=infinite).\n Dot lives are initiated randomly from a uniform distribution\n from 0 to dotLife. If changed while drawing, the lives of all\n dots will be randomly initiated again.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n self.__dict__['dotLife'] = dotLife\n self._dotsLife = abs(self.dotLife) * np.random.rand(self.nDots)\n\n @attributeSetter\n def signalDots(self, signalDots):\n \"\"\"str - 'same' or *'different'*\n If 'same' then the signal and noise dots are constant. If different\n then the choice of which is signal and which is noise gets\n randomised on each frame. This corresponds to Scase et al's (1996)\n categories of RDK.\n \"\"\"\n self.__dict__['signalDots'] = signalDots\n\n @attributeSetter\n def noiseDots(self, noiseDots):\n \"\"\"str - *'direction'*, 'position' or 'walk'\n Determines the behaviour of the noise dots, taken directly from\n Scase et al's (1996) categories. For 'position', noise dots take a\n random position every frame. For 'direction' noise dots follow a\n random, but constant direction. For 'walk' noise dots vary their\n direction every frame, but keep a constant speed.\n \"\"\"\n self.__dict__['noiseDots'] = noiseDots\n self.coherence = self.coherence # update using attributeSetter\n\n @attributeSetter\n def element(self, element):\n \"\"\"*None* or a visual stimulus object\n This can be any object that has a ``.draw()`` method and a\n ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!!\n DotStim assumes that the element uses pixels as units.\n ``None`` defaults to dots.\n\n See `ElementArrayStim` for a faster implementation of this idea.\n \"\"\"\n self.__dict__['element'] = element\n\n @attributeSetter\n def fieldPos(self, pos):\n \"\"\"Specifying the location of the centre of the stimulus\n using a :ref:`x,y-pair <attrib-xy>`.\n See e.g. :class:`.ShapeStim` for more documentation / examples\n on how to set position.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n # Isn't there a way to use BaseVisualStim.pos.__doc__ as docstring\n # here?\n self.pos = pos # using BaseVisualStim. we'll store this as both\n self.__dict__['fieldPos'] = self.pos\n\n def setFieldPos(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'fieldPos', val, log, op) # calls attributeSetter\n\n def setPos(self, newPos=None, operation='', units=None, log=None):\n \"\"\"Obsolete - users should use setFieldPos instead of setPos\n \"\"\"\n logging.error(\"User called DotStim.setPos(pos). 
\"\n \"Use DotStim.SetFieldPos(pos) instead.\")\n\n def setFieldSize(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'fieldSize', val, log, op) # calls attributeSetter\n\n @attributeSetter\n def fieldSize(self, size):\n \"\"\"Specifying the size of the field of dots using a\n :ref:`x,y-pair <attrib-xy>`. See e.g. :class:`.ShapeStim` for more \n documentation/examples on how to set position.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n # Isn't there a way to use BaseVisualStim.pos.__doc__ as docstring\n # here?\n self.size = size # using BaseVisualStim. we'll store this as both\n self.__dict__['fieldSize'] = self.size\n\n @attributeSetter\n def coherence(self, coherence):\n \"\"\"Scalar between 0 and 1.\n\n Change the coherence (%) of the DotStim. This will be rounded according \n to the number of dots in the stimulus.\n\n :ref:`operations <attrib-operations>` are supported.\n \"\"\"\n if not 0 <= coherence <= 1:\n raise ValueError('DotStim.coherence must be between 0 and 1')\n\n _cohDots = coherence * self.nDots\n\n self.__dict__['coherence'] = round(_cohDots) /self.nDots\n self._signalDots = np.zeros(self.nDots, dtype=bool)\n self._signalDots[0:int(self.coherence * self.nDots)] = True\n # for 'direction' method we need to update the direction of the number\n # of signal dots immediately, but for other methods it will be done\n # during updateXY\n\n # NB - AJS Actually you need to do this for 'walk' also\n # otherwise would be signal dots adopt random directions when the become\n # sinal dots in later trails\n if self.noiseDots in ('direction', 'position', 'walk'):\n self._dotsDir = np.random.rand(self.nDots) * _2pi\n self._dotsDir[self._signalDots] = self.dir * _piOver180\n\n def setFieldCoherence(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'coherence', val, log, op) # calls attributeSetter\n\n @attributeSetter\n def dir(self, dir):\n \"\"\"float (degrees). direction of the coherent dots. :ref:`operations \n <attrib-operations>` are supported.\n \"\"\"\n # check which dots are signal before setting new dir\n signalDots = self._dotsDir == (self.dir * _piOver180)\n self.__dict__['dir'] = dir\n\n # dots currently moving in the signal direction also need to update\n # their direction\n self._dotsDir[signalDots] = self.dir * _piOver180\n\n def setDir(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \"\"\"\n setAttribute(self, 'dir', val, log, op)\n\n @attributeSetter\n def speed(self, speed):\n \"\"\"float. speed of the dots (in *units*/frame). :ref:`operations \n <attrib-operations>` are supported.\n \"\"\"\n self.__dict__['speed'] = speed\n\n def setSpeed(self, val, op='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead, but use \n this method if you need to suppress the log message.\n \n \"\"\"\n setAttribute(self, 'speed', val, log, op)\n\n def draw(self, win=None):\n \"\"\"Draw the stimulus in its relevant window. You must call this method\n after every MyWin.flip() if you want the stimulus to appear on that\n frame and then update the screen again.\n\n Parameters\n ----------\n win : window.Window, optional\n Window to draw dots to. 
If `None`, dots will be drawn to the parent\n window.\n\n \"\"\"\n if win is None:\n win = self.win\n self._selectWindow(win)\n\n self._update_dotsXY()\n\n GL.glPushMatrix() # push before drawing, pop after\n\n # draw the dots\n if self.element is None:\n win.setScale('pix')\n GL.glPointSize(self.dotSize)\n\n # load Null textures into multitexteureARB - they modulate with\n # glColor\n GL.glActiveTexture(GL.GL_TEXTURE0)\n GL.glEnable(GL.GL_TEXTURE_2D)\n GL.glBindTexture(GL.GL_TEXTURE_2D, 0)\n GL.glActiveTexture(GL.GL_TEXTURE1)\n GL.glEnable(GL.GL_TEXTURE_2D)\n GL.glBindTexture(GL.GL_TEXTURE_2D, 0)\n\n CPCD = ctypes.POINTER(ctypes.c_double)\n GL.glVertexPointer(2, GL.GL_DOUBLE, 0,\n self.verticesPix.ctypes.data_as(CPCD))\n GL.glColor4f(*self._foreColor.render('rgba1'))\n GL.glEnableClientState(GL.GL_VERTEX_ARRAY)\n GL.glDrawArrays(GL.GL_POINTS, 0, self.nDots)\n GL.glDisableClientState(GL.GL_VERTEX_ARRAY)\n else:\n # we don't want to do the screen scaling twice so for each dot\n # subtract the screen centre\n initialDepth = self.element.depth\n for pointN in range(0, self.nDots):\n _p = self.verticesPix[pointN, :] + self.fieldPos\n self.element.setPos(_p)\n self.element.draw()\n # reset depth before going to next frame\n self.element.setDepth(initialDepth)\n GL.glPopMatrix()\n\n def _newDotsXY(self, nDots):\n \"\"\"Returns a uniform spread of dots, according to the `fieldShape` and\n `fieldSize`.\n\n Parameters\n ----------\n nDots : int\n Number of dots to sample.\n\n Returns\n -------\n ndarray\n Nx2 array of X and Y positions of dots.\n\n Examples\n --------\n Create a new array of dot positions::\n\n dots = self._newDots(nDots)\n\n \"\"\"\n if self.fieldShape == 'circle':\n length = np.sqrt(np.random.uniform(0, 1, (nDots,)))\n angle = np.random.uniform(0., _2pi, (nDots,))\n\n newDots = np.zeros((nDots, 2))\n newDots[:, 0] = length * np.cos(angle)\n newDots[:, 1] = length * np.sin(angle)\n\n newDots *= self.fieldSize * .5\n else:\n newDots = np.random.uniform(-0.5, 0.5, size = (nDots, 2)) * self.fieldSize\n\n return newDots\n\n def refreshDots(self):\n \"\"\"Callable user function to choose a new set of dots.\"\"\"\n self.vertices = self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots)\n\n # Don't allocate another array if the new number of dots is equal to\n # the last.\n if self.nDots != len(self._deadDots):\n self._deadDots = np.zeros(self.nDots, dtype=bool)\n\n def _update_dotsXY(self):\n \"\"\"The user shouldn't call this - its gets done within draw().\n \"\"\"\n # Find dead dots, update positions, get new positions for\n # dead and out-of-bounds\n # renew dead dots\n if self.dotLife > 0: # if less than zero ignore it\n # decrement. 
Then dots to be reborn will be negative\n self._dotsLife -= 1\n self._deadDots[:] = (self._dotsLife <= 0)\n self._dotsLife[self._deadDots] = self.dotLife\n else:\n self._deadDots[:] = False\n\n # update XY based on speed and dir\n # NB self._dotsDir is in radians, but self.dir is in degs\n # update which are the noise/signal dots\n if self.signalDots == 'different':\n # **up to version 1.70.00 this was the other way around,\n # not in keeping with Scase et al**\n # noise and signal dots change identity constantly\n np.random.shuffle(self._dotsDir)\n # and then update _signalDots from that\n self._signalDots = (self._dotsDir == (self.dir * _piOver180))\n\n # update the locations of signal and noise; 0 radians=East!\n reshape = np.reshape\n if self.noiseDots == 'walk':\n # noise dots are ~self._signalDots\n sig = np.random.rand(np.sum(~self._signalDots))\n self._dotsDir[~self._signalDots] = sig * _2pi\n # then update all positions from dir*speed\n cosDots = reshape(np.cos(self._dotsDir), (self.nDots,))\n sinDots = reshape(np.sin(self._dotsDir), (self.nDots,))\n self._verticesBase[:, 0] += self.speed * cosDots\n self._verticesBase[:, 1] += self.speed * sinDots\n elif self.noiseDots == 'direction':\n # simply use the stored directions to update position\n cosDots = reshape(np.cos(self._dotsDir), (self.nDots,))\n sinDots = reshape(np.sin(self._dotsDir), (self.nDots,))\n self._verticesBase[:, 0] += self.speed * cosDots\n self._verticesBase[:, 1] += self.speed * sinDots\n elif self.noiseDots == 'position':\n # update signal dots\n sd = self._signalDots\n sdSum = self._signalDots.sum()\n cosDots = reshape(np.cos(self._dotsDir[sd]), (sdSum,))\n sinDots = reshape(np.sin(self._dotsDir[sd]), (sdSum,))\n self._verticesBase[sd, 0] += self.speed * cosDots\n self._verticesBase[sd, 1] += self.speed * sinDots\n # update noise dots\n self._deadDots[:] = self._deadDots + (~self._signalDots)\n\n # handle boundaries of the field\n if self.fieldShape in (None, 'square', 'sqr'):\n out0 = (np.abs(self._verticesBase[:, 0]) > .5 * self.fieldSize[0])\n out1 = (np.abs(self._verticesBase[:, 1]) > .5 * self.fieldSize[1])\n outofbounds = out0 + out1\n else:\n # transform to a normalised circle (radius = 1 all around)\n # then to polar coords to check\n # the normalised XY position (where radius should be < 1)\n normXY = self._verticesBase / .5 / self.fieldSize\n # add out-of-bounds to those that need replacing\n outofbounds = np.hypot(normXY[:, 0], normXY[:, 1]) > 1.\n\n # update any dead dots\n nDead = self._deadDots.sum()\n if nDead:\n self._verticesBase[self._deadDots, :] = self._newDotsXY(nDead)\n\n # Reposition any dots that have gone out of bounds. Net effect is to\n # place dot one step inside the boundary on the other side of the\n # aperture.\n nOutOfBounds = outofbounds.sum()\n if nOutOfBounds:\n self._verticesBase[outofbounds, :] = self._newDotsXY(nOutOfBounds)\n\n self.vertices = self._verticesBase / self.fieldSize\n\n # update the pixel XY coordinates in pixels (using _BaseVisual class)\n self._updateVertices()\n", "path": "psychopy/visual/dot.py"}]} |
gh_patches_debug_1328 | rasdani/github-patches | git_diff | obspy__obspy-3178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Seg2 read error for NOTE keyword
### Avoid duplicates
- [X] I searched existing issues
### Bug Summary
Hi, I am having problems reading seg2 files if the keyword 'NOTE' exists but has no content after it:
Initial code (function "parse_free_form", near the end, around line 327 in file seg2.py):
```
if key == 'NOTE':
value = [cleanup_and_decode_string(line)
for line in value.split(self.line_terminator)
if line]
else:
value = cleanup_and_decode_string(value)
```
I suggest:
```
if key == 'NOTE':
try:
value = [cleanup_and_decode_string(line)
for line in value.split(self.line_terminator)
if line]
except:
value = ''
else:
value = cleanup_and_decode_string(value)
```
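For context, the failure is a str/bytes mismatch rather than the presence of the NOTE keyword as such: when the value part is missing, `value` falls back to the text string `''`, while `self.line_terminator` is a bytes object assembled from the file header. A minimal sketch of the mismatch (the `b'\r\n'` terminator below is only an illustrative assumption; the real value comes from the file descriptor block):

```python
# Hypothetical values mirroring the failing branch in parse_free_form().
value = ''                 # str fallback used when a key has no value part
line_terminator = b'\r\n'  # bytes terminator assembled from the file header

# str.split() refuses a bytes separator and raises the reported error:
# TypeError: must be str or None, not bytes
value.split(line_terminator)
```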
[Rec_00001.zip](https://github.com/obspy/obspy/files/9765136/Rec_00001.zip)
### Code to Reproduce
```python
from obspy.io.seg2 import seg2
filename = "rec_00001.seg2"
st = seg2._read_seg2(filename)
```
### Error Traceback
```Python traceback
Traceback (most recent call last):
File "C:\Users\Hermann\anaconda3\envs\pg\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\sources_2010\python_programs\obspy_error.py", line 13, in <module>
st = seg2._read_seg2(filename)
File "C:\Users\Hermann\anaconda3\envs\pg\lib\site-packages\obspy\io\seg2\seg2.py", line 364, in _read_seg2
st = seg2.read_file(filename)
File "C:\Users\Hermann\anaconda3\envs\pg\lib\site-packages\obspy\io\seg2\seg2.py", line 83, in read_file
self.read_file_descriptor_block()
File "C:\Users\Hermann\anaconda3\envs\pg\lib\site-packages\obspy\io\seg2\seg2.py", line 167, in read_file_descriptor_block
self.parse_free_form(
File "C:\Users\Hermann\anaconda3\envs\pg\lib\site-packages\obspy\io\seg2\seg2.py", line 329, in parse_free_form
for line in value.split(self.line_terminator)
TypeError: must be str or None, not bytes
```
### ObsPy Version?
1.3.0
### Operating System?
Windows
### Python Version?
3.8.13
### Installation Method?
conda
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/io/seg2/seg2.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 SEG-2 support for ObsPy.
4
5 A file format description is given by [Pullan1990]_.
6
7 :copyright:
8 Lion Krischer ([email protected]), 2011
9 :license:
10 GNU Lesser General Public License, Version 3
11 (https://www.gnu.org/copyleft/lesser.html)
12 """
13 from copy import deepcopy
14 from struct import unpack, unpack_from
15 import warnings
16 import re
17
18 import numpy as np
19
20 from obspy import Stream, Trace, UTCDateTime
21 from obspy.core import AttribDict
22 from obspy.core.compatibility import from_buffer
23 from .header import MONTHS
24
25
26 WARNING_HEADER = "Many companies use custom defined SEG2 header variables." + \
27 " This might cause basic header information reflected in the single " + \
28 "traces' stats to be wrong (e.g. recording delays, first sample " + \
29 "number, station code names, ..). Please check the complete list of " + \
30 "additional unmapped header fields that gets stored in " + \
31 "Trace.stats.seg2 and/or the manual of the source of the SEG2 files " + \
32 "for fields that might influence e.g. trace start times."
33
34
35 class SEG2BaseError(Exception):
36 """
37 Base class for all SEG-2 specific errors.
38 """
39 pass
40
41
42 class SEG2InvalidFileError(SEG2BaseError):
43 """
44 Will be raised if something is not correct with the SEG-2 file.
45 """
46 pass
47
48
49 class SEG2(object):
50 """
51 Class to read SEG 2 formatted files.
52
53 The main reason this is realized as a class is for the ease of passing
54 the various parameters from one function to the next.
55
56 Do not change the file_pointer attribute while using this class. It will
57 be used to keep track of which parts have been read yet and which not.
58 """
59 def __init__(self):
60 pass
61
62 def read_file(self, file_object):
63 """
64 Reads the following file and will return a Stream object. If
65 file_object is a string it will be treated as a file name, otherwise it
66 will be expected to be a file like object with read(), seek() and
67 tell() methods.
68
69 If it is a file_like object, file.seek(0, 0) is expected to be the
70 beginning of the SEG-2 file.
71 """
72 # Read the file if it is a file name.
73 if not hasattr(file_object, 'write'):
74 self.file_pointer = open(file_object, 'rb')
75 else:
76 self.file_pointer = file_object
77 self.file_pointer.seek(0, 0)
78
79 self.stream = Stream()
80
81 # Read the file descriptor block. This will also determine the
82 # endianness.
83 self.read_file_descriptor_block()
84
85 # Loop over every trace, read it and append it to the Stream.
86 for tr_pointer in self.trace_pointers:
87 self.file_pointer.seek(tr_pointer, 0)
88 self.stream.append(self.parse_next_trace())
89
90 if not hasattr(file_object, 'write'):
91 self.file_pointer.close()
92 return self.stream
93
94 def read_file_descriptor_block(self):
95 """
96 Handles the reading of the file descriptor block and the free form
97 section following it.
98 """
99 file_descriptor_block = self.file_pointer.read(32)
100
101 # Determine the endianness and check if the block id is valid.
102 if unpack_from(b'2B', file_descriptor_block) == (0x55, 0x3a):
103 self.endian = b'<'
104 elif unpack_from(b'2B', file_descriptor_block) == (0x3a, 0x55):
105 self.endian = b'>'
106 else:
107 msg = 'Wrong File Descriptor Block ID'
108 raise SEG2InvalidFileError(msg)
109
110 # Check the revision number.
111 revision_number, = unpack_from(self.endian + b'H',
112 file_descriptor_block, 2)
113 if revision_number != 1:
114 msg = '\nOnly SEG 2 revision 1 is officially supported. This file '
115 msg += 'has revision %i. Reading it might fail.' % revision_number
116 msg += '\nPlease contact the ObsPy developers with a sample file.'
117 warnings.warn(msg)
118
119 # Determine trace counts.
120 (size_of_trace_pointer_sub_block,
121 number_of_traces
122 ) = unpack_from(self.endian + b'HH', file_descriptor_block, 4)
123 if number_of_traces * 4 > size_of_trace_pointer_sub_block:
124 msg = ('File indicates %d traces, but there are only %d trace '
125 'pointers.') % (number_of_traces,
126 size_of_trace_pointer_sub_block // 4)
127 raise SEG2InvalidFileError(msg)
128
129 # Define the string and line terminators.
130 (size_of_string_terminator,
131 first_string_terminator_char,
132 second_string_terminator_char,
133 size_of_line_terminator,
134 first_line_terminator_char,
135 second_line_terminator_char
136 ) = unpack_from(b'BccBcc', file_descriptor_block, 8)
137
138 # Assemble the string terminator.
139 if size_of_string_terminator == 1:
140 self.string_terminator = first_string_terminator_char
141 elif size_of_string_terminator == 2:
142 self.string_terminator = first_string_terminator_char + \
143 second_string_terminator_char
144 else:
145 msg = 'Wrong size of string terminator.'
146 raise SEG2InvalidFileError(msg)
147 # Assemble the line terminator.
148 if size_of_line_terminator == 1:
149 self.line_terminator = first_line_terminator_char
150 elif size_of_line_terminator == 2:
151 self.line_terminator = first_line_terminator_char + \
152 second_line_terminator_char
153 else:
154 msg = 'Wrong size of line terminator.'
155 raise SEG2InvalidFileError(msg)
156
157 # Read the trace pointer sub-block and retrieve all the pointers.
158 trace_pointer_sub_block = \
159 self.file_pointer.read(size_of_trace_pointer_sub_block)
160 self.trace_pointers = unpack_from(
161 self.endian + (b'L' * number_of_traces), trace_pointer_sub_block)
162
163 # The rest of the header up to where the first trace pointer points is
164 # a free form section.
165 self.stream.stats = AttribDict()
166 self.stream.stats.seg2 = AttribDict()
167 self.parse_free_form(
168 self.file_pointer.read(self.trace_pointers[0] -
169 self.file_pointer.tell()),
170 self.stream.stats.seg2)
171
172 # Get the time information from the file header.
173 # XXX: Need some more generic date/time parsers.
174 if "ACQUISITION_TIME" in self.stream.stats.seg2 \
175 and "ACQUISITION_DATE" in self.stream.stats.seg2:
176 time = self.stream.stats.seg2.ACQUISITION_TIME
177 date = self.stream.stats.seg2.ACQUISITION_DATE
178 # Split on any non numeric character
179 time = list(filter(None, re.split(r'\D+', time)))
180 # Split on space, dot (.), slash (/), and dash (-)
181 date = list(filter(None, re.split("[, ./-]+", date)))
182 hour, minute, second = int(time[0]), int(time[1]), float(time[2])
183 day, month, year = int(date[0]), MONTHS[date[1].lower()], \
184 int(date[2])
185 self.starttime = UTCDateTime(year, month, day, hour, minute,
186 second)
187 else:
188 self.starttime = UTCDateTime(0)
189
190 def parse_next_trace(self):
191 """
192 Parse the next trace in the trace pointer list and return a Trace
193 object.
194 """
195 trace_descriptor_block = self.file_pointer.read(32)
196 # Check if the trace descriptor block id is valid.
197 if unpack(self.endian + b'H', trace_descriptor_block[0:2])[0] != \
198 0x4422:
199 msg = 'Invalid trace descriptor block id.'
200 raise SEG2InvalidFileError(msg)
201 size_of_this_block, = unpack_from(self.endian + b'H',
202 trace_descriptor_block, 2)
203 number_of_samples_in_data_block, = \
204 unpack_from(self.endian + b'L', trace_descriptor_block, 8)
205 data_format_code, = unpack_from(b'B', trace_descriptor_block, 12)
206
207 # Parse the data format code.
208 if data_format_code == 4:
209 dtype = self.endian + b'f4'
210 sample_size = 4
211 elif data_format_code == 5:
212 dtype = self.endian + b'f8'
213 sample_size = 8
214 elif data_format_code == 1:
215 dtype = self.endian + b'i2'
216 sample_size = 2
217 elif data_format_code == 2:
218 dtype = self.endian + b'i4'
219 sample_size = 4
220 elif data_format_code == 3:
221 dtype = self.endian + b'i2'
222 sample_size = 2.5
223 if number_of_samples_in_data_block % 4 != 0:
224 raise SEG2InvalidFileError(
225 'Data format code 3 requires that the number of samples '
226 'is divisible by 4, but sample count is %d' % (
227 number_of_samples_in_data_block, ))
228 else:
229 msg = 'Unrecognized data format code'
230 raise SEG2InvalidFileError(msg)
231
232 # The rest of the trace block is free form.
233 header = {}
234 header['seg2'] = AttribDict()
235 self.parse_free_form(self.file_pointer.read(size_of_this_block - 32),
236 header['seg2'])
237 header['delta'] = float(header['seg2']['SAMPLE_INTERVAL'])
238 # Set to the file's start time.
239 header['starttime'] = deepcopy(self.starttime)
240 if 'DELAY' in header['seg2']:
241 if float(header['seg2']['DELAY']) != 0:
242 msg = "Non-zero value found in Trace's 'DELAY' field. " + \
243 "This is not supported/tested yet and might lead " + \
244 "to a wrong starttime of the Trace. Please contact " + \
245 "the ObsPy developers with a sample file."
246 warnings.warn(msg)
247
248 if "DESCALING_FACTOR" in header["seg2"]:
249 header['calib'] = float(header['seg2']['DESCALING_FACTOR'])
250
251 # Unpack the data.
252 data = from_buffer(
253 self.file_pointer.read(
254 int(number_of_samples_in_data_block * sample_size)),
255 dtype=dtype)
256 if data_format_code == 3:
257 # Convert one's complement to two's complement by adding one to
258 # negative numbers.
259 one_to_two = (data < 0)
260 # The first two bytes (1 word) of every 10 bytes (5 words) contains
261 # a 4-bit exponent for each of the 4 remaining 2-byte (int16)
262 # samples.
263 exponents = data[0::5].view(self.endian + b'u2')
264 result = np.empty(number_of_samples_in_data_block, dtype=np.int32)
265 # Apply the negative correction, then multiply by correct exponent.
266 result[0::4] = ((data[1::5] + one_to_two[1::5]) *
267 2**((exponents & 0x000f) >> 0))
268 result[1::4] = ((data[2::5] + one_to_two[2::5]) *
269 2**((exponents & 0x00f0) >> 4))
270 result[2::4] = ((data[3::5] + one_to_two[3::5]) *
271 2**((exponents & 0x0f00) >> 8))
272 result[3::4] = ((data[4::5] + one_to_two[4::5]) *
273 2**((exponents & 0xf000) >> 12))
274 data = result
275
276 # Integrate SEG2 file header into each trace header
277 tmp = self.stream.stats.seg2.copy()
278 tmp.update(header['seg2'])
279 header['seg2'] = tmp
280 return Trace(data=data, header=header)
281
282 def parse_free_form(self, free_form_str, attrib_dict):
283 """
284 Parse the free form section stored in free_form_str and save it in
285 attrib_dict.
286 """
287 def cleanup_and_decode_string(value):
288 # Some software/hardware produces invalid characters.
289 def is_good_char(c):
290 return c in (b'0123456789'
291 b'abcdefghijklmnopqrstuvwxyz'
292 b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
293 b'!"#$%&\'()*+,-./:; <=>?@[\\]^_`{|}~ ')
294
295 # A loop over a bytestring in Python 3 returns integers. This can
296 # be solved with a number of imports from the python-future module
297 # and all kinds of subtle changes throughout this file.
298 return "".join(map(chr, filter(is_good_char, value))).strip()
299
300 # Separate the strings. Every string starts with a 2-byte offset to the
301 # next string, and ends with a terminator. An offset of 0 indicates the
302 # end of the strings.
303 offset = 0
304 strings = []
305 while offset + 2 < len(free_form_str):
306 strlen, = unpack_from(self.endian + b'H', free_form_str, offset)
307 if strlen == 0:
308 break
309 curstr = free_form_str[offset + 2:offset + strlen]
310 try:
311 curstrlen = curstr.index(self.string_terminator)
312 except ValueError:
313 strings.append(curstr)
314 else:
315 strings.append(curstr[:curstrlen])
316 offset += strlen
317
318 # Every string has the structure OPTION<SPACE>VALUE. Write to
319 # stream.stats attribute.
320 for string in strings:
321 string = string.strip().split(b' ', 1)
322 key = cleanup_and_decode_string(string[0])
323 try:
324 value = string[1]
325 except IndexError:
326 value = ''
327 if key == 'NOTE':
328 value = [cleanup_and_decode_string(line)
329 for line in value.split(self.line_terminator)
330 if line]
331 else:
332 value = cleanup_and_decode_string(value)
333 setattr(attrib_dict, key, value)
334
335
336 def _is_seg2(filename):
337 if not hasattr(filename, 'write'):
338 file_pointer = open(filename, 'rb')
339 else:
340 file_pointer = filename
341
342 file_descriptor_block = file_pointer.read(4)
343 if not hasattr(filename, 'write'):
344 file_pointer.close()
345 try:
346 # Determine the endianness and check if the block id is valid.
347 if unpack_from(b'2B', file_descriptor_block) == (0x55, 0x3a):
348 endian = b'<'
349 elif unpack_from(b'2B', file_descriptor_block) == (0x3a, 0x55):
350 endian = b'>'
351 else:
352 return False
353 except Exception:
354 return False
355 # Check the revision number.
356 revision_number, = unpack_from(endian + b'H', file_descriptor_block, 2)
357 if revision_number != 1:
358 return False
359 return True
360
361
362 def _read_seg2(filename, **kwargs): # @UnusedVariable
363 seg2 = SEG2()
364 st = seg2.read_file(filename)
365 warnings.warn(WARNING_HEADER)
366 return st
367
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/io/seg2/seg2.py b/obspy/io/seg2/seg2.py
--- a/obspy/io/seg2/seg2.py
+++ b/obspy/io/seg2/seg2.py
@@ -323,7 +323,7 @@
try:
value = string[1]
except IndexError:
- value = ''
+ value = b''
if key == 'NOTE':
value = [cleanup_and_decode_string(line)
for line in value.split(self.line_terminator)
| {"golden_diff": "diff --git a/obspy/io/seg2/seg2.py b/obspy/io/seg2/seg2.py\n--- a/obspy/io/seg2/seg2.py\n+++ b/obspy/io/seg2/seg2.py\n@@ -323,7 +323,7 @@\n try:\n value = string[1]\n except IndexError:\n- value = ''\n+ value = b''\n if key == 'NOTE':\n value = [cleanup_and_decode_string(line)\n for line in value.split(self.line_terminator)\n", "issue": "Seg2 read error for NOTE keyword\n### Avoid duplicates\n\n- [X] I searched existing issues\n\n### Bug Summary\n\nHi, I am having problems reading seg2 files if the keyword 'NOTE' exists but has no contents behind :\r\n\r\nInitial code (function \"parse_free_form\" near the end, around line 327 in file seg2.py:\r\n\r\n```\r\n if key == 'NOTE':\r\n value = [cleanup_and_decode_string(line)\r\n for line in value.split(self.line_terminator)\r\n if line]\r\n else:\r\n value = cleanup_and_decode_string(value)\r\n```\r\nI suggest:\r\n```\r\n if key == 'NOTE':\r\n try:\r\n value = [cleanup_and_decode_string(line)\r\n for line in value.split(self.line_terminator)\r\n if line]\r\n except:\r\n value = ''\r\n else:\r\n value = cleanup_and_decode_string(value)\r\n```\r\n\r\n\r\n[Rec_00001.zip](https://github.com/obspy/obspy/files/9765136/Rec_00001.zip)\r\n\n\n### Code to Reproduce\n\n```python\nfrom obspy.io.seg2 import seg2\r\nfilename = \"rec_00001.seg2\"\r\nst = seg2._read_seg2(filename)\n```\n\n\n### Error Traceback\n\n```Python traceback\nTraceback (most recent call last):\r\n\r\n File \"C:\\Users\\Hermann\\anaconda3\\envs\\pg\\lib\\site-packages\\spyder_kernels\\py3compat.py\", line 356, in compat_exec\r\n exec(code, globals, locals)\r\n\r\n File \"c:\\sources_2010\\python_programs\\obspy_error.py\", line 13, in <module>\r\n st = seg2._read_seg2(filename)\r\n\r\n File \"C:\\Users\\Hermann\\anaconda3\\envs\\pg\\lib\\site-packages\\obspy\\io\\seg2\\seg2.py\", line 364, in _read_seg2\r\n st = seg2.read_file(filename)\r\n\r\n File \"C:\\Users\\Hermann\\anaconda3\\envs\\pg\\lib\\site-packages\\obspy\\io\\seg2\\seg2.py\", line 83, in read_file\r\n self.read_file_descriptor_block()\r\n\r\n File \"C:\\Users\\Hermann\\anaconda3\\envs\\pg\\lib\\site-packages\\obspy\\io\\seg2\\seg2.py\", line 167, in read_file_descriptor_block\r\n self.parse_free_form(\r\n\r\n File \"C:\\Users\\Hermann\\anaconda3\\envs\\pg\\lib\\site-packages\\obspy\\io\\seg2\\seg2.py\", line 329, in parse_free_form\r\n for line in value.split(self.line_terminator)\r\n\r\nTypeError: must be str or None, not bytes\n```\n\n\n### ObsPy Version?\n\n1.3.0\n\n### Operating System?\n\nWindows\n\n### Python Version?\n\n3.8.13\n\n### Installation Method?\n\nconda\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nSEG-2 support for ObsPy.\n\nA file format description is given by [Pullan1990]_.\n\n:copyright:\n Lion Krischer ([email protected]), 2011\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom copy import deepcopy\nfrom struct import unpack, unpack_from\nimport warnings\nimport re\n\nimport numpy as np\n\nfrom obspy import Stream, Trace, UTCDateTime\nfrom obspy.core import AttribDict\nfrom obspy.core.compatibility import from_buffer\nfrom .header import MONTHS\n\n\nWARNING_HEADER = \"Many companies use custom defined SEG2 header variables.\" + \\\n \" This might cause basic header information reflected in the single \" + \\\n \"traces' stats to be wrong (e.g. recording delays, first sample \" + \\\n \"number, station code names, ..). 
Please check the complete list of \" + \\\n \"additional unmapped header fields that gets stored in \" + \\\n \"Trace.stats.seg2 and/or the manual of the source of the SEG2 files \" + \\\n \"for fields that might influence e.g. trace start times.\"\n\n\nclass SEG2BaseError(Exception):\n \"\"\"\n Base class for all SEG-2 specific errors.\n \"\"\"\n pass\n\n\nclass SEG2InvalidFileError(SEG2BaseError):\n \"\"\"\n Will be raised if something is not correct with the SEG-2 file.\n \"\"\"\n pass\n\n\nclass SEG2(object):\n \"\"\"\n Class to read SEG 2 formatted files.\n\n The main reason this is realized as a class is for the ease of passing\n the various parameters from one function to the next.\n\n Do not change the file_pointer attribute while using this class. It will\n be used to keep track of which parts have been read yet and which not.\n \"\"\"\n def __init__(self):\n pass\n\n def read_file(self, file_object):\n \"\"\"\n Reads the following file and will return a Stream object. If\n file_object is a string it will be treated as a file name, otherwise it\n will be expected to be a file like object with read(), seek() and\n tell() methods.\n\n If it is a file_like object, file.seek(0, 0) is expected to be the\n beginning of the SEG-2 file.\n \"\"\"\n # Read the file if it is a file name.\n if not hasattr(file_object, 'write'):\n self.file_pointer = open(file_object, 'rb')\n else:\n self.file_pointer = file_object\n self.file_pointer.seek(0, 0)\n\n self.stream = Stream()\n\n # Read the file descriptor block. This will also determine the\n # endianness.\n self.read_file_descriptor_block()\n\n # Loop over every trace, read it and append it to the Stream.\n for tr_pointer in self.trace_pointers:\n self.file_pointer.seek(tr_pointer, 0)\n self.stream.append(self.parse_next_trace())\n\n if not hasattr(file_object, 'write'):\n self.file_pointer.close()\n return self.stream\n\n def read_file_descriptor_block(self):\n \"\"\"\n Handles the reading of the file descriptor block and the free form\n section following it.\n \"\"\"\n file_descriptor_block = self.file_pointer.read(32)\n\n # Determine the endianness and check if the block id is valid.\n if unpack_from(b'2B', file_descriptor_block) == (0x55, 0x3a):\n self.endian = b'<'\n elif unpack_from(b'2B', file_descriptor_block) == (0x3a, 0x55):\n self.endian = b'>'\n else:\n msg = 'Wrong File Descriptor Block ID'\n raise SEG2InvalidFileError(msg)\n\n # Check the revision number.\n revision_number, = unpack_from(self.endian + b'H',\n file_descriptor_block, 2)\n if revision_number != 1:\n msg = '\\nOnly SEG 2 revision 1 is officially supported. This file '\n msg += 'has revision %i. Reading it might fail.' 
% revision_number\n msg += '\\nPlease contact the ObsPy developers with a sample file.'\n warnings.warn(msg)\n\n # Determine trace counts.\n (size_of_trace_pointer_sub_block,\n number_of_traces\n ) = unpack_from(self.endian + b'HH', file_descriptor_block, 4)\n if number_of_traces * 4 > size_of_trace_pointer_sub_block:\n msg = ('File indicates %d traces, but there are only %d trace '\n 'pointers.') % (number_of_traces,\n size_of_trace_pointer_sub_block // 4)\n raise SEG2InvalidFileError(msg)\n\n # Define the string and line terminators.\n (size_of_string_terminator,\n first_string_terminator_char,\n second_string_terminator_char,\n size_of_line_terminator,\n first_line_terminator_char,\n second_line_terminator_char\n ) = unpack_from(b'BccBcc', file_descriptor_block, 8)\n\n # Assemble the string terminator.\n if size_of_string_terminator == 1:\n self.string_terminator = first_string_terminator_char\n elif size_of_string_terminator == 2:\n self.string_terminator = first_string_terminator_char + \\\n second_string_terminator_char\n else:\n msg = 'Wrong size of string terminator.'\n raise SEG2InvalidFileError(msg)\n # Assemble the line terminator.\n if size_of_line_terminator == 1:\n self.line_terminator = first_line_terminator_char\n elif size_of_line_terminator == 2:\n self.line_terminator = first_line_terminator_char + \\\n second_line_terminator_char\n else:\n msg = 'Wrong size of line terminator.'\n raise SEG2InvalidFileError(msg)\n\n # Read the trace pointer sub-block and retrieve all the pointers.\n trace_pointer_sub_block = \\\n self.file_pointer.read(size_of_trace_pointer_sub_block)\n self.trace_pointers = unpack_from(\n self.endian + (b'L' * number_of_traces), trace_pointer_sub_block)\n\n # The rest of the header up to where the first trace pointer points is\n # a free form section.\n self.stream.stats = AttribDict()\n self.stream.stats.seg2 = AttribDict()\n self.parse_free_form(\n self.file_pointer.read(self.trace_pointers[0] -\n self.file_pointer.tell()),\n self.stream.stats.seg2)\n\n # Get the time information from the file header.\n # XXX: Need some more generic date/time parsers.\n if \"ACQUISITION_TIME\" in self.stream.stats.seg2 \\\n and \"ACQUISITION_DATE\" in self.stream.stats.seg2:\n time = self.stream.stats.seg2.ACQUISITION_TIME\n date = self.stream.stats.seg2.ACQUISITION_DATE\n # Split on any non numeric character\n time = list(filter(None, re.split(r'\\D+', time)))\n # Split on space, dot (.), slash (/), and dash (-)\n date = list(filter(None, re.split(\"[, ./-]+\", date)))\n hour, minute, second = int(time[0]), int(time[1]), float(time[2])\n day, month, year = int(date[0]), MONTHS[date[1].lower()], \\\n int(date[2])\n self.starttime = UTCDateTime(year, month, day, hour, minute,\n second)\n else:\n self.starttime = UTCDateTime(0)\n\n def parse_next_trace(self):\n \"\"\"\n Parse the next trace in the trace pointer list and return a Trace\n object.\n \"\"\"\n trace_descriptor_block = self.file_pointer.read(32)\n # Check if the trace descriptor block id is valid.\n if unpack(self.endian + b'H', trace_descriptor_block[0:2])[0] != \\\n 0x4422:\n msg = 'Invalid trace descriptor block id.'\n raise SEG2InvalidFileError(msg)\n size_of_this_block, = unpack_from(self.endian + b'H',\n trace_descriptor_block, 2)\n number_of_samples_in_data_block, = \\\n unpack_from(self.endian + b'L', trace_descriptor_block, 8)\n data_format_code, = unpack_from(b'B', trace_descriptor_block, 12)\n\n # Parse the data format code.\n if data_format_code == 4:\n dtype = self.endian + b'f4'\n sample_size = 
4\n elif data_format_code == 5:\n dtype = self.endian + b'f8'\n sample_size = 8\n elif data_format_code == 1:\n dtype = self.endian + b'i2'\n sample_size = 2\n elif data_format_code == 2:\n dtype = self.endian + b'i4'\n sample_size = 4\n elif data_format_code == 3:\n dtype = self.endian + b'i2'\n sample_size = 2.5\n if number_of_samples_in_data_block % 4 != 0:\n raise SEG2InvalidFileError(\n 'Data format code 3 requires that the number of samples '\n 'is divisible by 4, but sample count is %d' % (\n number_of_samples_in_data_block, ))\n else:\n msg = 'Unrecognized data format code'\n raise SEG2InvalidFileError(msg)\n\n # The rest of the trace block is free form.\n header = {}\n header['seg2'] = AttribDict()\n self.parse_free_form(self.file_pointer.read(size_of_this_block - 32),\n header['seg2'])\n header['delta'] = float(header['seg2']['SAMPLE_INTERVAL'])\n # Set to the file's start time.\n header['starttime'] = deepcopy(self.starttime)\n if 'DELAY' in header['seg2']:\n if float(header['seg2']['DELAY']) != 0:\n msg = \"Non-zero value found in Trace's 'DELAY' field. \" + \\\n \"This is not supported/tested yet and might lead \" + \\\n \"to a wrong starttime of the Trace. Please contact \" + \\\n \"the ObsPy developers with a sample file.\"\n warnings.warn(msg)\n\n if \"DESCALING_FACTOR\" in header[\"seg2\"]:\n header['calib'] = float(header['seg2']['DESCALING_FACTOR'])\n\n # Unpack the data.\n data = from_buffer(\n self.file_pointer.read(\n int(number_of_samples_in_data_block * sample_size)),\n dtype=dtype)\n if data_format_code == 3:\n # Convert one's complement to two's complement by adding one to\n # negative numbers.\n one_to_two = (data < 0)\n # The first two bytes (1 word) of every 10 bytes (5 words) contains\n # a 4-bit exponent for each of the 4 remaining 2-byte (int16)\n # samples.\n exponents = data[0::5].view(self.endian + b'u2')\n result = np.empty(number_of_samples_in_data_block, dtype=np.int32)\n # Apply the negative correction, then multiply by correct exponent.\n result[0::4] = ((data[1::5] + one_to_two[1::5]) *\n 2**((exponents & 0x000f) >> 0))\n result[1::4] = ((data[2::5] + one_to_two[2::5]) *\n 2**((exponents & 0x00f0) >> 4))\n result[2::4] = ((data[3::5] + one_to_two[3::5]) *\n 2**((exponents & 0x0f00) >> 8))\n result[3::4] = ((data[4::5] + one_to_two[4::5]) *\n 2**((exponents & 0xf000) >> 12))\n data = result\n\n # Integrate SEG2 file header into each trace header\n tmp = self.stream.stats.seg2.copy()\n tmp.update(header['seg2'])\n header['seg2'] = tmp\n return Trace(data=data, header=header)\n\n def parse_free_form(self, free_form_str, attrib_dict):\n \"\"\"\n Parse the free form section stored in free_form_str and save it in\n attrib_dict.\n \"\"\"\n def cleanup_and_decode_string(value):\n # Some software/hardware produces invalid characters.\n def is_good_char(c):\n return c in (b'0123456789'\n b'abcdefghijklmnopqrstuvwxyz'\n b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n b'!\"#$%&\\'()*+,-./:; <=>?@[\\\\]^_`{|}~ ')\n\n # A loop over a bytestring in Python 3 returns integers. This can\n # be solved with a number of imports from the python-future module\n # and all kinds of subtle changes throughout this file.\n return \"\".join(map(chr, filter(is_good_char, value))).strip()\n\n # Separate the strings. Every string starts with a 2-byte offset to the\n # next string, and ends with a terminator. 
An offset of 0 indicates the\n # end of the strings.\n offset = 0\n strings = []\n while offset + 2 < len(free_form_str):\n strlen, = unpack_from(self.endian + b'H', free_form_str, offset)\n if strlen == 0:\n break\n curstr = free_form_str[offset + 2:offset + strlen]\n try:\n curstrlen = curstr.index(self.string_terminator)\n except ValueError:\n strings.append(curstr)\n else:\n strings.append(curstr[:curstrlen])\n offset += strlen\n\n # Every string has the structure OPTION<SPACE>VALUE. Write to\n # stream.stats attribute.\n for string in strings:\n string = string.strip().split(b' ', 1)\n key = cleanup_and_decode_string(string[0])\n try:\n value = string[1]\n except IndexError:\n value = ''\n if key == 'NOTE':\n value = [cleanup_and_decode_string(line)\n for line in value.split(self.line_terminator)\n if line]\n else:\n value = cleanup_and_decode_string(value)\n setattr(attrib_dict, key, value)\n\n\ndef _is_seg2(filename):\n if not hasattr(filename, 'write'):\n file_pointer = open(filename, 'rb')\n else:\n file_pointer = filename\n\n file_descriptor_block = file_pointer.read(4)\n if not hasattr(filename, 'write'):\n file_pointer.close()\n try:\n # Determine the endianness and check if the block id is valid.\n if unpack_from(b'2B', file_descriptor_block) == (0x55, 0x3a):\n endian = b'<'\n elif unpack_from(b'2B', file_descriptor_block) == (0x3a, 0x55):\n endian = b'>'\n else:\n return False\n except Exception:\n return False\n # Check the revision number.\n revision_number, = unpack_from(endian + b'H', file_descriptor_block, 2)\n if revision_number != 1:\n return False\n return True\n\n\ndef _read_seg2(filename, **kwargs): # @UnusedVariable\n seg2 = SEG2()\n st = seg2.read_file(filename)\n warnings.warn(WARNING_HEADER)\n return st\n", "path": "obspy/io/seg2/seg2.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nSEG-2 support for ObsPy.\n\nA file format description is given by [Pullan1990]_.\n\n:copyright:\n Lion Krischer ([email protected]), 2011\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom copy import deepcopy\nfrom struct import unpack, unpack_from\nimport warnings\nimport re\n\nimport numpy as np\n\nfrom obspy import Stream, Trace, UTCDateTime\nfrom obspy.core import AttribDict\nfrom obspy.core.compatibility import from_buffer\nfrom .header import MONTHS\n\n\nWARNING_HEADER = \"Many companies use custom defined SEG2 header variables.\" + \\\n \" This might cause basic header information reflected in the single \" + \\\n \"traces' stats to be wrong (e.g. recording delays, first sample \" + \\\n \"number, station code names, ..). Please check the complete list of \" + \\\n \"additional unmapped header fields that gets stored in \" + \\\n \"Trace.stats.seg2 and/or the manual of the source of the SEG2 files \" + \\\n \"for fields that might influence e.g. trace start times.\"\n\n\nclass SEG2BaseError(Exception):\n \"\"\"\n Base class for all SEG-2 specific errors.\n \"\"\"\n pass\n\n\nclass SEG2InvalidFileError(SEG2BaseError):\n \"\"\"\n Will be raised if something is not correct with the SEG-2 file.\n \"\"\"\n pass\n\n\nclass SEG2(object):\n \"\"\"\n Class to read SEG 2 formatted files.\n\n The main reason this is realized as a class is for the ease of passing\n the various parameters from one function to the next.\n\n Do not change the file_pointer attribute while using this class. 
It will\n be used to keep track of which parts have been read yet and which not.\n \"\"\"\n def __init__(self):\n pass\n\n def read_file(self, file_object):\n \"\"\"\n Reads the following file and will return a Stream object. If\n file_object is a string it will be treated as a file name, otherwise it\n will be expected to be a file like object with read(), seek() and\n tell() methods.\n\n If it is a file_like object, file.seek(0, 0) is expected to be the\n beginning of the SEG-2 file.\n \"\"\"\n # Read the file if it is a file name.\n if not hasattr(file_object, 'write'):\n self.file_pointer = open(file_object, 'rb')\n else:\n self.file_pointer = file_object\n self.file_pointer.seek(0, 0)\n\n self.stream = Stream()\n\n # Read the file descriptor block. This will also determine the\n # endianness.\n self.read_file_descriptor_block()\n\n # Loop over every trace, read it and append it to the Stream.\n for tr_pointer in self.trace_pointers:\n self.file_pointer.seek(tr_pointer, 0)\n self.stream.append(self.parse_next_trace())\n\n if not hasattr(file_object, 'write'):\n self.file_pointer.close()\n return self.stream\n\n def read_file_descriptor_block(self):\n \"\"\"\n Handles the reading of the file descriptor block and the free form\n section following it.\n \"\"\"\n file_descriptor_block = self.file_pointer.read(32)\n\n # Determine the endianness and check if the block id is valid.\n if unpack_from(b'2B', file_descriptor_block) == (0x55, 0x3a):\n self.endian = b'<'\n elif unpack_from(b'2B', file_descriptor_block) == (0x3a, 0x55):\n self.endian = b'>'\n else:\n msg = 'Wrong File Descriptor Block ID'\n raise SEG2InvalidFileError(msg)\n\n # Check the revision number.\n revision_number, = unpack_from(self.endian + b'H',\n file_descriptor_block, 2)\n if revision_number != 1:\n msg = '\\nOnly SEG 2 revision 1 is officially supported. This file '\n msg += 'has revision %i. Reading it might fail.' 
% revision_number\n msg += '\\nPlease contact the ObsPy developers with a sample file.'\n warnings.warn(msg)\n\n # Determine trace counts.\n (size_of_trace_pointer_sub_block,\n number_of_traces\n ) = unpack_from(self.endian + b'HH', file_descriptor_block, 4)\n if number_of_traces * 4 > size_of_trace_pointer_sub_block:\n msg = ('File indicates %d traces, but there are only %d trace '\n 'pointers.') % (number_of_traces,\n size_of_trace_pointer_sub_block // 4)\n raise SEG2InvalidFileError(msg)\n\n # Define the string and line terminators.\n (size_of_string_terminator,\n first_string_terminator_char,\n second_string_terminator_char,\n size_of_line_terminator,\n first_line_terminator_char,\n second_line_terminator_char\n ) = unpack_from(b'BccBcc', file_descriptor_block, 8)\n\n # Assemble the string terminator.\n if size_of_string_terminator == 1:\n self.string_terminator = first_string_terminator_char\n elif size_of_string_terminator == 2:\n self.string_terminator = first_string_terminator_char + \\\n second_string_terminator_char\n else:\n msg = 'Wrong size of string terminator.'\n raise SEG2InvalidFileError(msg)\n # Assemble the line terminator.\n if size_of_line_terminator == 1:\n self.line_terminator = first_line_terminator_char\n elif size_of_line_terminator == 2:\n self.line_terminator = first_line_terminator_char + \\\n second_line_terminator_char\n else:\n msg = 'Wrong size of line terminator.'\n raise SEG2InvalidFileError(msg)\n\n # Read the trace pointer sub-block and retrieve all the pointers.\n trace_pointer_sub_block = \\\n self.file_pointer.read(size_of_trace_pointer_sub_block)\n self.trace_pointers = unpack_from(\n self.endian + (b'L' * number_of_traces), trace_pointer_sub_block)\n\n # The rest of the header up to where the first trace pointer points is\n # a free form section.\n self.stream.stats = AttribDict()\n self.stream.stats.seg2 = AttribDict()\n self.parse_free_form(\n self.file_pointer.read(self.trace_pointers[0] -\n self.file_pointer.tell()),\n self.stream.stats.seg2)\n\n # Get the time information from the file header.\n # XXX: Need some more generic date/time parsers.\n if \"ACQUISITION_TIME\" in self.stream.stats.seg2 \\\n and \"ACQUISITION_DATE\" in self.stream.stats.seg2:\n time = self.stream.stats.seg2.ACQUISITION_TIME\n date = self.stream.stats.seg2.ACQUISITION_DATE\n # Split on any non numeric character\n time = list(filter(None, re.split(r'\\D+', time)))\n # Split on space, dot (.), slash (/), and dash (-)\n date = list(filter(None, re.split(\"[, ./-]+\", date)))\n hour, minute, second = int(time[0]), int(time[1]), float(time[2])\n day, month, year = int(date[0]), MONTHS[date[1].lower()], \\\n int(date[2])\n self.starttime = UTCDateTime(year, month, day, hour, minute,\n second)\n else:\n self.starttime = UTCDateTime(0)\n\n def parse_next_trace(self):\n \"\"\"\n Parse the next trace in the trace pointer list and return a Trace\n object.\n \"\"\"\n trace_descriptor_block = self.file_pointer.read(32)\n # Check if the trace descriptor block id is valid.\n if unpack(self.endian + b'H', trace_descriptor_block[0:2])[0] != \\\n 0x4422:\n msg = 'Invalid trace descriptor block id.'\n raise SEG2InvalidFileError(msg)\n size_of_this_block, = unpack_from(self.endian + b'H',\n trace_descriptor_block, 2)\n number_of_samples_in_data_block, = \\\n unpack_from(self.endian + b'L', trace_descriptor_block, 8)\n data_format_code, = unpack_from(b'B', trace_descriptor_block, 12)\n\n # Parse the data format code.\n if data_format_code == 4:\n dtype = self.endian + b'f4'\n sample_size = 
4\n elif data_format_code == 5:\n dtype = self.endian + b'f8'\n sample_size = 8\n elif data_format_code == 1:\n dtype = self.endian + b'i2'\n sample_size = 2\n elif data_format_code == 2:\n dtype = self.endian + b'i4'\n sample_size = 4\n elif data_format_code == 3:\n dtype = self.endian + b'i2'\n sample_size = 2.5\n if number_of_samples_in_data_block % 4 != 0:\n raise SEG2InvalidFileError(\n 'Data format code 3 requires that the number of samples '\n 'is divisible by 4, but sample count is %d' % (\n number_of_samples_in_data_block, ))\n else:\n msg = 'Unrecognized data format code'\n raise SEG2InvalidFileError(msg)\n\n # The rest of the trace block is free form.\n header = {}\n header['seg2'] = AttribDict()\n self.parse_free_form(self.file_pointer.read(size_of_this_block - 32),\n header['seg2'])\n header['delta'] = float(header['seg2']['SAMPLE_INTERVAL'])\n # Set to the file's start time.\n header['starttime'] = deepcopy(self.starttime)\n if 'DELAY' in header['seg2']:\n if float(header['seg2']['DELAY']) != 0:\n msg = \"Non-zero value found in Trace's 'DELAY' field. \" + \\\n \"This is not supported/tested yet and might lead \" + \\\n \"to a wrong starttime of the Trace. Please contact \" + \\\n \"the ObsPy developers with a sample file.\"\n warnings.warn(msg)\n\n if \"DESCALING_FACTOR\" in header[\"seg2\"]:\n header['calib'] = float(header['seg2']['DESCALING_FACTOR'])\n\n # Unpack the data.\n data = from_buffer(\n self.file_pointer.read(\n int(number_of_samples_in_data_block * sample_size)),\n dtype=dtype)\n if data_format_code == 3:\n # Convert one's complement to two's complement by adding one to\n # negative numbers.\n one_to_two = (data < 0)\n # The first two bytes (1 word) of every 10 bytes (5 words) contains\n # a 4-bit exponent for each of the 4 remaining 2-byte (int16)\n # samples.\n exponents = data[0::5].view(self.endian + b'u2')\n result = np.empty(number_of_samples_in_data_block, dtype=np.int32)\n # Apply the negative correction, then multiply by correct exponent.\n result[0::4] = ((data[1::5] + one_to_two[1::5]) *\n 2**((exponents & 0x000f) >> 0))\n result[1::4] = ((data[2::5] + one_to_two[2::5]) *\n 2**((exponents & 0x00f0) >> 4))\n result[2::4] = ((data[3::5] + one_to_two[3::5]) *\n 2**((exponents & 0x0f00) >> 8))\n result[3::4] = ((data[4::5] + one_to_two[4::5]) *\n 2**((exponents & 0xf000) >> 12))\n data = result\n\n # Integrate SEG2 file header into each trace header\n tmp = self.stream.stats.seg2.copy()\n tmp.update(header['seg2'])\n header['seg2'] = tmp\n return Trace(data=data, header=header)\n\n def parse_free_form(self, free_form_str, attrib_dict):\n \"\"\"\n Parse the free form section stored in free_form_str and save it in\n attrib_dict.\n \"\"\"\n def cleanup_and_decode_string(value):\n # Some software/hardware produces invalid characters.\n def is_good_char(c):\n return c in (b'0123456789'\n b'abcdefghijklmnopqrstuvwxyz'\n b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n b'!\"#$%&\\'()*+,-./:; <=>?@[\\\\]^_`{|}~ ')\n\n # A loop over a bytestring in Python 3 returns integers. This can\n # be solved with a number of imports from the python-future module\n # and all kinds of subtle changes throughout this file.\n return \"\".join(map(chr, filter(is_good_char, value))).strip()\n\n # Separate the strings. Every string starts with a 2-byte offset to the\n # next string, and ends with a terminator. 
An offset of 0 indicates the\n # end of the strings.\n offset = 0\n strings = []\n while offset + 2 < len(free_form_str):\n strlen, = unpack_from(self.endian + b'H', free_form_str, offset)\n if strlen == 0:\n break\n curstr = free_form_str[offset + 2:offset + strlen]\n try:\n curstrlen = curstr.index(self.string_terminator)\n except ValueError:\n strings.append(curstr)\n else:\n strings.append(curstr[:curstrlen])\n offset += strlen\n\n # Every string has the structure OPTION<SPACE>VALUE. Write to\n # stream.stats attribute.\n for string in strings:\n string = string.strip().split(b' ', 1)\n key = cleanup_and_decode_string(string[0])\n try:\n value = string[1]\n except IndexError:\n value = b''\n if key == 'NOTE':\n value = [cleanup_and_decode_string(line)\n for line in value.split(self.line_terminator)\n if line]\n else:\n value = cleanup_and_decode_string(value)\n setattr(attrib_dict, key, value)\n\n\ndef _is_seg2(filename):\n if not hasattr(filename, 'write'):\n file_pointer = open(filename, 'rb')\n else:\n file_pointer = filename\n\n file_descriptor_block = file_pointer.read(4)\n if not hasattr(filename, 'write'):\n file_pointer.close()\n try:\n # Determine the endianness and check if the block id is valid.\n if unpack_from(b'2B', file_descriptor_block) == (0x55, 0x3a):\n endian = b'<'\n elif unpack_from(b'2B', file_descriptor_block) == (0x3a, 0x55):\n endian = b'>'\n else:\n return False\n except Exception:\n return False\n # Check the revision number.\n revision_number, = unpack_from(endian + b'H', file_descriptor_block, 2)\n if revision_number != 1:\n return False\n return True\n\n\ndef _read_seg2(filename, **kwargs): # @UnusedVariable\n seg2 = SEG2()\n st = seg2.read_file(filename)\n warnings.warn(WARNING_HEADER)\n return st\n", "path": "obspy/io/seg2/seg2.py"}]} |
gh_patches_debug_1329 | rasdani/github-patches | git_diff | ivy-llc__ivy-27836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No value for argument `shape` in `reshape()` function call
In the following function call, the `shape` argument is not passed.
https://github.com/unifyai/ivy/blob/ef2c6d04e7c6c76535ff159011dbfd8b1f7f3704/ivy/functional/frontends/paddle/tensor/tensor.py#L305
Following the if-else conditions above it, this call is only reached when `shape` is `None` and no positional `args` are given, so `reshape()` ends up being invoked without any target shape.
This is similar to a previously reported issue (https://github.com/unifyai/ivy/issues/27351).
--- END ISSUE ---
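To make the failure path described in the issue above concrete before the file listing, here is a minimal, self-contained sketch that mirrors the control flow of `Tensor.reshape_` (numbered lines 284-306 in the file below). The stub `fake_reshape` is a hypothetical stand-in for `paddle_frontend.reshape`, and the `ValueError` guard mentioned at the end is only a suggested direction, not the patch actually adopted by the repository.

```python
# Minimal model of the buggy control flow in Tensor.reshape_.
# "fake_reshape" is a hypothetical stand-in for paddle_frontend.reshape and is
# NOT part of ivy; it only demonstrates why the final branch fails.

def fake_reshape(x, shape=None):
    # A real reshape needs a target shape; calling it without one is an error.
    if shape is None:
        raise TypeError("reshape() missing required argument: 'shape'")
    return x  # actual shape handling elided for brevity


def reshape_inplace(x, *args, shape=None):
    """Mirrors the if/else structure of Tensor.reshape_ from the issue."""
    if args and shape:
        raise TypeError("reshape() got multiple values for argument 'shape'")
    if shape is not None:
        return fake_reshape(x, shape=shape)
    if args:
        target = args[0] if isinstance(args[0], (tuple, list)) else args
        return fake_reshape(x, shape=target)
    # Buggy fall-through: neither shape nor args were supplied, yet reshape is
    # still called without a shape -- this is the call the issue points at (#L305).
    return fake_reshape(x)


if __name__ == "__main__":
    try:
        reshape_inplace([1, 2, 3, 4])  # neither shape nor args supplied
    except TypeError as exc:
        print("falls through to a shape-less reshape call:", exc)
```

One plausible resolution, assuming the maintainers follow the same pattern as the linked issue, is to give `reshape_` the same final `else` branch as `reshape` and raise `ValueError("reshape_() got no values for argument 'shape'")` instead of calling `paddle_frontend.reshape` without a shape; the golden diff for this record should be treated as authoritative over this sketch.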
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/tensor.py`
Content:
```
1 # local
2 import ivy
3 import ivy.functional.frontends.paddle as paddle_frontend
4 from ivy.func_wrapper import (
5 with_supported_dtypes,
6 with_unsupported_dtypes,
7 with_supported_device_and_dtypes,
8 )
9 from ivy.functional.frontends.paddle.func_wrapper import _to_ivy_array
10
11
12 class Tensor:
13 def __init__(self, array, dtype=None, place="cpu", stop_gradient=True):
14 self._ivy_array = (
15 ivy.array(array, dtype=dtype, device=place)
16 if not isinstance(array, ivy.Array)
17 else array
18 )
19 self._dtype = dtype
20 self._place = place
21 self._stop_gradient = stop_gradient
22
23 def __repr__(self):
24 return (
25 f"ivy.frontends.paddle.Tensor(shape={self.shape}, dtype={self.dtype}, "
26 + str(self.ivy_array.__repr__()).replace("ivy.array(", "")
27 )
28
29 # Properties #
30 # ---------- #
31
32 @property
33 def ivy_array(self):
34 return self._ivy_array
35
36 @property
37 def place(self):
38 return self.ivy_array.device
39
40 @property
41 def dtype(self):
42 return self._ivy_array.dtype
43
44 @property
45 def shape(self):
46 return list(self.ivy_array.shape.shape)
47
48 @property
49 def ndim(self):
50 return self.dim()
51
52 # Setters #
53 # --------#
54
55 @ivy_array.setter
56 def ivy_array(self, array):
57 self._ivy_array = (
58 ivy.array(array) if not isinstance(array, ivy.Array) else array
59 )
60
61 # Special Methods #
62 # -------------------#
63
64 @with_unsupported_dtypes(
65 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
66 "paddle",
67 )
68 def __add__(self, y, /, name=None):
69 return paddle_frontend.add(self, y)
70
71 @with_unsupported_dtypes(
72 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
73 "paddle",
74 )
75 def __radd__(self, x, /, name=None):
76 return paddle_frontend.add(self, x)
77
78 @with_unsupported_dtypes(
79 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
80 "paddle",
81 )
82 def __sub__(self, y, /, name=None):
83 return paddle_frontend.subtract(self, y)
84
85 @with_unsupported_dtypes(
86 {"2.5.2 and below": ("uint8", "int8", "int16", "float16", "bfloat16")},
87 "paddle",
88 )
89 def __mul__(self, y, /, name=None):
90 return paddle_frontend.multiply(self, y)
91
92 @with_unsupported_dtypes(
93 {
94 "2.5.2 and below": (
95 "bool",
96 "uint8",
97 "int8",
98 "int16",
99 "complex64",
100 "complex128",
101 )
102 },
103 "paddle",
104 )
105 def __gt__(self, y, /, name=None):
106 return paddle_frontend.logic.greater_than(self, y)
107
108 @with_unsupported_dtypes(
109 {
110 "2.5.2 and below": (
111 "bool",
112 "uint8",
113 "int8",
114 "int16",
115 "complex64",
116 "complex128",
117 )
118 },
119 "paddle",
120 )
121 def __lt__(self, y, /, name=None):
122 return paddle_frontend.logic.less_than(self, y)
123
124 @with_unsupported_dtypes(
125 {
126 "2.5.2 and below": (
127 "bool",
128 "uint8",
129 "int8",
130 "int16",
131 "complex64",
132 "complex128",
133 )
134 },
135 "paddle",
136 )
137 def __ge__(self, y, /, name=None):
138 return paddle_frontend.logic.greater_equal(self, y)
139
140 @with_unsupported_dtypes(
141 {
142 "2.5.2 and below": (
143 "bool",
144 "uint8",
145 "int8",
146 "int16",
147 "complex64",
148 "complex128",
149 )
150 },
151 "paddle",
152 )
153 def __le__(self, y, /, name=None):
154 return paddle_frontend.logic.less_equal(self, y)
155
156 @with_supported_dtypes(
157 {
158 "2.5.2 and below": (
159 "bool",
160 "uint8",
161 "int8",
162 "int16",
163 "int32",
164 "int64",
165 )
166 },
167 "paddle",
168 )
169 def __or__(self, y, /, name=None):
170 return paddle_frontend.logic.bitwise_or(self, y)
171
172 @with_unsupported_dtypes(
173 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
174 "paddle",
175 )
176 def __rsub__(self, x, /, name=None):
177 return paddle_frontend.subtract(x, self)
178
179 def __getitem__(self, item):
180 ivy_args = ivy.nested_map(_to_ivy_array, [self, item])
181 ret = ivy.get_item(*ivy_args)
182 return paddle_frontend.Tensor(ret)
183
184 def __setitem__(self, item, value):
185 raise ivy.utils.exceptions.IvyException(
186 "ivy.functional.frontends.paddle.Tensor object doesn't support assignment"
187 )
188
189 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
190 def __floordiv__(self, y, /, name=None):
191 return paddle_frontend.floor_divide(self, y)
192
193 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
194 def __ne__(self, y, /, name=None):
195 return paddle_frontend.not_equal(self, y)
196
197 def __iter__(self):
198 if self.ndim == 0:
199 raise TypeError("iteration over a 0-d tensor not supported")
200 for i in range(self.shape[0]):
201 yield self[i]
202
203 @with_unsupported_dtypes(
204 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
205 "paddle",
206 )
207 def __rmul__(self, y, /, name=None):
208 return paddle_frontend.multiply(self, y)
209
210 @with_unsupported_dtypes(
211 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
212 "paddle",
213 )
214 def __float__(self):
215 return float(self._ivy_array)
216
217 def __xor__(self, y, /, name=None):
218 return paddle_frontend.logic.bitwise_xor(self, y)
219
220 def __invert__(self, out=None, name=None):
221 return paddle_frontend.logic.bitwise_not(self)
222
223 def __len__(self):
224 return len(self._ivy_array)
225
226 def __neg__(self):
227 return paddle_frontend.neg(self)
228
229 @with_unsupported_dtypes(
230 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
231 "paddle",
232 )
233 def __rdiv__(self, y, /, name=None):
234 return paddle_frontend.divide(y, self)
235
236 @with_unsupported_dtypes(
237 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
238 "paddle",
239 )
240 def __rtruediv__(self, y, /, name=None):
241 return paddle_frontend.divide(y, self)
242
243 @with_unsupported_dtypes(
244 {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
245 "paddle",
246 )
247 def __int__(self):
248 return int(self._ivy_array)
249
250 @with_unsupported_dtypes(
251 {
252 "2.5.2 and below": (
253 "bool",
254 "unsigned",
255 "int8",
256 "int32",
257 "int64",
258 "float16",
259 "bfloat16",
260 )
261 },
262 "paddle",
263 )
264 def __long__(self):
265 return int(self._ivy_array)
266
267 # Instance Methods #
268 # ---------------- #
269
270 def reshape(self, *args, shape=None):
271 if args and shape:
272 raise TypeError("reshape() got multiple values for argument 'shape'")
273 if shape is not None:
274 return paddle_frontend.reshape(self, shape)
275 if args:
276 if isinstance(args[0], (tuple, list)):
277 shape = args[0]
278 return paddle_frontend.reshape(self, shape)
279 else:
280 return paddle_frontend.reshape(self, args)
281 else:
282 raise ValueError("reshape() got no values for argument 'shape'")
283
284 def reshape_(self, *args, shape=None):
285 if args and shape:
286 raise TypeError("reshape() got multiple values for argument 'shape'")
287 if shape is not None:
288 self.ivy_array = paddle_frontend.reshape(
289 self._ivy_array, shape=shape
290 ).ivy_array
291 return self
292 if args:
293 if isinstance(args[0], (tuple, list)):
294 shape = args[0]
295 self.ivy_array = paddle_frontend.reshape(
296 self._ivy_array, shape=shape
297 ).ivy_array
298 return self
299 else:
300 self.ivy_array = paddle_frontend.reshape(
301 self._ivy_array, args
302 ).ivy_array
303 return self
304
305 self.ivy_array = paddle_frontend.reshape(self._ivy_array).ivy_array
306 return self
307
308 def dim(self):
309 return self.ivy_array.ndim
310
311 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
312 def abs(self):
313 return paddle_frontend.abs(self)
314
315 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
316 def acosh(self, name=None):
317 return paddle_frontend.acosh(self)
318
319 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
320 def add_n(self, inputs, name=None):
321 inputs = ivy.array(inputs)
322 return ivy.sum(inputs, dtype=inputs.dtype, axis=0)
323
324 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
325 def ceil(self):
326 return paddle_frontend.ceil(self)
327
328 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
329 def ceil_(self):
330 self.ivy_array = self.ceil().ivy_array
331 return self
332
333 @with_unsupported_dtypes({"2.5.2 and below": ("complex", "int8")}, "paddle")
334 def numel(self):
335 return paddle_frontend.numel(self)
336
337 @with_unsupported_dtypes({"2.5.2 and below": ("float16",)}, "paddle")
338 def asinh(self, name=None):
339 return paddle_frontend.asinh(self)
340
341 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
342 def asin(self, name=None):
343 return paddle_frontend.asin(self)
344
345 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
346 def cosh(self, name=None):
347 return paddle_frontend.cosh(self)
348
349 @with_supported_dtypes(
350 {
351 "2.5.2 and below": (
352 "int32",
353 "int64",
354 "float64",
355 "complex128",
356 "float32",
357 "complex64",
358 "bool",
359 )
360 },
361 "paddle",
362 )
363 def diagonal(self, offset, axis1=0, axis2=1, name=None):
364 return paddle_frontend.diagonal(self, offset=offset, axis1=axis1, axis2=axis2)
365
366 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
367 def log(self, name=None):
368 return paddle_frontend.log(self)
369
370 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
371 def sin(self, name=None):
372 return paddle_frontend.sin(self)
373
374 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
375 def sinh(self, name=None):
376 return paddle_frontend.sinh(self)
377
378 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
379 def lerp(self, y, weight, name=None):
380 return paddle_frontend.lerp(self, y, weight)
381
382 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
383 def lerp_(self, y, weight, name=None):
384 self.ivy_array = paddle_frontend.lerp(self, y, weight).ivy_array
385 return self
386
387 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
388 def argmax(self, axis=None, keepdim=False, dtype=None, name=None):
389 return paddle_frontend.argmax(self, axis=axis, keepdim=keepdim, dtype=dtype)
390
391 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "uint16")}, "paddle")
392 def unsqueeze(self, axis=None, name=None):
393 return paddle_frontend.Tensor(ivy.expand_dims(self._ivy_array, axis=axis))
394
395 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
396 def sqrt(self, name=None):
397 return paddle_frontend.sqrt(self)
398
399 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
400 def sqrt_(self, name=None):
401 self.ivy_array = self.sqrt().ivy_array
402 return self
403
404 @with_unsupported_dtypes({"2.5.2 and below": ("bfloat16", "uint16")}, "paddle")
405 def zero_(self):
406 self.ivy_array = paddle_frontend.zeros_like(self).ivy_array
407 return self
408
409 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
410 def cos(self, name=None):
411 return paddle_frontend.cos(self)
412
413 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
414 def exp(self, name=None):
415 return paddle_frontend.exp(self)
416
417 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
418 def exp_(self, name=None):
419 self.ivy_array = self.exp().ivy_array
420 return self
421
422 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
423 def erf(self, name=None):
424 return paddle_frontend.erf(self)
425
426 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
427 def subtract(self, y, name=None):
428 return paddle_frontend.subtract(self, y)
429
430 @with_unsupported_dtypes(
431 {"2.5.2 and below": ("float16", "uint8", "int8", "bool")}, "paddle"
432 )
433 def subtract_(self, y, name=None):
434 self.ivy_array = self.subtract(y).ivy_array
435 return self
436
437 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
438 def log10(self, name=None):
439 return paddle_frontend.Tensor(ivy.log10(self._ivy_array))
440
441 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
442 def argsort(self, axis=-1, descending=False, name=None):
443 return paddle_frontend.argsort(self, axis=axis, descending=descending)
444
445 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
446 def floor(self, name=None):
447 return paddle_frontend.floor(self)
448
449 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
450 def floor_(self):
451 self.ivy_array = self.floor().ivy_array
452 return self
453
454 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
455 def round_(self, name=None):
456 self.ivy_array = paddle_frontend.round(self).ivy_array
457 return self
458
459 @with_supported_dtypes(
460 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
461 )
462 def clip(self, min=None, max=None, name=None):
463 ivy.utils.assertions.check_all_or_any_fn(
464 min,
465 max,
466 fn=ivy.exists,
467 type="any",
468 limit=[1, 2],
469 message="at most one of min or max can be None",
470 )
471 if min is None:
472 ret = ivy.minimum(self._ivy_array, max)
473 elif max is None:
474 ret = ivy.maximum(self._ivy_array, min)
475 else:
476 ret = ivy.clip(self._ivy_array, min, max)
477 return paddle_frontend.Tensor(ret)
478
479 @with_supported_dtypes(
480 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
481 )
482 def clip_(self, min=None, max=None, name=None):
483 self._ivy_array = self.clip(min, max).ivy_array
484 return self
485
486 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
487 def tanh(self, name=None):
488 return paddle_frontend.tanh(self)
489
490 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
491 def add(self, y, name=None):
492 return paddle_frontend.Tensor(ivy.add(self._ivy_array, _to_ivy_array(y)))
493
494 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
495 def add_(self, y, name=None):
496 self.ivy_array = paddle_frontend.add(self, y).ivy_array
497 return self
498
499 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
500 def addmm(self, x, y, beta=1.0, alpha=1.0, name=None):
501 return paddle_frontend.addmm(self, x, y, beta, alpha)
502
503 @with_supported_dtypes(
504 {"2.5.2 and below": ("float16", "float32", "float64", "int32", "int64")},
505 "paddle",
506 )
507 def isinf(self, name=None):
508 return paddle_frontend.isinf(self)
509
510 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "uint16")}, "paddle")
511 def unsqueeze_(self, axis=None, name=None):
512 self.ivy_array = self.unsqueeze(axis=axis).ivy_array
513 return self
514
515 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
516 def square(self, name=None):
517 return paddle_frontend.square(self)
518
519 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
520 def remainder_(self, y, name=None):
521 self.ivy_array = paddle_frontend.remainder(self, y).ivy_array
522 return self
523
524 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
525 def cholesky(self, upper=False, name=None):
526 return paddle_frontend.cholesky(self, upper=upper)
527
528 @with_unsupported_dtypes(
529 {"2.5.2 and below": ("float16", "uint16", "int16")}, "paddle"
530 )
531 def squeeze(self, axis=None, name=None):
532 if isinstance(axis, int) and self.ndim > 0:
533 if self.shape[axis] > 1:
534 return self
535 if len(self.shape) == 0:
536 return self
537 return paddle_frontend.squeeze(self, axis=axis)
538
539 @with_unsupported_dtypes(
540 {"2.5.2 and below": ("float16", "uint16", "int16")}, "paddle"
541 )
542 def squeeze_(self, axis=None, name=None):
543 self.ivy_array = paddle_frontend.squeeze(self, axis=axis).ivy_array
544 return self
545
546 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
547 def multiply(self, y, name=None):
548 return paddle_frontend.multiply(self, y)
549
550 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
551 def matmul(self, y, transpose_x=False, transpose_y=False, name=None):
552 return paddle_frontend.matmul(
553 self, y, transpose_x=transpose_x, transpose_y=transpose_y
554 )
555
556 @with_supported_dtypes(
557 {"2.5.2 and below": ("float16", "float32", "float64", "int32", "int64")},
558 "paddle",
559 )
560 def isfinite(self, name=None):
561 return paddle_frontend.isfinite(self)
562
563 @with_supported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
564 def all(self, axis=None, keepdim=False, dtype=None, name=None):
565 return paddle_frontend.Tensor(
566 ivy.all(self.ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)
567 )
568
569 @with_supported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
570 def allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
571 return paddle_frontend.allclose(
572 self, other, rtol=rtol, atol=atol, equal_nan=equal_nan
573 )
574
575 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
576 def sort(self, axis=-1, descending=False, name=None):
577 return paddle_frontend.sort(self, axis=axis, descending=descending)
578
579 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
580 def log1p(self, name=None):
581 return paddle_frontend.log1p(self)
582
583 @with_supported_dtypes(
584 {
585 "2.4.2 and below": (
586 "bool",
587 "uint8",
588 "int8",
589 "int16",
590 "int32",
591 "int64",
592 )
593 },
594 "paddle",
595 )
596 def bitwise_and(self, y, out=None, name=None):
597 return paddle_frontend.bitwise_and(self, y)
598
599 @with_supported_dtypes(
600 {
601 "2.5.2 and below": (
602 "bool",
603 "int8",
604 "int16",
605 "int32",
606 "int64",
607 "float32",
608 "float64",
609 )
610 },
611 "paddle",
612 )
613 def logical_or(self, y, out=None, name=None):
614 return paddle_frontend.logical_or(self, y, out=out)
615
616 @with_supported_dtypes(
617 {"2.5.2 and below": ("bool", "uint8", "int8", "int16", "int32", "int64")},
618 "paddle",
619 )
620 def bitwise_xor(self, y, out=None, name=None):
621 return paddle_frontend.bitwise_xor(self, y)
622
623 @with_supported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
624 def any(self, axis=None, keepdim=False, name=None):
625 return paddle_frontend.any(self, axis=axis, keepdim=keepdim)
626
627 @with_unsupported_dtypes({"2.5.2 and below": "bfloat16"}, "paddle")
628 def astype(self, dtype):
629 return paddle_frontend.Tensor(ivy.astype(self._ivy_array, dtype))
630
631 @with_supported_dtypes(
632 {"2.5.2 and below": ("bool", "uint8", "int8", "int16", "int32", "int64")},
633 "paddle",
634 )
635 def bitwise_not(self, out=None, name=None):
636 return paddle_frontend.bitwise_not(self, out=out)
637
638 @with_supported_dtypes(
639 {
640 "2.5.2 and below": (
641 "bool",
642 "int8",
643 "int16",
644 "int32",
645 "int64",
646 )
647 },
648 "paddle",
649 )
650 def bitwise_or(self, y, out=None, name=None):
651 return paddle_frontend.bitwise_or(self, y, out=out)
652
653 @with_supported_dtypes(
654 {
655 "2.5.2 and below": (
656 "bool",
657 "int8",
658 "int16",
659 "int32",
660 "int64",
661 "float32",
662 "float64",
663 )
664 },
665 "paddle",
666 )
667 def logical_xor(self, y, out=None, name=None):
668 return paddle_frontend.logical_xor(self, y, out=out)
669
670 @with_supported_dtypes(
671 {"2.5.2 and below": ("float16", "float32", "float64", "int32", "int64")},
672 "paddle",
673 )
674 def isnan(self, name=None):
675 return paddle_frontend.isnan(self)
676
677 @with_unsupported_dtypes(
678 {
679 "2.5.2 and below": (
680 "bool",
681 "uint8",
682 "int8",
683 "int16",
684 "complex64",
685 "complex128",
686 )
687 },
688 "paddle",
689 )
690 def greater_than(self, y, name=None):
691 return paddle_frontend.greater_than(self, y)
692
693 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
694 def rsqrt(self, name=None):
695 return paddle_frontend.rsqrt(self)
696
697 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
698 def rsqrt_(self, name=None):
699 self.ivy_array = self.rsqrt().ivy_array
700 return self
701
702 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
703 def reciprocal(self, name=None):
704 return paddle_frontend.reciprocal(self)
705
706 @with_supported_dtypes(
707 {
708 "2.5.2 and below": (
709 "bool",
710 "int8",
711 "int16",
712 "int32",
713 "int64",
714 "float32",
715 "float64",
716 )
717 },
718 "paddle",
719 )
720 def logical_and(self, y, out=None, name=None):
721 return paddle_frontend.logical_and(self, y, out=out)
722
723 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
724 def divide(self, y, name=None):
725 return paddle_frontend.divide(self, y)
726
727 @with_supported_dtypes(
728 {"2.5.2 and below": ("float32", "float64", "complex64", "complex128")},
729 "paddle",
730 )
731 def eigvals(self, name=None):
732 return paddle_frontend.eigvals(self)
733
734 @with_unsupported_dtypes(
735 {
736 "2.5.2 and below": (
737 "bool",
738 "uint8",
739 "int8",
740 "int16",
741 "complex64",
742 "complex128",
743 )
744 },
745 "paddle",
746 )
747 def less_than(self, y, name=None):
748 return paddle_frontend.less_than(self, y)
749
750 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
751 def cumprod(self, dim=None, dtype=None, name=None):
752 return paddle_frontend.cumprod(self, dim=dim, dtype=dtype)
753
754 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
755 def cumsum(self, axis=None, dtype=None, name=None):
756 return paddle_frontend.Tensor(
757 ivy.cumsum(self._ivy_array, axis=axis, dtype=dtype)
758 )
759
760 @with_supported_dtypes(
761 {"2.5.2 and below": ("complex64", "complex128", "float32", "float64")},
762 "paddle",
763 )
764 def angle(self, name=None):
765 return paddle_frontend.angle(self)
766
767 @with_unsupported_dtypes(
768 {
769 "2.5.2 and below": (
770 "uint8",
771 "int8",
772 "int16",
773 "complex64",
774 "complex128",
775 )
776 },
777 "paddle",
778 )
779 def equal(self, y, name=None):
780 return paddle_frontend.equal(self, y)
781
782 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
783 def rad2deg(self, name=None):
784 return paddle_frontend.rad2deg(self)
785
786 @with_unsupported_dtypes(
787 {
788 "2.5.2 and below": (
789 "uint8",
790 "int8",
791 "int16",
792 "float16",
793 "complex64",
794 "complex128",
795 )
796 },
797 "paddle",
798 )
799 def equal_all(self, y, name=None):
800 return paddle_frontend.equal_all(self, y)
801
802 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
803 def maximum(self, other, name=None):
804 return paddle_frontend.maximum(self, other)
805
806 @with_unsupported_dtypes({"2.5.2 and below": "bfloat16"}, "paddle")
807 def fmax(self, y, name=None):
808 return paddle_frontend.fmax(self, y)
809
810 @with_unsupported_dtypes({"2.5.2 and below": "bfloat16"}, "paddle")
811 def fmin(self, y, name=None):
812 return paddle_frontend.fmin(self, y)
813
814 @with_supported_dtypes(
815 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
816 )
817 def minimum(self, y, name=None):
818 return paddle_frontend.minimum(self, y)
819
820 @with_supported_dtypes(
821 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
822 )
823 def max(self, axis=None, keepdim=False, name=None):
824 return paddle_frontend.max(self, axis=axis, keepdim=keepdim)
825
826 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
827 def deg2rad(self, name=None):
828 return paddle_frontend.deg2rad(self)
829
830 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
831 def digamma(self, name=None):
832 return paddle_frontend.digamma(self)
833
834 @with_supported_dtypes(
835 {"2.5.2 and below": ("float32", "float64", "int32", "int64", "bool")}, "paddle"
836 )
837 def rot90(self, k=1, axes=(0, 1), name=None):
838 return paddle_frontend.rot90(self, k=k, axes=axes)
839
840 @with_supported_dtypes(
841 {"2.5.2 and below": ("complex64", "complex128")},
842 "paddle",
843 )
844 def imag(self, name=None):
845 return paddle_frontend.imag(self)
846
847 def is_tensor(self):
848 return paddle_frontend.is_tensor(self)
849
850 @with_supported_dtypes(
851 {
852 "2.5.2 and below": (
853 "float32",
854 "float64",
855 )
856 },
857 "paddle",
858 )
859 def isclose(self, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
860 return paddle_frontend.isclose(
861 self, y, rtol=rtol, atol=atol, equal_nan=equal_nan
862 )
863
864 @with_supported_dtypes({"2.5.2 and below": ("int32", "int64")}, "paddle")
865 def floor_divide(self, y, name=None):
866 return paddle_frontend.floor_divide(self, y)
867
868 @with_supported_dtypes({"2.5.2 and below": ("int32", "int64")}, "paddle")
869 def mod(self, y, name=None):
870 return paddle_frontend.Tensor(ivy.fmod(self._ivy_array, _to_ivy_array(y)))
871
872 @with_supported_dtypes(
873 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
874 )
875 def floor_mod(self, y, name=None):
876 return paddle_frontend.remainder(self, y)
877
878 # cond
879 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
880 def cond(self, p=None, name=None):
881 return paddle_frontend.cond(self, p=p, name=name)
882
883 @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle")
884 def conj(self, name=None):
885 return paddle_frontend.conj(self)
886
887 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
888 def log2(self, name=None):
889 return paddle_frontend.log2(self)
890
891 @with_unsupported_dtypes(
892 {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
893 )
894 def neg(self, name=None):
895 return paddle_frontend.neg(self)
896
897 @with_supported_dtypes(
898 {
899 "2.5.2 and below": (
900 "bool",
901 "int8",
902 "int16",
903 "int32",
904 "int64",
905 "float32",
906 "float64",
907 )
908 },
909 "paddle",
910 )
911 def logical_not(self, out=None, name=None):
912 return paddle_frontend.logical_not(self)
913
914 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
915 def sign(self, name=None):
916 return paddle_frontend.sign(self)
917
918 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
919 def var(self, axis=None, unbiased=True, keepdim=False, name=None):
920 return paddle_frontend.var(self, axis=axis, unbiased=unbiased, keepdim=keepdim)
921
922 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
923 def sgn(self, name=None):
924 return paddle_frontend.sgn(self)
925
926 def tolist(self):
927 return paddle_frontend.Tensor(ivy.to_list(self._ivy_array))
928
929 @with_supported_dtypes(
930 {"2.5.2 and below": ("float32", "float64", "int32", "int64")},
931 "paddle",
932 )
933 def min(self, axis=None, keepdim=False, name=None):
934 return paddle_frontend.min(self, axis=axis, keepdim=keepdim)
935
936 @with_supported_dtypes(
937 {"2.5.2 and below": ("int32", "int64", "float32", "float64")}, "paddle"
938 )
939 def pow(self, y, name=None):
940 return paddle_frontend.pow(self, y)
941
942 @with_supported_dtypes(
943 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
944 )
945 def prod(self, axis=None, keepdim=False, dtype=None, name=None):
946 return paddle_frontend.Tensor(
947 ivy.prod(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)
948 )
949
950 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
951 def atan(self, name=None):
952 return paddle_frontend.atan(self)
953
954 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
955 def atanh(self, name=None):
956 return paddle_frontend.atanh(self)
957
958 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
959 def std(self, axis=None, unbiased=True, keepdim=False, name=None):
960 return paddle_frontend.std(self, axis=axis, unbiased=unbiased, keepdim=keepdim)
961
962 @with_supported_dtypes(
963 {"2.5.2 and below": ("int32", "int64", "float32", "float64")}, "paddle"
964 )
965 def trunc(self, name=None):
966 return paddle_frontend.trunc(self)
967
968 @with_supported_dtypes({"2.5.2 and below": ("complex64", "complex128")}, "paddle")
969 def as_real(self, name=None):
970 if not ivy.is_complex_dtype(self._ivy_array):
971 raise ivy.exceptions.IvyError(
972 "as_real is only supported for complex tensors"
973 )
974 re_part = ivy.real(self._ivy_array)
975 im_part = ivy.imag(self._ivy_array)
976 return paddle_frontend.Tensor(ivy.stack((re_part, im_part), axis=-1))
977
978 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
979 def stanh(self, scale_a=0.67, scale_b=1.7159, name=None):
980 return paddle_frontend.stanh(self, scale_a=scale_a, scale_b=scale_b)
981
982 @with_supported_dtypes(
983 {"2.5.2 and below": ("int32", "int64", "float32", "float64")}, "paddle"
984 )
985 def trace(self, offset=0, axis1=0, axis2=1, name=None):
986 return paddle_frontend.Tensor(
987 ivy.trace(self._ivy_array, offset=offset, axis1=axis1, axis2=axis2)
988 )
989
990 @with_supported_dtypes(
991 {
992 "2.5.2 and below": (
993 "bfloat16",
994 "float32",
995 "float64",
996 "int8",
997 "int16",
998 "int32",
999 "int64",
1000 "uint8",
1001 )
1002 },
1003 "paddle",
1004 )
1005 def flatten(self, start_axis=0, stop_axis=-1, name=None):
1006 if len(self.shape) == 0:
1007 return self.unsqueeze(axis=0)
1008 return paddle_frontend.Tensor(
1009 ivy.flatten(self.ivy_array, start_dim=start_axis, end_dim=stop_axis)
1010 )
1011
1012 @with_supported_dtypes(
1013 {
1014 "2.5.2 and below": (
1015 "float32",
1016 "float64",
1017 "int16",
1018 "int32",
1019 "int64",
1020 "uint8",
1021 )
1022 },
1023 "paddle",
1024 )
1025 def argmin(self, axis=None, keepdim=False, dtype=None, name=None):
1026 return paddle_frontend.argmin(self, axis=axis, keepdim=keepdim, dtype=dtype)
1027
1028 @with_supported_dtypes(
1029 {"2.5.2 and below": ("float32", "float64", "int32", "int64")},
1030 "paddle",
1031 )
1032 def topk(self, k, axis=None, largest=True, sorted=True, name=None):
1033 return paddle_frontend.topk(self, k, axis=axis, largest=largest, sorted=sorted)
1034
1035 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
1036 def remainder(self, y, name=None):
1037 return paddle_frontend.remainder(self, y)
1038
1039 def is_floating_point(self):
1040 return paddle_frontend.is_floating_point(self)
1041
1042 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
1043 def tanh_(self, name=None):
1044 y = self.tanh(self)
1045 return ivy.inplace_update(self, y)
1046
1047 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
1048 def reciprocal_(self, name=None):
1049 y = self.reciprocal(self)
1050 return ivy.inplace_update(self, y)
1051
1052 @with_unsupported_dtypes(
1053 {"2.5.2 and below": ("complex", "uint8", "uint16")}, "paddle"
1054 )
1055 def numpy(self):
1056 return self.ivy_array.to_numpy()
1057
1058 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
1059 def nonzero(self):
1060 return paddle_frontend.nonzero(self)
1061
1062 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
1063 def inner(self, y, name=None):
1064 return paddle_frontend.inner(self, y, name)
1065
1066 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
1067 def acos(self, name=None):
1068 return paddle_frontend.Tensor(ivy.acos(self._ivy_array))
1069
1070 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
1071 def mean(self, axis=None, keepdim=False, name=None):
1072 return paddle_frontend.mean(self, axis=axis, keepdim=keepdim)
1073
1074 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
1075 def as_complex(self, name=None):
1076 if self.ivy_array.shape[-1] != 2:
1077 raise ivy.exceptions.IvyError(
1078 "The size of the last dimension of tensor does not equals 2"
1079 )
1080 dtype = (
1081 ivy.complex64 if ivy.dtype(self.ivy_array) == "float32" else ivy.complex128
1082 )
1083 re_part = self.ivy_array[..., 0]
1084 im_part = ivy.multiply(1j, self.ivy_array[..., 1])
1085 value = paddle_frontend.Tensor(ivy.add(re_part, im_part).astype(dtype))
1086 return value
1087
1088 @with_supported_dtypes(
1089 {"2.5.2 and below": ("int32", "int64", "float32", "float64", "bool")}, "paddle"
1090 )
1091 def not_equal(self, y, name=None):
1092 return paddle_frontend.not_equal(self._ivy_array, y)
1093
1094 @with_supported_dtypes(
1095 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
1096 )
1097 def less_equal(self, y, name=None):
1098 return paddle_frontend.less_equal(self._ivy_array, y)
1099
1100 @with_supported_dtypes({"2.5.2 and below": ("complex64", "complex128")}, "paddle")
1101 def real(self, name=None):
1102 return paddle_frontend.real(self._ivy_array)
1103
1104 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
1105 def t(self, name=None):
1106 axes = list(range(len(self.ivy_array.shape)))[::-1]
1107 return ivy.permute_dims(self.ivy_array, axes=axes)
1108
1109 @with_supported_dtypes(
1110 {
1111 "2.5.2 and below": (
1112 "bool",
1113 "float16",
1114 "float32",
1115 "float64",
1116 "int32",
1117 "int64",
1118 "uint8",
1119 )
1120 },
1121 "paddle",
1122 )
1123 def cast(self, dtype):
1124 return paddle_frontend.cast(self, dtype)
1125
1126 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
1127 def bmm(self, y, transpose_x=False, transpose_y=False, name=None):
1128 return paddle_frontend.bmm(self, y, transpose_x, transpose_y)
1129
1130 @with_supported_dtypes(
1131 {"2.5.2 and below": ("float16", "float32", "float64", "int32", "int64")},
1132 "paddle",
1133 )
1134 def fill_(self, value):
1135 filled_tensor = paddle_frontend.full_like(self, value)
1136 return ivy.inplace_update(self, filled_tensor)
1137
1138 @with_supported_dtypes(
1139 {
1140 "2.5.2 and below": (
1141 "bool",
1142 "int32",
1143 "int64",
1144 "float16",
1145 "float32",
1146 "float64",
1147 )
1148 },
1149 "paddle",
1150 )
1151 def unbind(self, axis=0):
1152 return paddle_frontend.unbind(self._ivy_array, axis=axis)
1153
1154 @with_supported_dtypes(
1155 {
1156 "2.5.2 and below": (
1157 "bool",
1158 "int32",
1159 "int64",
1160 "float16",
1161 "float32",
1162 "float64",
1163 )
1164 },
1165 "paddle",
1166 )
1167 def unique_consecutive(self, axis=0):
1168 return paddle_frontend.unique_consecutive(self._ivy_array, axis=axis)
1169
1170 def cpu(self):
1171 self.ivy_array = ivy.to_device(self.ivy_array, ivy.as_ivy_dev("cpu"))
1172 return self
1173
1174 @with_unsupported_dtypes(
1175 {"2.5.2 and below": ("int16", "complex64", "complex128")},
1176 "paddle",
1177 )
1178 def split(self, num_or_sections, axis=0, name=None):
1179 return paddle_frontend.split(self._ivy_array, num_or_sections, axis, name)
1180
1181 @with_supported_dtypes(
1182 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
1183 )
1184 def frac(self, name=None):
1185 return paddle_frontend.frac(self._ivy_array)
1186
1187 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
1188 def gather(self, y, name=None):
1189 return paddle_frontend.gather(self, y)
1190
1191 def is_complex(self):
1192 return paddle_frontend.is_complex(self)
1193
1194 @with_unsupported_dtypes(
1195 {"2.5.2 and below": ("float16", "uint8", "int8", "bool")}, "paddle"
1196 )
1197 def gather_(self, y, name=None):
1198 res = self.gather(self, y)
1199 return ivy.inplace_update(self, res)
1200
1201 @with_supported_dtypes(
1202 {"2.5.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
1203 )
1204 def heaviside(self, y, name=None):
1205 return paddle_frontend.heaviside(self, y)
1206
1207 @with_supported_dtypes(
1208 {"2.5.2 and below": ("bool", "int32", "int64", "float32", "float64")}, "paddle"
1209 )
1210 def expand(self, shape, name=None):
1211 return paddle_frontend.expand(self._ivy_array, shape)
1212
1213 @with_supported_device_and_dtypes(
1214 {
1215 "2.5.2 and below": {
1216 "cpu": (
1217 "bool",
1218 "int32",
1219 "int64",
1220 "float32",
1221 "float64",
1222 "complex64",
1223 "complex128",
1224 )
1225 }
1226 },
1227 "paddle",
1228 )
1229 def tile(self, repeat_times):
1230 return paddle_frontend.Tensor(ivy.tile(self._ivy_array, repeats=repeat_times))
1231
1232 @with_supported_dtypes(
1233 {
1234 "2.5.2 and below": (
1235 "bool",
1236 "float16",
1237 "float32",
1238 "float64",
1239 "int8",
1240 "int16",
1241 "int32",
1242 "int64",
1243 )
1244 },
1245 "paddle",
1246 )
1247 def chunk(self, chunks, axis=0, name=None):
1248 return paddle_frontend.split(self._ivy_array, num_or_sections=chunks, axis=axis)
1249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -301,9 +301,8 @@
                     self._ivy_array, args
                 ).ivy_array
                 return self
-
-        self.ivy_array = paddle_frontend.reshape(self._ivy_array).ivy_array
-        return self
+        else:
+            raise ValueError("reshape_() got no values for argument 'shape'")
 
     def dim(self):
         return self.ivy_array.ndim
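
The patch above changes `Tensor.reshape_` so that a call with neither positional dimensions nor the `shape` keyword raises a `ValueError`, instead of falling through to `paddle_frontend.reshape(self._ivy_array)` without a target shape. The sketch below is a minimal, self-contained illustration of that argument-dispatch logic only; `FakeTensor` is a hypothetical stand-in and does not perform a real reshape, whereas the actual method delegates to `paddle_frontend.reshape` and updates `self.ivy_array` in place.

```python
# Simplified stand-in for the patched reshape_ dispatch; it only models the
# argument handling, not the actual ivy/paddle reshape computation.
class FakeTensor:
    def __init__(self, data):
        self.data = data  # placeholder for the underlying array

    def reshape_(self, *args, shape=None):
        if args and shape:
            raise TypeError("reshape() got multiple values for argument 'shape'")
        if shape is not None:
            target = tuple(shape)              # keyword form: reshape_(shape=(4, 1))
        elif args:
            # a single tuple/list argument is the shape; otherwise the
            # positional arguments themselves are the dimensions
            target = tuple(args[0]) if isinstance(args[0], (tuple, list)) else args
        else:
            # behaviour introduced by the patch: fail loudly instead of
            # calling reshape without a target shape
            raise ValueError("reshape_() got no values for argument 'shape'")
        self.data = ("reshaped to", target)    # placeholder for the in-place update
        return self


t = FakeTensor([1, 2, 3, 4])
t.reshape_(2, 2)           # ok: positional dimensions
t.reshape_(shape=(4, 1))   # ok: keyword shape
try:
    t.reshape_()           # now raises instead of silently misbehaving
except ValueError as err:
    print(err)             # -> reshape_() got no values for argument 'shape'
```

Only the final `else` branch is new behaviour; the other branches are unchanged by the patch.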
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py\n--- a/ivy/functional/frontends/paddle/tensor/tensor.py\n+++ b/ivy/functional/frontends/paddle/tensor/tensor.py\n@@ -301,9 +301,8 @@\n self._ivy_array, args\n ).ivy_array\n return self\n-\n- self.ivy_array = paddle_frontend.reshape(self._ivy_array).ivy_array\n- return self\n+ else:\n+ raise ValueError(\"reshape_() got no values for argument 'shape'\")\n \n def dim(self):\n return self.ivy_array.ndim\n", "issue": "No value for argument `shape` in `reshape()` function call \nIn the following function call, the `shape` argument is not passed.\r\nhttps://github.com/unifyai/ivy/blob/ef2c6d04e7c6c76535ff159011dbfd8b1f7f3704/ivy/functional/frontends/paddle/tensor/tensor.py#L305\r\nFrom the if-else conditions above, this function call happens when both `shape` and `args` are `None`.\r\nThis is similar to this issue (https://github.com/unifyai/ivy/issues/27351)\n", "before_files": [{"content": "# local\nimport ivy\nimport ivy.functional.frontends.paddle as paddle_frontend\nfrom ivy.func_wrapper import (\n with_supported_dtypes,\n with_unsupported_dtypes,\n with_supported_device_and_dtypes,\n)\nfrom ivy.functional.frontends.paddle.func_wrapper import _to_ivy_array\n\n\nclass Tensor:\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\n self._ivy_array = (\n ivy.array(array, dtype=dtype, device=place)\n if not isinstance(array, ivy.Array)\n else array\n )\n self._dtype = dtype\n self._place = place\n self._stop_gradient = stop_gradient\n\n def __repr__(self):\n return (\n f\"ivy.frontends.paddle.Tensor(shape={self.shape}, dtype={self.dtype}, \"\n + str(self.ivy_array.__repr__()).replace(\"ivy.array(\", \"\")\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def place(self):\n return self.ivy_array.device\n\n @property\n def dtype(self):\n return self._ivy_array.dtype\n\n @property\n def shape(self):\n return list(self.ivy_array.shape.shape)\n\n @property\n def ndim(self):\n return self.dim()\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Special Methods #\n # -------------------#\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __add__(self, y, /, name=None):\n return paddle_frontend.add(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __radd__(self, x, /, name=None):\n return paddle_frontend.add(self, x)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __sub__(self, y, /, name=None):\n return paddle_frontend.subtract(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"uint8\", \"int8\", \"int16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __mul__(self, y, /, name=None):\n return paddle_frontend.multiply(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __gt__(self, y, /, name=None):\n return paddle_frontend.logic.greater_than(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n 
\"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __lt__(self, y, /, name=None):\n return paddle_frontend.logic.less_than(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __ge__(self, y, /, name=None):\n return paddle_frontend.logic.greater_equal(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __le__(self, y, /, name=None):\n return paddle_frontend.logic.less_equal(self, y)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def __or__(self, y, /, name=None):\n return paddle_frontend.logic.bitwise_or(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rsub__(self, x, /, name=None):\n return paddle_frontend.subtract(x, self)\n\n def __getitem__(self, item):\n ivy_args = ivy.nested_map(_to_ivy_array, [self, item])\n ret = ivy.get_item(*ivy_args)\n return paddle_frontend.Tensor(ret)\n\n def __setitem__(self, item, value):\n raise ivy.utils.exceptions.IvyException(\n \"ivy.functional.frontends.paddle.Tensor object doesn't support assignment\"\n )\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def __floordiv__(self, y, /, name=None):\n return paddle_frontend.floor_divide(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def __ne__(self, y, /, name=None):\n return paddle_frontend.not_equal(self, y)\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d tensor not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rmul__(self, y, /, name=None):\n return paddle_frontend.multiply(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __float__(self):\n return float(self._ivy_array)\n\n def __xor__(self, y, /, name=None):\n return paddle_frontend.logic.bitwise_xor(self, y)\n\n def __invert__(self, out=None, name=None):\n return paddle_frontend.logic.bitwise_not(self)\n\n def __len__(self):\n return len(self._ivy_array)\n\n def __neg__(self):\n return paddle_frontend.neg(self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rdiv__(self, y, /, name=None):\n return paddle_frontend.divide(y, self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rtruediv__(self, y, /, name=None):\n return paddle_frontend.divide(y, self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __int__(self):\n return int(self._ivy_array)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"unsigned\",\n \"int8\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"bfloat16\",\n )\n },\n \"paddle\",\n )\n def __long__(self):\n 
return int(self._ivy_array)\n\n # Instance Methods #\n # ---------------- #\n\n def reshape(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n return paddle_frontend.reshape(self, shape)\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n return paddle_frontend.reshape(self, shape)\n else:\n return paddle_frontend.reshape(self, args)\n else:\n raise ValueError(\"reshape() got no values for argument 'shape'\")\n\n def reshape_(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n self.ivy_array = paddle_frontend.reshape(\n self._ivy_array, shape=shape\n ).ivy_array\n return self\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n self.ivy_array = paddle_frontend.reshape(\n self._ivy_array, shape=shape\n ).ivy_array\n return self\n else:\n self.ivy_array = paddle_frontend.reshape(\n self._ivy_array, args\n ).ivy_array\n return self\n\n self.ivy_array = paddle_frontend.reshape(self._ivy_array).ivy_array\n return self\n\n def dim(self):\n return self.ivy_array.ndim\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def abs(self):\n return paddle_frontend.abs(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def acosh(self, name=None):\n return paddle_frontend.acosh(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def add_n(self, inputs, name=None):\n inputs = ivy.array(inputs)\n return ivy.sum(inputs, dtype=inputs.dtype, axis=0)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def ceil(self):\n return paddle_frontend.ceil(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def ceil_(self):\n self.ivy_array = self.ceil().ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"complex\", \"int8\")}, \"paddle\")\n def numel(self):\n return paddle_frontend.numel(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\",)}, \"paddle\")\n def asinh(self, name=None):\n return paddle_frontend.asinh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def asin(self, name=None):\n return paddle_frontend.asin(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cosh(self, name=None):\n return paddle_frontend.cosh(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float64\",\n \"complex128\",\n \"float32\",\n \"complex64\",\n \"bool\",\n )\n },\n \"paddle\",\n )\n def diagonal(self, offset, axis1=0, axis2=1, name=None):\n return paddle_frontend.diagonal(self, offset=offset, axis1=axis1, axis2=axis2)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def log(self, name=None):\n return paddle_frontend.log(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sin(self, name=None):\n return paddle_frontend.sin(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sinh(self, name=None):\n return paddle_frontend.sinh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def lerp(self, y, weight, 
name=None):\n return paddle_frontend.lerp(self, y, weight)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def lerp_(self, y, weight, name=None):\n self.ivy_array = paddle_frontend.lerp(self, y, weight).ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.argmax(self, axis=axis, keepdim=keepdim, dtype=dtype)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"uint16\")}, \"paddle\")\n def unsqueeze(self, axis=None, name=None):\n return paddle_frontend.Tensor(ivy.expand_dims(self._ivy_array, axis=axis))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sqrt(self, name=None):\n return paddle_frontend.sqrt(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sqrt_(self, name=None):\n self.ivy_array = self.sqrt().ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"bfloat16\", \"uint16\")}, \"paddle\")\n def zero_(self):\n self.ivy_array = paddle_frontend.zeros_like(self).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cos(self, name=None):\n return paddle_frontend.cos(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def exp(self, name=None):\n return paddle_frontend.exp(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def exp_(self, name=None):\n self.ivy_array = self.exp().ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def erf(self, name=None):\n return paddle_frontend.erf(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def subtract(self, y, name=None):\n return paddle_frontend.subtract(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint8\", \"int8\", \"bool\")}, \"paddle\"\n )\n def subtract_(self, y, name=None):\n self.ivy_array = self.subtract(y).ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def log10(self, name=None):\n return paddle_frontend.Tensor(ivy.log10(self._ivy_array))\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def argsort(self, axis=-1, descending=False, name=None):\n return paddle_frontend.argsort(self, axis=axis, descending=descending)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def floor(self, name=None):\n return paddle_frontend.floor(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def floor_(self):\n self.ivy_array = self.floor().ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def round_(self, name=None):\n self.ivy_array = paddle_frontend.round(self).ivy_array\n return self\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def clip(self, min=None, max=None, name=None):\n ivy.utils.assertions.check_all_or_any_fn(\n min,\n max,\n fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of min or max can be None\",\n )\n if min is None:\n ret = 
ivy.minimum(self._ivy_array, max)\n elif max is None:\n ret = ivy.maximum(self._ivy_array, min)\n else:\n ret = ivy.clip(self._ivy_array, min, max)\n return paddle_frontend.Tensor(ret)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def clip_(self, min=None, max=None, name=None):\n self._ivy_array = self.clip(min, max).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def tanh(self, name=None):\n return paddle_frontend.tanh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def add(self, y, name=None):\n return paddle_frontend.Tensor(ivy.add(self._ivy_array, _to_ivy_array(y)))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def add_(self, y, name=None):\n self.ivy_array = paddle_frontend.add(self, y).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def addmm(self, x, y, beta=1.0, alpha=1.0, name=None):\n return paddle_frontend.addmm(self, x, y, beta, alpha)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def isinf(self, name=None):\n return paddle_frontend.isinf(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"uint16\")}, \"paddle\")\n def unsqueeze_(self, axis=None, name=None):\n self.ivy_array = self.unsqueeze(axis=axis).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def square(self, name=None):\n return paddle_frontend.square(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def remainder_(self, y, name=None):\n self.ivy_array = paddle_frontend.remainder(self, y).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cholesky(self, upper=False, name=None):\n return paddle_frontend.cholesky(self, upper=upper)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint16\", \"int16\")}, \"paddle\"\n )\n def squeeze(self, axis=None, name=None):\n if isinstance(axis, int) and self.ndim > 0:\n if self.shape[axis] > 1:\n return self\n if len(self.shape) == 0:\n return self\n return paddle_frontend.squeeze(self, axis=axis)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint16\", \"int16\")}, \"paddle\"\n )\n def squeeze_(self, axis=None, name=None):\n self.ivy_array = paddle_frontend.squeeze(self, axis=axis).ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def multiply(self, y, name=None):\n return paddle_frontend.multiply(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def matmul(self, y, transpose_x=False, transpose_y=False, name=None):\n return paddle_frontend.matmul(\n self, y, transpose_x=transpose_x, transpose_y=transpose_y\n )\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def isfinite(self, name=None):\n return paddle_frontend.isfinite(self)\n\n @with_supported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def all(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.Tensor(\n ivy.all(self.ivy_array, 
axis=axis, keepdims=keepdim, dtype=dtype)\n )\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):\n return paddle_frontend.allclose(\n self, other, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def sort(self, axis=-1, descending=False, name=None):\n return paddle_frontend.sort(self, axis=axis, descending=descending)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def log1p(self, name=None):\n return paddle_frontend.log1p(self)\n\n @with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def bitwise_and(self, y, out=None, name=None):\n return paddle_frontend.bitwise_and(self, y)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_or(self, y, out=None, name=None):\n return paddle_frontend.logical_or(self, y, out=out)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def bitwise_xor(self, y, out=None, name=None):\n return paddle_frontend.bitwise_xor(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def any(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.any(self, axis=axis, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": \"bfloat16\"}, \"paddle\")\n def astype(self, dtype):\n return paddle_frontend.Tensor(ivy.astype(self._ivy_array, dtype))\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def bitwise_not(self, out=None, name=None):\n return paddle_frontend.bitwise_not(self, out=out)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def bitwise_or(self, y, out=None, name=None):\n return paddle_frontend.bitwise_or(self, y, out=out)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_xor(self, y, out=None, name=None):\n return paddle_frontend.logical_xor(self, y, out=out)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def isnan(self, name=None):\n return paddle_frontend.isnan(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def greater_than(self, y, name=None):\n return paddle_frontend.greater_than(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def rsqrt(self, name=None):\n return paddle_frontend.rsqrt(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def rsqrt_(self, name=None):\n self.ivy_array = self.rsqrt().ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def reciprocal(self, name=None):\n return 
paddle_frontend.reciprocal(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_and(self, y, out=None, name=None):\n return paddle_frontend.logical_and(self, y, out=out)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def divide(self, y, name=None):\n return paddle_frontend.divide(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"paddle\",\n )\n def eigvals(self, name=None):\n return paddle_frontend.eigvals(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def less_than(self, y, name=None):\n return paddle_frontend.less_than(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def cumprod(self, dim=None, dtype=None, name=None):\n return paddle_frontend.cumprod(self, dim=dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def cumsum(self, axis=None, dtype=None, name=None):\n return paddle_frontend.Tensor(\n ivy.cumsum(self._ivy_array, axis=axis, dtype=dtype)\n )\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n )\n def angle(self, name=None):\n return paddle_frontend.angle(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def equal(self, y, name=None):\n return paddle_frontend.equal(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def rad2deg(self, name=None):\n return paddle_frontend.rad2deg(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def equal_all(self, y, name=None):\n return paddle_frontend.equal_all(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def maximum(self, other, name=None):\n return paddle_frontend.maximum(self, other)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": \"bfloat16\"}, \"paddle\")\n def fmax(self, y, name=None):\n return paddle_frontend.fmax(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": \"bfloat16\"}, \"paddle\")\n def fmin(self, y, name=None):\n return paddle_frontend.fmin(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def minimum(self, y, name=None):\n return paddle_frontend.minimum(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def max(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.max(self, axis=axis, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def deg2rad(self, name=None):\n return paddle_frontend.deg2rad(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def digamma(self, name=None):\n return paddle_frontend.digamma(self)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\", 
\"bool\")}, \"paddle\"\n )\n def rot90(self, k=1, axes=(0, 1), name=None):\n return paddle_frontend.rot90(self, k=k, axes=axes)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n )\n def imag(self, name=None):\n return paddle_frontend.imag(self)\n\n def is_tensor(self):\n return paddle_frontend.is_tensor(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def isclose(self, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):\n return paddle_frontend.isclose(\n self, y, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"int32\", \"int64\")}, \"paddle\")\n def floor_divide(self, y, name=None):\n return paddle_frontend.floor_divide(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"int32\", \"int64\")}, \"paddle\")\n def mod(self, y, name=None):\n return paddle_frontend.Tensor(ivy.fmod(self._ivy_array, _to_ivy_array(y)))\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def floor_mod(self, y, name=None):\n return paddle_frontend.remainder(self, y)\n\n # cond\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cond(self, p=None, name=None):\n return paddle_frontend.cond(self, p=p, name=name)\n\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n def conj(self, name=None):\n return paddle_frontend.conj(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def log2(self, name=None):\n return paddle_frontend.log2(self)\n\n @with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def neg(self, name=None):\n return paddle_frontend.neg(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_not(self, out=None, name=None):\n return paddle_frontend.logical_not(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def sign(self, name=None):\n return paddle_frontend.sign(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def var(self, axis=None, unbiased=True, keepdim=False, name=None):\n return paddle_frontend.var(self, axis=axis, unbiased=unbiased, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def sgn(self, name=None):\n return paddle_frontend.sgn(self)\n\n def tolist(self):\n return paddle_frontend.Tensor(ivy.to_list(self._ivy_array))\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def min(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.min(self, axis=axis, keepdim=keepdim)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def pow(self, y, name=None):\n return paddle_frontend.pow(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def prod(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.Tensor(\n ivy.prod(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\n )\n\n @with_supported_dtypes({\"2.5.2 and 
below\": (\"float32\", \"float64\")}, \"paddle\")\n def atan(self, name=None):\n return paddle_frontend.atan(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def atanh(self, name=None):\n return paddle_frontend.atanh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def std(self, axis=None, unbiased=True, keepdim=False, name=None):\n return paddle_frontend.std(self, axis=axis, unbiased=unbiased, keepdim=keepdim)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def trunc(self, name=None):\n return paddle_frontend.trunc(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"complex64\", \"complex128\")}, \"paddle\")\n def as_real(self, name=None):\n if not ivy.is_complex_dtype(self._ivy_array):\n raise ivy.exceptions.IvyError(\n \"as_real is only supported for complex tensors\"\n )\n re_part = ivy.real(self._ivy_array)\n im_part = ivy.imag(self._ivy_array)\n return paddle_frontend.Tensor(ivy.stack((re_part, im_part), axis=-1))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def stanh(self, scale_a=0.67, scale_b=1.7159, name=None):\n return paddle_frontend.stanh(self, scale_a=scale_a, scale_b=scale_b)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def trace(self, offset=0, axis1=0, axis2=1, name=None):\n return paddle_frontend.Tensor(\n ivy.trace(self._ivy_array, offset=offset, axis1=axis1, axis2=axis2)\n )\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n )\n def flatten(self, start_axis=0, stop_axis=-1, name=None):\n if len(self.shape) == 0:\n return self.unsqueeze(axis=0)\n return paddle_frontend.Tensor(\n ivy.flatten(self.ivy_array, start_dim=start_axis, end_dim=stop_axis)\n )\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"float32\",\n \"float64\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n )\n def argmin(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.argmin(self, axis=axis, keepdim=keepdim, dtype=dtype)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def topk(self, k, axis=None, largest=True, sorted=True, name=None):\n return paddle_frontend.topk(self, k, axis=axis, largest=largest, sorted=sorted)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def remainder(self, y, name=None):\n return paddle_frontend.remainder(self, y)\n\n def is_floating_point(self):\n return paddle_frontend.is_floating_point(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def tanh_(self, name=None):\n y = self.tanh(self)\n return ivy.inplace_update(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def reciprocal_(self, name=None):\n y = self.reciprocal(self)\n return ivy.inplace_update(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"complex\", \"uint8\", \"uint16\")}, \"paddle\"\n )\n def numpy(self):\n return self.ivy_array.to_numpy()\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def nonzero(self):\n return 
paddle_frontend.nonzero(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def inner(self, y, name=None):\n return paddle_frontend.inner(self, y, name)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def acos(self, name=None):\n return paddle_frontend.Tensor(ivy.acos(self._ivy_array))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def mean(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.mean(self, axis=axis, keepdim=keepdim)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def as_complex(self, name=None):\n if self.ivy_array.shape[-1] != 2:\n raise ivy.exceptions.IvyError(\n \"The size of the last dimension of tensor does not equals 2\"\n )\n dtype = (\n ivy.complex64 if ivy.dtype(self.ivy_array) == \"float32\" else ivy.complex128\n )\n re_part = self.ivy_array[..., 0]\n im_part = ivy.multiply(1j, self.ivy_array[..., 1])\n value = paddle_frontend.Tensor(ivy.add(re_part, im_part).astype(dtype))\n return value\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\", \"bool\")}, \"paddle\"\n )\n def not_equal(self, y, name=None):\n return paddle_frontend.not_equal(self._ivy_array, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def less_equal(self, y, name=None):\n return paddle_frontend.less_equal(self._ivy_array, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"complex64\", \"complex128\")}, \"paddle\")\n def real(self, name=None):\n return paddle_frontend.real(self._ivy_array)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def t(self, name=None):\n axes = list(range(len(self.ivy_array.shape)))[::-1]\n return ivy.permute_dims(self.ivy_array, axes=axes)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n )\n def cast(self, dtype):\n return paddle_frontend.cast(self, dtype)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def bmm(self, y, transpose_x=False, transpose_y=False, name=None):\n return paddle_frontend.bmm(self, y, transpose_x, transpose_y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def fill_(self, value):\n filled_tensor = paddle_frontend.full_like(self, value)\n return ivy.inplace_update(self, filled_tensor)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def unbind(self, axis=0):\n return paddle_frontend.unbind(self._ivy_array, axis=axis)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def unique_consecutive(self, axis=0):\n return paddle_frontend.unique_consecutive(self._ivy_array, axis=axis)\n\n def cpu(self):\n self.ivy_array = ivy.to_device(self.ivy_array, ivy.as_ivy_dev(\"cpu\"))\n return self\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n )\n def split(self, num_or_sections, axis=0, name=None):\n return paddle_frontend.split(self._ivy_array, 
num_or_sections, axis, name)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def frac(self, name=None):\n return paddle_frontend.frac(self._ivy_array)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def gather(self, y, name=None):\n return paddle_frontend.gather(self, y)\n\n def is_complex(self):\n return paddle_frontend.is_complex(self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint8\", \"int8\", \"bool\")}, \"paddle\"\n )\n def gather_(self, y, name=None):\n res = self.gather(self, y)\n return ivy.inplace_update(self, res)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def heaviside(self, y, name=None):\n return paddle_frontend.heaviside(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def expand(self, shape, name=None):\n return paddle_frontend.expand(self._ivy_array, shape)\n\n @with_supported_device_and_dtypes(\n {\n \"2.5.2 and below\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n }\n },\n \"paddle\",\n )\n def tile(self, repeat_times):\n return paddle_frontend.Tensor(ivy.tile(self._ivy_array, repeats=repeat_times))\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def chunk(self, chunks, axis=0, name=None):\n return paddle_frontend.split(self._ivy_array, num_or_sections=chunks, axis=axis)\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}], "after_files": [{"content": "# local\nimport ivy\nimport ivy.functional.frontends.paddle as paddle_frontend\nfrom ivy.func_wrapper import (\n with_supported_dtypes,\n with_unsupported_dtypes,\n with_supported_device_and_dtypes,\n)\nfrom ivy.functional.frontends.paddle.func_wrapper import _to_ivy_array\n\n\nclass Tensor:\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\n self._ivy_array = (\n ivy.array(array, dtype=dtype, device=place)\n if not isinstance(array, ivy.Array)\n else array\n )\n self._dtype = dtype\n self._place = place\n self._stop_gradient = stop_gradient\n\n def __repr__(self):\n return (\n f\"ivy.frontends.paddle.Tensor(shape={self.shape}, dtype={self.dtype}, \"\n + str(self.ivy_array.__repr__()).replace(\"ivy.array(\", \"\")\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def place(self):\n return self.ivy_array.device\n\n @property\n def dtype(self):\n return self._ivy_array.dtype\n\n @property\n def shape(self):\n return list(self.ivy_array.shape.shape)\n\n @property\n def ndim(self):\n return self.dim()\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Special Methods #\n # -------------------#\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __add__(self, y, /, name=None):\n return paddle_frontend.add(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __radd__(self, x, /, 
name=None):\n return paddle_frontend.add(self, x)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __sub__(self, y, /, name=None):\n return paddle_frontend.subtract(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"uint8\", \"int8\", \"int16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __mul__(self, y, /, name=None):\n return paddle_frontend.multiply(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __gt__(self, y, /, name=None):\n return paddle_frontend.logic.greater_than(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __lt__(self, y, /, name=None):\n return paddle_frontend.logic.less_than(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __ge__(self, y, /, name=None):\n return paddle_frontend.logic.greater_equal(self, y)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def __le__(self, y, /, name=None):\n return paddle_frontend.logic.less_equal(self, y)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def __or__(self, y, /, name=None):\n return paddle_frontend.logic.bitwise_or(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rsub__(self, x, /, name=None):\n return paddle_frontend.subtract(x, self)\n\n def __getitem__(self, item):\n ivy_args = ivy.nested_map(_to_ivy_array, [self, item])\n ret = ivy.get_item(*ivy_args)\n return paddle_frontend.Tensor(ret)\n\n def __setitem__(self, item, value):\n raise ivy.utils.exceptions.IvyException(\n \"ivy.functional.frontends.paddle.Tensor object doesn't support assignment\"\n )\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def __floordiv__(self, y, /, name=None):\n return paddle_frontend.floor_divide(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def __ne__(self, y, /, name=None):\n return paddle_frontend.not_equal(self, y)\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d tensor not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rmul__(self, y, /, name=None):\n return paddle_frontend.multiply(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __float__(self):\n return float(self._ivy_array)\n\n def __xor__(self, y, /, name=None):\n return paddle_frontend.logic.bitwise_xor(self, y)\n\n def __invert__(self, out=None, name=None):\n return paddle_frontend.logic.bitwise_not(self)\n\n def __len__(self):\n return len(self._ivy_array)\n\n def __neg__(self):\n return paddle_frontend.neg(self)\n\n 
@with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rdiv__(self, y, /, name=None):\n return paddle_frontend.divide(y, self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __rtruediv__(self, y, /, name=None):\n return paddle_frontend.divide(y, self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n def __int__(self):\n return int(self._ivy_array)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"unsigned\",\n \"int8\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"bfloat16\",\n )\n },\n \"paddle\",\n )\n def __long__(self):\n return int(self._ivy_array)\n\n # Instance Methods #\n # ---------------- #\n\n def reshape(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n return paddle_frontend.reshape(self, shape)\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n return paddle_frontend.reshape(self, shape)\n else:\n return paddle_frontend.reshape(self, args)\n else:\n raise ValueError(\"reshape() got no values for argument 'shape'\")\n\n def reshape_(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n self.ivy_array = paddle_frontend.reshape(\n self._ivy_array, shape=shape\n ).ivy_array\n return self\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n self.ivy_array = paddle_frontend.reshape(\n self._ivy_array, shape=shape\n ).ivy_array\n return self\n else:\n self.ivy_array = paddle_frontend.reshape(\n self._ivy_array, args\n ).ivy_array\n return self\n else:\n raise ValueError(\"reshape_() got no values for argument 'shape'\")\n\n def dim(self):\n return self.ivy_array.ndim\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def abs(self):\n return paddle_frontend.abs(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def acosh(self, name=None):\n return paddle_frontend.acosh(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def add_n(self, inputs, name=None):\n inputs = ivy.array(inputs)\n return ivy.sum(inputs, dtype=inputs.dtype, axis=0)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def ceil(self):\n return paddle_frontend.ceil(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def ceil_(self):\n self.ivy_array = self.ceil().ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"complex\", \"int8\")}, \"paddle\")\n def numel(self):\n return paddle_frontend.numel(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\",)}, \"paddle\")\n def asinh(self, name=None):\n return paddle_frontend.asinh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def asin(self, name=None):\n return paddle_frontend.asin(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cosh(self, name=None):\n return paddle_frontend.cosh(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float64\",\n 
\"complex128\",\n \"float32\",\n \"complex64\",\n \"bool\",\n )\n },\n \"paddle\",\n )\n def diagonal(self, offset, axis1=0, axis2=1, name=None):\n return paddle_frontend.diagonal(self, offset=offset, axis1=axis1, axis2=axis2)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def log(self, name=None):\n return paddle_frontend.log(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sin(self, name=None):\n return paddle_frontend.sin(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sinh(self, name=None):\n return paddle_frontend.sinh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def lerp(self, y, weight, name=None):\n return paddle_frontend.lerp(self, y, weight)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def lerp_(self, y, weight, name=None):\n self.ivy_array = paddle_frontend.lerp(self, y, weight).ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.argmax(self, axis=axis, keepdim=keepdim, dtype=dtype)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"uint16\")}, \"paddle\")\n def unsqueeze(self, axis=None, name=None):\n return paddle_frontend.Tensor(ivy.expand_dims(self._ivy_array, axis=axis))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sqrt(self, name=None):\n return paddle_frontend.sqrt(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def sqrt_(self, name=None):\n self.ivy_array = self.sqrt().ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"bfloat16\", \"uint16\")}, \"paddle\")\n def zero_(self):\n self.ivy_array = paddle_frontend.zeros_like(self).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cos(self, name=None):\n return paddle_frontend.cos(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def exp(self, name=None):\n return paddle_frontend.exp(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def exp_(self, name=None):\n self.ivy_array = self.exp().ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def erf(self, name=None):\n return paddle_frontend.erf(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def subtract(self, y, name=None):\n return paddle_frontend.subtract(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint8\", \"int8\", \"bool\")}, \"paddle\"\n )\n def subtract_(self, y, name=None):\n self.ivy_array = self.subtract(y).ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def log10(self, name=None):\n return paddle_frontend.Tensor(ivy.log10(self._ivy_array))\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def argsort(self, axis=-1, descending=False, name=None):\n return paddle_frontend.argsort(self, axis=axis, descending=descending)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", 
\"bfloat16\")}, \"paddle\")\n def floor(self, name=None):\n return paddle_frontend.floor(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def floor_(self):\n self.ivy_array = self.floor().ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def round_(self, name=None):\n self.ivy_array = paddle_frontend.round(self).ivy_array\n return self\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def clip(self, min=None, max=None, name=None):\n ivy.utils.assertions.check_all_or_any_fn(\n min,\n max,\n fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of min or max can be None\",\n )\n if min is None:\n ret = ivy.minimum(self._ivy_array, max)\n elif max is None:\n ret = ivy.maximum(self._ivy_array, min)\n else:\n ret = ivy.clip(self._ivy_array, min, max)\n return paddle_frontend.Tensor(ret)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def clip_(self, min=None, max=None, name=None):\n self._ivy_array = self.clip(min, max).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def tanh(self, name=None):\n return paddle_frontend.tanh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def add(self, y, name=None):\n return paddle_frontend.Tensor(ivy.add(self._ivy_array, _to_ivy_array(y)))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def add_(self, y, name=None):\n self.ivy_array = paddle_frontend.add(self, y).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def addmm(self, x, y, beta=1.0, alpha=1.0, name=None):\n return paddle_frontend.addmm(self, x, y, beta, alpha)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def isinf(self, name=None):\n return paddle_frontend.isinf(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"uint16\")}, \"paddle\")\n def unsqueeze_(self, axis=None, name=None):\n self.ivy_array = self.unsqueeze(axis=axis).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def square(self, name=None):\n return paddle_frontend.square(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def remainder_(self, y, name=None):\n self.ivy_array = paddle_frontend.remainder(self, y).ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cholesky(self, upper=False, name=None):\n return paddle_frontend.cholesky(self, upper=upper)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint16\", \"int16\")}, \"paddle\"\n )\n def squeeze(self, axis=None, name=None):\n if isinstance(axis, int) and self.ndim > 0:\n if self.shape[axis] > 1:\n return self\n if len(self.shape) == 0:\n return self\n return paddle_frontend.squeeze(self, axis=axis)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint16\", \"int16\")}, \"paddle\"\n )\n def squeeze_(self, axis=None, name=None):\n self.ivy_array = paddle_frontend.squeeze(self, axis=axis).ivy_array\n return self\n\n @with_unsupported_dtypes({\"2.5.2 and 
below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def multiply(self, y, name=None):\n return paddle_frontend.multiply(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def matmul(self, y, transpose_x=False, transpose_y=False, name=None):\n return paddle_frontend.matmul(\n self, y, transpose_x=transpose_x, transpose_y=transpose_y\n )\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def isfinite(self, name=None):\n return paddle_frontend.isfinite(self)\n\n @with_supported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def all(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.Tensor(\n ivy.all(self.ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\n )\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):\n return paddle_frontend.allclose(\n self, other, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def sort(self, axis=-1, descending=False, name=None):\n return paddle_frontend.sort(self, axis=axis, descending=descending)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def log1p(self, name=None):\n return paddle_frontend.log1p(self)\n\n @with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def bitwise_and(self, y, out=None, name=None):\n return paddle_frontend.bitwise_and(self, y)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_or(self, y, out=None, name=None):\n return paddle_frontend.logical_or(self, y, out=out)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def bitwise_xor(self, y, out=None, name=None):\n return paddle_frontend.bitwise_xor(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def any(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.any(self, axis=axis, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": \"bfloat16\"}, \"paddle\")\n def astype(self, dtype):\n return paddle_frontend.Tensor(ivy.astype(self._ivy_array, dtype))\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def bitwise_not(self, out=None, name=None):\n return paddle_frontend.bitwise_not(self, out=out)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def bitwise_or(self, y, out=None, name=None):\n return paddle_frontend.bitwise_or(self, y, out=out)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_xor(self, y, out=None, name=None):\n return paddle_frontend.logical_xor(self, y, out=out)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n 
)\n def isnan(self, name=None):\n return paddle_frontend.isnan(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def greater_than(self, y, name=None):\n return paddle_frontend.greater_than(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def rsqrt(self, name=None):\n return paddle_frontend.rsqrt(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def rsqrt_(self, name=None):\n self.ivy_array = self.rsqrt().ivy_array\n return self\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def reciprocal(self, name=None):\n return paddle_frontend.reciprocal(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_and(self, y, out=None, name=None):\n return paddle_frontend.logical_and(self, y, out=out)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def divide(self, y, name=None):\n return paddle_frontend.divide(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"paddle\",\n )\n def eigvals(self, name=None):\n return paddle_frontend.eigvals(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def less_than(self, y, name=None):\n return paddle_frontend.less_than(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def cumprod(self, dim=None, dtype=None, name=None):\n return paddle_frontend.cumprod(self, dim=dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def cumsum(self, axis=None, dtype=None, name=None):\n return paddle_frontend.Tensor(\n ivy.cumsum(self._ivy_array, axis=axis, dtype=dtype)\n )\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n )\n def angle(self, name=None):\n return paddle_frontend.angle(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def equal(self, y, name=None):\n return paddle_frontend.equal(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def rad2deg(self, name=None):\n return paddle_frontend.rad2deg(self)\n\n @with_unsupported_dtypes(\n {\n \"2.5.2 and below\": (\n \"uint8\",\n \"int8\",\n \"int16\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n )\n def equal_all(self, y, name=None):\n return paddle_frontend.equal_all(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def maximum(self, other, name=None):\n return paddle_frontend.maximum(self, other)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": \"bfloat16\"}, \"paddle\")\n def fmax(self, y, name=None):\n return paddle_frontend.fmax(self, y)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": \"bfloat16\"}, \"paddle\")\n def fmin(self, y, name=None):\n return paddle_frontend.fmin(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": 
(\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def minimum(self, y, name=None):\n return paddle_frontend.minimum(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def max(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.max(self, axis=axis, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def deg2rad(self, name=None):\n return paddle_frontend.deg2rad(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def digamma(self, name=None):\n return paddle_frontend.digamma(self)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\", \"bool\")}, \"paddle\"\n )\n def rot90(self, k=1, axes=(0, 1), name=None):\n return paddle_frontend.rot90(self, k=k, axes=axes)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n )\n def imag(self, name=None):\n return paddle_frontend.imag(self)\n\n def is_tensor(self):\n return paddle_frontend.is_tensor(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def isclose(self, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):\n return paddle_frontend.isclose(\n self, y, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"int32\", \"int64\")}, \"paddle\")\n def floor_divide(self, y, name=None):\n return paddle_frontend.floor_divide(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"int32\", \"int64\")}, \"paddle\")\n def mod(self, y, name=None):\n return paddle_frontend.Tensor(ivy.fmod(self._ivy_array, _to_ivy_array(y)))\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def floor_mod(self, y, name=None):\n return paddle_frontend.remainder(self, y)\n\n # cond\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def cond(self, p=None, name=None):\n return paddle_frontend.cond(self, p=p, name=name)\n\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n def conj(self, name=None):\n return paddle_frontend.conj(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def log2(self, name=None):\n return paddle_frontend.log2(self)\n\n @with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def neg(self, name=None):\n return paddle_frontend.neg(self)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def logical_not(self, out=None, name=None):\n return paddle_frontend.logical_not(self)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def sign(self, name=None):\n return paddle_frontend.sign(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def var(self, axis=None, unbiased=True, keepdim=False, name=None):\n return paddle_frontend.var(self, axis=axis, unbiased=unbiased, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def sgn(self, name=None):\n return paddle_frontend.sgn(self)\n\n def tolist(self):\n return 
paddle_frontend.Tensor(ivy.to_list(self._ivy_array))\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def min(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.min(self, axis=axis, keepdim=keepdim)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def pow(self, y, name=None):\n return paddle_frontend.pow(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def prod(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.Tensor(\n ivy.prod(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\n )\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def atan(self, name=None):\n return paddle_frontend.atan(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def atanh(self, name=None):\n return paddle_frontend.atanh(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def std(self, axis=None, unbiased=True, keepdim=False, name=None):\n return paddle_frontend.std(self, axis=axis, unbiased=unbiased, keepdim=keepdim)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def trunc(self, name=None):\n return paddle_frontend.trunc(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"complex64\", \"complex128\")}, \"paddle\")\n def as_real(self, name=None):\n if not ivy.is_complex_dtype(self._ivy_array):\n raise ivy.exceptions.IvyError(\n \"as_real is only supported for complex tensors\"\n )\n re_part = ivy.real(self._ivy_array)\n im_part = ivy.imag(self._ivy_array)\n return paddle_frontend.Tensor(ivy.stack((re_part, im_part), axis=-1))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def stanh(self, scale_a=0.67, scale_b=1.7159, name=None):\n return paddle_frontend.stanh(self, scale_a=scale_a, scale_b=scale_b)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def trace(self, offset=0, axis1=0, axis2=1, name=None):\n return paddle_frontend.Tensor(\n ivy.trace(self._ivy_array, offset=offset, axis1=axis1, axis2=axis2)\n )\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n )\n def flatten(self, start_axis=0, stop_axis=-1, name=None):\n if len(self.shape) == 0:\n return self.unsqueeze(axis=0)\n return paddle_frontend.Tensor(\n ivy.flatten(self.ivy_array, start_dim=start_axis, end_dim=stop_axis)\n )\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"float32\",\n \"float64\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n )\n def argmin(self, axis=None, keepdim=False, dtype=None, name=None):\n return paddle_frontend.argmin(self, axis=axis, keepdim=keepdim, dtype=dtype)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def topk(self, k, axis=None, largest=True, sorted=True, name=None):\n return paddle_frontend.topk(self, k, axis=axis, largest=largest, sorted=sorted)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def remainder(self, y, 
name=None):\n return paddle_frontend.remainder(self, y)\n\n def is_floating_point(self):\n return paddle_frontend.is_floating_point(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def tanh_(self, name=None):\n y = self.tanh(self)\n return ivy.inplace_update(self, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def reciprocal_(self, name=None):\n y = self.reciprocal(self)\n return ivy.inplace_update(self, y)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"complex\", \"uint8\", \"uint16\")}, \"paddle\"\n )\n def numpy(self):\n return self.ivy_array.to_numpy()\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def nonzero(self):\n return paddle_frontend.nonzero(self)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def inner(self, y, name=None):\n return paddle_frontend.inner(self, y, name)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def acos(self, name=None):\n return paddle_frontend.Tensor(ivy.acos(self._ivy_array))\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def mean(self, axis=None, keepdim=False, name=None):\n return paddle_frontend.mean(self, axis=axis, keepdim=keepdim)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n def as_complex(self, name=None):\n if self.ivy_array.shape[-1] != 2:\n raise ivy.exceptions.IvyError(\n \"The size of the last dimension of tensor does not equals 2\"\n )\n dtype = (\n ivy.complex64 if ivy.dtype(self.ivy_array) == \"float32\" else ivy.complex128\n )\n re_part = self.ivy_array[..., 0]\n im_part = ivy.multiply(1j, self.ivy_array[..., 1])\n value = paddle_frontend.Tensor(ivy.add(re_part, im_part).astype(dtype))\n return value\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\", \"bool\")}, \"paddle\"\n )\n def not_equal(self, y, name=None):\n return paddle_frontend.not_equal(self._ivy_array, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def less_equal(self, y, name=None):\n return paddle_frontend.less_equal(self._ivy_array, y)\n\n @with_supported_dtypes({\"2.5.2 and below\": (\"complex64\", \"complex128\")}, \"paddle\")\n def real(self, name=None):\n return paddle_frontend.real(self._ivy_array)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def t(self, name=None):\n axes = list(range(len(self.ivy_array.shape)))[::-1]\n return ivy.permute_dims(self.ivy_array, axes=axes)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n )\n def cast(self, dtype):\n return paddle_frontend.cast(self, dtype)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def bmm(self, y, transpose_x=False, transpose_y=False, name=None):\n return paddle_frontend.bmm(self, y, transpose_x, transpose_y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n )\n def fill_(self, value):\n filled_tensor = paddle_frontend.full_like(self, value)\n return ivy.inplace_update(self, filled_tensor)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n 
\"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def unbind(self, axis=0):\n return paddle_frontend.unbind(self._ivy_array, axis=axis)\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n )\n def unique_consecutive(self, axis=0):\n return paddle_frontend.unique_consecutive(self._ivy_array, axis=axis)\n\n def cpu(self):\n self.ivy_array = ivy.to_device(self.ivy_array, ivy.as_ivy_dev(\"cpu\"))\n return self\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n )\n def split(self, num_or_sections, axis=0, name=None):\n return paddle_frontend.split(self._ivy_array, num_or_sections, axis, name)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def frac(self, name=None):\n return paddle_frontend.frac(self._ivy_array)\n\n @with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n def gather(self, y, name=None):\n return paddle_frontend.gather(self, y)\n\n def is_complex(self):\n return paddle_frontend.is_complex(self)\n\n @with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"uint8\", \"int8\", \"bool\")}, \"paddle\"\n )\n def gather_(self, y, name=None):\n res = self.gather(self, y)\n return ivy.inplace_update(self, res)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n )\n def heaviside(self, y, name=None):\n return paddle_frontend.heaviside(self, y)\n\n @with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"int32\", \"int64\", \"float32\", \"float64\")}, \"paddle\"\n )\n def expand(self, shape, name=None):\n return paddle_frontend.expand(self._ivy_array, shape)\n\n @with_supported_device_and_dtypes(\n {\n \"2.5.2 and below\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n }\n },\n \"paddle\",\n )\n def tile(self, repeat_times):\n return paddle_frontend.Tensor(ivy.tile(self._ivy_array, repeats=repeat_times))\n\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n )\n def chunk(self, chunks, axis=0, name=None):\n return paddle_frontend.split(self._ivy_array, num_or_sections=chunks, axis=axis)\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}]} |
gh_patches_debug_1330 | rasdani/github-patches | git_diff | twisted__twisted-1650 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MaildirMessage cannot work with a file in text mode nor in binary mode
|[vmario](https://github.com/vmario)| @vmario reported|
|-|-|
|Trac ID|trac#10244|
|Type|defect|
|Created|2021-07-30 10:16:47Z|
I've tried to use a `MaildirMessage` object (Twisted v21.7.0) with a file opened in text mode, but I got this:
```
2021-07-30T12:05:59+0200 [ESMTP (TLSMemoryBIOProtocol),0,10.127.1.50] Unhandled Error
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/twisted/protocols/policies.py", line 109, in dataReceived
self.wrappedProtocol.dataReceived(data)
File "/home/user/.local/lib/python3.8/site-packages/twisted/protocols/basic.py", line 439, in dataReceived
self.lineReceived(line)
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py", line 501, in lineReceived
return getattr(self, "state_" + self.mode)(line)
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py", line 515, in state_COMMAND
method(b"")
--- <exception caught here> ---
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py", line 690, in do_DATA
msg.lineReceived(rcvdhdr)
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/maildir.py", line 143, in lineReceived
mail.FileMessage.lineReceived(self, line)
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/mail.py", line 434, in lineReceived
self.fp.write(line + b"\n")
builtins.TypeError: write() argument must be str, not bytes
```
This is expected, because you cannot write bytes to a file opened in text mode. Unfortunately, with the file opened in binary mode I got this:
```
2021-07-30T12:03:10+0200 [ESMTP (TLSMemoryBIOProtocol),0,10.127.1.50] Unhandled Error
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/twisted/protocols/policies.py", line 109, in dataReceived
self.wrappedProtocol.dataReceived(data)
File "/home/user/.local/lib/python3.8/site-packages/twisted/protocols/basic.py", line 439, in dataReceived
self.lineReceived(line)
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py", line 501, in lineReceived
return getattr(self, "state_" + self.mode)(line)
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py", line 515, in state_COMMAND
method(b"")
--- <exception caught here> ---
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py", line 687, in do_DATA
msg = msgFunc()
File "/home/user/Kodowanie/Git/RKPProxy/smtp/client.py", line 80, in <lambda>
return lambda: LocalMessageFactory(user, self.usersDirPath).message()
File "/home/user/Kodowanie/Git/RKPProxy/smtp/client.py", line 217, in message
return maildir.MaildirMessage(
File "/home/user/.local/lib/python3.8/site-packages/twisted/mail/maildir.py", line 132, in __init__
fp.write(header)
builtins.TypeError: a bytes-like object is required, not 'str'
```
`MaildirMessage` writes a `Delivered-To` header to the file, but that header is a `str`, not `bytes`.
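
For illustration, the mismatch can be reproduced without Twisted at all (a minimal sketch; the address value below is invented):

```python
import io

address = "alice@example.com"              # example value, not from the report
header = "Delivered-To: %s\n" % address    # str, as MaildirMessage.__init__ builds it

# A binary file object rejects the str header:
binary_fp = io.BytesIO()
try:
    binary_fp.write(header)
except TypeError as err:
    print("binary mode:", err)             # a bytes-like object is required, not 'str'

# A text file object accepts the header, but rejects the bytes lines that
# mail.FileMessage.lineReceived writes afterwards:
text_fp = io.StringIO()
text_fp.write(header)
try:
    text_fp.write(b"Subject: hello" + b"\n")
except TypeError as err:
    print("text mode:", err)               # the text stream refuses bytes, like the real file
```

So whichever mode the file is opened in, one of the two writes raises `TypeError`.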
<details><summary>Searchable metadata</summary>
```
trac-id__10244 10244
type__defect defect
reporter__vmario vmario
priority__normal normal
milestone__None None
branch__
branch_author__
status__new new
resolution__None None
component__mail mail
keywords__None None
time__1627640207974143 1627640207974143
changetime__1656140124886537 1656140124886537
version__None None
owner__vmario vmario
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/mail/maildir.py`
Content:
```
1 # -*- test-case-name: twisted.mail.test.test_mail -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5
6 """
7 Maildir-style mailbox support.
8 """
9
10 import io
11 import os
12 import socket
13 import stat
14 from hashlib import md5
15 from typing import IO
16
17 from zope.interface import implementer
18
19 from twisted.cred import checkers, credentials, portal
20 from twisted.cred.error import UnauthorizedLogin
21 from twisted.internet import defer, interfaces, reactor
22 from twisted.mail import mail, pop3, smtp
23 from twisted.persisted import dirdbm
24 from twisted.protocols import basic
25 from twisted.python import failure, log
26
27 INTERNAL_ERROR = """\
28 From: Twisted.mail Internals
29 Subject: An Error Occurred
30
31 An internal server error has occurred. Please contact the
32 server administrator.
33 """
34
35
36 class _MaildirNameGenerator:
37 """
38 A utility class to generate a unique maildir name.
39
40 @type n: L{int}
41 @ivar n: A counter used to generate unique integers.
42
43 @type p: L{int}
44 @ivar p: The ID of the current process.
45
46 @type s: L{bytes}
47 @ivar s: A representation of the hostname.
48
49 @ivar _clock: See C{clock} parameter of L{__init__}.
50 """
51
52 n = 0
53 p = os.getpid()
54 s = socket.gethostname().replace("/", r"\057").replace(":", r"\072")
55
56 def __init__(self, clock):
57 """
58 @type clock: L{IReactorTime <interfaces.IReactorTime>} provider
59 @param clock: A reactor which will be used to learn the current time.
60 """
61 self._clock = clock
62
63 def generate(self):
64 """
65 Generate a string which is intended to be unique across all calls to
66 this function (across all processes, reboots, etc).
67
68 Strings returned by earlier calls to this method will compare less
69 than strings returned by later calls as long as the clock provided
70 doesn't go backwards.
71
72 @rtype: L{bytes}
73 @return: A unique string.
74 """
75 self.n = self.n + 1
76 t = self._clock.seconds()
77 seconds = str(int(t))
78 microseconds = "%07d" % (int((t - int(t)) * 10e6),)
79 return f"{seconds}.M{microseconds}P{self.p}Q{self.n}.{self.s}"
80
81
82 _generateMaildirName = _MaildirNameGenerator(reactor).generate
83
84
85 def initializeMaildir(dir):
86 """
87 Create a maildir user directory if it doesn't already exist.
88
89 @type dir: L{bytes}
90 @param dir: The path name for a user directory.
91 """
92 dir = os.fsdecode(dir)
93 if not os.path.isdir(dir):
94 os.mkdir(dir, 0o700)
95 for subdir in ["new", "cur", "tmp", ".Trash"]:
96 os.mkdir(os.path.join(dir, subdir), 0o700)
97 for subdir in ["new", "cur", "tmp"]:
98 os.mkdir(os.path.join(dir, ".Trash", subdir), 0o700)
99 # touch
100 open(os.path.join(dir, ".Trash", "maildirfolder"), "w").close()
101
102
103 class MaildirMessage(mail.FileMessage):
104 """
105 A message receiver which adds a header and delivers a message to a file
106 whose name includes the size of the message.
107
108 @type size: L{int}
109 @ivar size: The number of octets in the message.
110 """
111
112 size = None
113
114 def __init__(self, address, fp, *a, **kw):
115 """
116 @type address: L{bytes}
117 @param address: The address of the message recipient.
118
119 @type fp: file-like object
120 @param fp: The file in which to store the message while it is being
121 received.
122
123 @type a: 2-L{tuple} of (0) L{bytes}, (1) L{bytes}
124 @param a: Positional arguments for L{FileMessage.__init__}.
125
126 @type kw: L{dict}
127 @param kw: Keyword arguments for L{FileMessage.__init__}.
128 """
129 header = "Delivered-To: %s\n" % address
130 fp.write(header)
131 self.size = len(header)
132 mail.FileMessage.__init__(self, fp, *a, **kw)
133
134 def lineReceived(self, line):
135 """
136 Write a line to the file.
137
138 @type line: L{bytes}
139 @param line: A received line.
140 """
141 mail.FileMessage.lineReceived(self, line)
142 self.size += len(line) + 1
143
144 def eomReceived(self):
145 """
146 At the end of message, rename the file holding the message to its final
147 name concatenated with the size of the file.
148
149 @rtype: L{Deferred <defer.Deferred>} which successfully results in
150 L{bytes}
151 @return: A deferred which returns the name of the file holding the
152 message.
153 """
154 self.finalName = self.finalName + ",S=%d" % self.size
155 return mail.FileMessage.eomReceived(self)
156
157
158 @implementer(mail.IAliasableDomain)
159 class AbstractMaildirDomain:
160 """
161 An abstract maildir-backed domain.
162
163 @type alias: L{None} or L{dict} mapping
164 L{bytes} to L{AliasBase}
165 @ivar alias: A mapping of username to alias.
166
167 @ivar root: See L{__init__}.
168 """
169
170 alias = None
171 root = None
172
173 def __init__(self, service, root):
174 """
175 @type service: L{MailService}
176 @param service: An email service.
177
178 @type root: L{bytes}
179 @param root: The maildir root directory.
180 """
181 self.root = root
182
183 def userDirectory(self, user):
184 """
185 Return the maildir directory for a user.
186
187 @type user: L{bytes}
188 @param user: A username.
189
190 @rtype: L{bytes} or L{None}
191 @return: The user's mail directory for a valid user. Otherwise,
192 L{None}.
193 """
194 return None
195
196 def setAliasGroup(self, alias):
197 """
198 Set the group of defined aliases for this domain.
199
200 @type alias: L{dict} mapping L{bytes} to L{IAlias} provider.
201 @param alias: A mapping of domain name to alias.
202 """
203 self.alias = alias
204
205 def exists(self, user, memo=None):
206 """
207 Check whether a user exists in this domain or an alias of it.
208
209 @type user: L{User}
210 @param user: A user.
211
212 @type memo: L{None} or L{dict} of L{AliasBase}
213 @param memo: A record of the addresses already considered while
214 resolving aliases. The default value should be used by all
215 external code.
216
217 @rtype: no-argument callable which returns L{IMessage <smtp.IMessage>}
218 provider.
219 @return: A function which takes no arguments and returns a message
220 receiver for the user.
221
222 @raises SMTPBadRcpt: When the given user does not exist in this domain
223 or an alias of it.
224 """
225 if self.userDirectory(user.dest.local) is not None:
226 return lambda: self.startMessage(user)
227 try:
228 a = self.alias[user.dest.local]
229 except BaseException:
230 raise smtp.SMTPBadRcpt(user)
231 else:
232 aliases = a.resolve(self.alias, memo)
233 if aliases:
234 return lambda: aliases
235 log.err("Bad alias configuration: " + str(user))
236 raise smtp.SMTPBadRcpt(user)
237
238 def startMessage(self, user):
239 """
240 Create a maildir message for a user.
241
242 @type user: L{bytes}
243 @param user: A username.
244
245 @rtype: L{MaildirMessage}
246 @return: A message receiver for this user.
247 """
248 if isinstance(user, str):
249 name, domain = user.split("@", 1)
250 else:
251 name, domain = user.dest.local, user.dest.domain
252 dir = self.userDirectory(name)
253 fname = _generateMaildirName()
254 filename = os.path.join(dir, "tmp", fname)
255 fp = open(filename, "w")
256 return MaildirMessage(
257 f"{name}@{domain}", fp, filename, os.path.join(dir, "new", fname)
258 )
259
260 def willRelay(self, user, protocol):
261 """
262 Check whether this domain will relay.
263
264 @type user: L{Address}
265 @param user: The destination address.
266
267 @type protocol: L{SMTP}
268 @param protocol: The protocol over which the message to be relayed is
269 being received.
270
271 @rtype: L{bool}
272 @return: An indication of whether this domain will relay the message to
273 the destination.
274 """
275 return False
276
277 def addUser(self, user, password):
278 """
279 Add a user to this domain.
280
281 Subclasses should override this method.
282
283 @type user: L{bytes}
284 @param user: A username.
285
286 @type password: L{bytes}
287 @param password: A password.
288 """
289 raise NotImplementedError
290
291 def getCredentialsCheckers(self):
292 """
293 Return credentials checkers for this domain.
294
295 Subclasses should override this method.
296
297 @rtype: L{list} of L{ICredentialsChecker
298 <checkers.ICredentialsChecker>} provider
299 @return: Credentials checkers for this domain.
300 """
301 raise NotImplementedError
302
303
304 @implementer(interfaces.IConsumer)
305 class _MaildirMailboxAppendMessageTask:
306 """
307 A task which adds a message to a maildir mailbox.
308
309 @ivar mbox: See L{__init__}.
310
311 @type defer: L{Deferred <defer.Deferred>} which successfully returns
312 L{None}
313 @ivar defer: A deferred which fires when the task has completed.
314
315 @type opencall: L{IDelayedCall <interfaces.IDelayedCall>} provider or
316 L{None}
317 @ivar opencall: A scheduled call to L{prodProducer}.
318
319 @type msg: file-like object
320 @ivar msg: The message to add.
321
322 @type tmpname: L{bytes}
323 @ivar tmpname: The pathname of the temporary file holding the message while
324 it is being transferred.
325
326 @type fh: file
327 @ivar fh: The new maildir file.
328
329 @type filesender: L{FileSender <basic.FileSender>}
330 @ivar filesender: A file sender which sends the message.
331
332 @type myproducer: L{IProducer <interfaces.IProducer>}
333 @ivar myproducer: The registered producer.
334
335 @type streaming: L{bool}
336 @ivar streaming: Indicates whether the registered producer provides a
337 streaming interface.
338 """
339
340 osopen = staticmethod(os.open)
341 oswrite = staticmethod(os.write)
342 osclose = staticmethod(os.close)
343 osrename = staticmethod(os.rename)
344
345 def __init__(self, mbox, msg):
346 """
347 @type mbox: L{MaildirMailbox}
348 @param mbox: A maildir mailbox.
349
350 @type msg: L{bytes} or file-like object
351 @param msg: The message to add.
352 """
353 self.mbox = mbox
354 self.defer = defer.Deferred()
355 self.openCall = None
356 if not hasattr(msg, "read"):
357 msg = io.BytesIO(msg)
358 self.msg = msg
359
360 def startUp(self):
361 """
362 Start transferring the message to the mailbox.
363 """
364 self.createTempFile()
365 if self.fh != -1:
366 self.filesender = basic.FileSender()
367 self.filesender.beginFileTransfer(self.msg, self)
368
369 def registerProducer(self, producer, streaming):
370 """
371 Register a producer and start asking it for data if it is
372 non-streaming.
373
374 @type producer: L{IProducer <interfaces.IProducer>}
375 @param producer: A producer.
376
377 @type streaming: L{bool}
378 @param streaming: A flag indicating whether the producer provides a
379 streaming interface.
380 """
381 self.myproducer = producer
382 self.streaming = streaming
383 if not streaming:
384 self.prodProducer()
385
386 def prodProducer(self):
387 """
388 Repeatedly prod a non-streaming producer to produce data.
389 """
390 self.openCall = None
391 if self.myproducer is not None:
392 self.openCall = reactor.callLater(0, self.prodProducer)
393 self.myproducer.resumeProducing()
394
395 def unregisterProducer(self):
396 """
397 Finish transferring the message to the mailbox.
398 """
399 self.myproducer = None
400 self.streaming = None
401 self.osclose(self.fh)
402 self.moveFileToNew()
403
404 def write(self, data):
405 """
406 Write data to the maildir file.
407
408 @type data: L{bytes}
409 @param data: Data to be written to the file.
410 """
411 try:
412 self.oswrite(self.fh, data)
413 except BaseException:
414 self.fail()
415
416 def fail(self, err=None):
417 """
418 Fire the deferred to indicate the task completed with a failure.
419
420 @type err: L{Failure <failure.Failure>}
421 @param err: The error that occurred.
422 """
423 if err is None:
424 err = failure.Failure()
425 if self.openCall is not None:
426 self.openCall.cancel()
427 self.defer.errback(err)
428 self.defer = None
429
430 def moveFileToNew(self):
431 """
432 Place the message in the I{new/} directory, add it to the mailbox and
433 fire the deferred to indicate that the task has completed
434 successfully.
435 """
436 while True:
437 newname = os.path.join(self.mbox.path, "new", _generateMaildirName())
438 try:
439 self.osrename(self.tmpname, newname)
440 break
441 except OSError as e:
442 (err, estr) = e.args
443 import errno
444
445 # if the newname exists, retry with a new newname.
446 if err != errno.EEXIST:
447 self.fail()
448 newname = None
449 break
450 if newname is not None:
451 self.mbox.list.append(newname)
452 self.defer.callback(None)
453 self.defer = None
454
455 def createTempFile(self):
456 """
457 Create a temporary file to hold the message as it is being transferred.
458 """
459 attr = (
460 os.O_RDWR
461 | os.O_CREAT
462 | os.O_EXCL
463 | getattr(os, "O_NOINHERIT", 0)
464 | getattr(os, "O_NOFOLLOW", 0)
465 )
466 tries = 0
467 self.fh = -1
468 while True:
469 self.tmpname = os.path.join(self.mbox.path, "tmp", _generateMaildirName())
470 try:
471 self.fh = self.osopen(self.tmpname, attr, 0o600)
472 return None
473 except OSError:
474 tries += 1
475 if tries > 500:
476 self.defer.errback(
477 RuntimeError(
478 "Could not create tmp file for %s" % self.mbox.path
479 )
480 )
481 self.defer = None
482 return None
483
484
485 class MaildirMailbox(pop3.Mailbox):
486 """
487 A maildir-backed mailbox.
488
489 @ivar path: See L{__init__}.
490
491 @type list: L{list} of L{int} or 2-L{tuple} of (0) file-like object,
492 (1) L{bytes}
493 @ivar list: Information about the messages in the mailbox. For undeleted
494 messages, the file containing the message and the
495 full path name of the file are stored. Deleted messages are indicated
496 by 0.
497
498 @type deleted: L{dict} mapping 2-L{tuple} of (0) file-like object,
499 (1) L{bytes} to L{bytes}
500 @type deleted: A mapping of the information about a file before it was
501 deleted to the full path name of the deleted file in the I{.Trash/}
502 subfolder.
503 """
504
505 AppendFactory = _MaildirMailboxAppendMessageTask
506
507 def __init__(self, path):
508 """
509 @type path: L{bytes}
510 @param path: The directory name for a maildir mailbox.
511 """
512 self.path = path
513 self.list = []
514 self.deleted = {}
515 initializeMaildir(path)
516 for name in ("cur", "new"):
517 for file in os.listdir(os.path.join(path, name)):
518 self.list.append((file, os.path.join(path, name, file)))
519 self.list.sort()
520 self.list = [e[1] for e in self.list]
521
522 def listMessages(self, i=None):
523 """
524 Retrieve the size of a message, or, if none is specified, the size of
525 each message in the mailbox.
526
527 @type i: L{int} or L{None}
528 @param i: The 0-based index of a message.
529
530 @rtype: L{int} or L{list} of L{int}
531 @return: The number of octets in the specified message, or, if an index
532 is not specified, a list of the number of octets for all messages
533 in the mailbox. Any value which corresponds to a deleted message
534 is set to 0.
535
536 @raise IndexError: When the index does not correspond to a message in
537 the mailbox.
538 """
539 if i is None:
540 ret = []
541 for mess in self.list:
542 if mess:
543 ret.append(os.stat(mess)[stat.ST_SIZE])
544 else:
545 ret.append(0)
546 return ret
547 return self.list[i] and os.stat(self.list[i])[stat.ST_SIZE] or 0
548
549 def getMessage(self, i):
550 """
551 Retrieve a file-like object with the contents of a message.
552
553 @type i: L{int}
554 @param i: The 0-based index of a message.
555
556 @rtype: file-like object
557 @return: A file containing the message.
558
559 @raise IndexError: When the index does not correspond to a message in
560 the mailbox.
561 """
562 return open(self.list[i])
563
564 def getUidl(self, i):
565 """
566 Get a unique identifier for a message.
567
568 @type i: L{int}
569 @param i: The 0-based index of a message.
570
571 @rtype: L{bytes}
572 @return: A string of printable characters uniquely identifying the
573 message for all time.
574
575 @raise IndexError: When the index does not correspond to a message in
576 the mailbox.
577 """
578 # Returning the actual filename is a mistake. Hash it.
579 base = os.path.basename(self.list[i])
580 return md5(base).hexdigest()
581
582 def deleteMessage(self, i):
583 """
584 Mark a message for deletion.
585
586 Move the message to the I{.Trash/} subfolder so it can be undeleted
587 by an administrator.
588
589 @type i: L{int}
590 @param i: The 0-based index of a message.
591
592 @raise IndexError: When the index does not correspond to a message in
593 the mailbox.
594 """
595 trashFile = os.path.join(
596 self.path, ".Trash", "cur", os.path.basename(self.list[i])
597 )
598 os.rename(self.list[i], trashFile)
599 self.deleted[self.list[i]] = trashFile
600 self.list[i] = 0
601
602 def undeleteMessages(self):
603 """
604 Undelete all messages marked for deletion.
605
606 Move each message marked for deletion from the I{.Trash/} subfolder back
607 to its original position.
608 """
609 for (real, trash) in self.deleted.items():
610 try:
611 os.rename(trash, real)
612 except OSError as e:
613 (err, estr) = e.args
614 import errno
615
616 # If the file has been deleted from disk, oh well!
617 if err != errno.ENOENT:
618 raise
619 # This is a pass
620 else:
621 try:
622 self.list[self.list.index(0)] = real
623 except ValueError:
624 self.list.append(real)
625 self.deleted.clear()
626
627 def appendMessage(self, txt):
628 """
629 Add a message to the mailbox.
630
631 @type txt: L{bytes} or file-like object
632 @param txt: A message to add.
633
634 @rtype: L{Deferred <defer.Deferred>}
635 @return: A deferred which fires when the message has been added to
636 the mailbox.
637 """
638 task = self.AppendFactory(self, txt)
639 result = task.defer
640 task.startUp()
641 return result
642
643
644 @implementer(pop3.IMailbox)
645 class StringListMailbox:
646 """
647 An in-memory mailbox.
648
649 @ivar msgs: See L{__init__}.
650
651 @type _delete: L{set} of L{int}
652 @ivar _delete: The indices of messages which have been marked for deletion.
653 """
654
655 def __init__(self, msgs):
656 """
657 @type msgs: L{list} of L{bytes}
658 @param msgs: The contents of each message in the mailbox.
659 """
660 self.msgs = msgs
661 self._delete = set()
662
663 def listMessages(self, i=None):
664 """
665 Retrieve the size of a message, or, if none is specified, the size of
666 each message in the mailbox.
667
668 @type i: L{int} or L{None}
669 @param i: The 0-based index of a message.
670
671 @rtype: L{int} or L{list} of L{int}
672 @return: The number of octets in the specified message, or, if an index
673 is not specified, a list of the number of octets in each message in
674 the mailbox. Any value which corresponds to a deleted message is
675 set to 0.
676
677 @raise IndexError: When the index does not correspond to a message in
678 the mailbox.
679 """
680 if i is None:
681 return [self.listMessages(msg) for msg in range(len(self.msgs))]
682 if i in self._delete:
683 return 0
684 return len(self.msgs[i])
685
686 def getMessage(self, i: int) -> IO[bytes]:
687 """
688 Return an in-memory file-like object with the contents of a message.
689
690 @param i: The 0-based index of a message.
691
692 @return: An in-memory file-like object containing the message.
693
694 @raise IndexError: When the index does not correspond to a message in
695 the mailbox.
696 """
697 return io.BytesIO(self.msgs[i])
698
699 def getUidl(self, i):
700 """
701 Get a unique identifier for a message.
702
703 @type i: L{int}
704 @param i: The 0-based index of a message.
705
706 @rtype: L{bytes}
707 @return: A hash of the contents of the message at the given index.
708
709 @raise IndexError: When the index does not correspond to a message in
710 the mailbox.
711 """
712 return md5(self.msgs[i]).hexdigest()
713
714 def deleteMessage(self, i):
715 """
716 Mark a message for deletion.
717
718 @type i: L{int}
719 @param i: The 0-based index of a message to delete.
720
721 @raise IndexError: When the index does not correspond to a message in
722 the mailbox.
723 """
724 self._delete.add(i)
725
726 def undeleteMessages(self):
727 """
728 Undelete any messages which have been marked for deletion.
729 """
730 self._delete = set()
731
732 def sync(self):
733 """
734 Discard the contents of any messages marked for deletion.
735 """
736 for index in self._delete:
737 self.msgs[index] = ""
738 self._delete = set()
739
740
741 @implementer(portal.IRealm)
742 class MaildirDirdbmDomain(AbstractMaildirDomain):
743 """
744 A maildir-backed domain where membership is checked with a
745 L{DirDBM <dirdbm.DirDBM>} database.
746
747 The directory structure of a MaildirDirdbmDomain is:
748
749 /passwd <-- a DirDBM directory
750
751 /USER/{cur, new, del} <-- each user has these three directories
752
753 @ivar postmaster: See L{__init__}.
754
755 @type dbm: L{DirDBM <dirdbm.DirDBM>}
756 @ivar dbm: The authentication database for the domain.
757 """
758
759 portal = None
760 _credcheckers = None
761
762 def __init__(self, service, root, postmaster=0):
763 """
764 @type service: L{MailService}
765 @param service: An email service.
766
767 @type root: L{bytes}
768 @param root: The maildir root directory.
769
770 @type postmaster: L{bool}
771 @param postmaster: A flag indicating whether non-existent addresses
772 should be forwarded to the postmaster (C{True}) or
773 bounced (C{False}).
774 """
775 root = os.fsencode(root)
776 AbstractMaildirDomain.__init__(self, service, root)
777 dbm = os.path.join(root, b"passwd")
778 if not os.path.exists(dbm):
779 os.makedirs(dbm)
780 self.dbm = dirdbm.open(dbm)
781 self.postmaster = postmaster
782
783 def userDirectory(self, name):
784 """
785 Return the path to a user's mail directory.
786
787 @type name: L{bytes}
788 @param name: A username.
789
790 @rtype: L{bytes} or L{None}
791 @return: The path to the user's mail directory for a valid user. For
792 an invalid user, the path to the postmaster's mailbox if bounces
793 are redirected there. Otherwise, L{None}.
794 """
795 if name not in self.dbm:
796 if not self.postmaster:
797 return None
798 name = "postmaster"
799 dir = os.path.join(self.root, name)
800 if not os.path.exists(dir):
801 initializeMaildir(dir)
802 return dir
803
804 def addUser(self, user, password):
805 """
806 Add a user to this domain by adding an entry in the authentication
807 database and initializing the user's mail directory.
808
809 @type user: L{bytes}
810 @param user: A username.
811
812 @type password: L{bytes}
813 @param password: A password.
814 """
815 self.dbm[user] = password
816 # Ensure it is initialized
817 self.userDirectory(user)
818
819 def getCredentialsCheckers(self):
820 """
821 Return credentials checkers for this domain.
822
823 @rtype: L{list} of L{ICredentialsChecker
824 <checkers.ICredentialsChecker>} provider
825 @return: Credentials checkers for this domain.
826 """
827 if self._credcheckers is None:
828 self._credcheckers = [DirdbmDatabase(self.dbm)]
829 return self._credcheckers
830
831 def requestAvatar(self, avatarId, mind, *interfaces):
832 """
833 Get the mailbox for an authenticated user.
834
835 The mailbox for the authenticated user will be returned only if the
836 given interfaces include L{IMailbox <pop3.IMailbox>}. Requests for
837 anonymous access will be met with a mailbox containing a message
838 indicating that an internal error has occurred.
839
840 @type avatarId: L{bytes} or C{twisted.cred.checkers.ANONYMOUS}
841 @param avatarId: A string which identifies a user or an object which
842 signals a request for anonymous access.
843
844 @type mind: L{None}
845 @param mind: Unused.
846
847 @type interfaces: n-L{tuple} of C{zope.interface.Interface}
848 @param interfaces: A group of interfaces, one of which the avatar
849 must support.
850
851 @rtype: 3-L{tuple} of (0) L{IMailbox <pop3.IMailbox>},
852 (1) L{IMailbox <pop3.IMailbox>} provider, (2) no-argument
853 callable
854 @return: A tuple of the supported interface, a mailbox, and a
855 logout function.
856
857 @raise NotImplementedError: When the given interfaces do not include
858 L{IMailbox <pop3.IMailbox>}.
859 """
860 if pop3.IMailbox not in interfaces:
861 raise NotImplementedError("No interface")
862 if avatarId == checkers.ANONYMOUS:
863 mbox = StringListMailbox([INTERNAL_ERROR])
864 else:
865 mbox = MaildirMailbox(os.path.join(self.root, avatarId))
866
867 return (pop3.IMailbox, mbox, lambda: None)
868
869
870 @implementer(checkers.ICredentialsChecker)
871 class DirdbmDatabase:
872 """
873 A credentials checker which authenticates users out of a
874 L{DirDBM <dirdbm.DirDBM>} database.
875
876 @type dirdbm: L{DirDBM <dirdbm.DirDBM>}
877 @ivar dirdbm: An authentication database.
878 """
879
880 # credentialInterfaces is not used by the class
881 credentialInterfaces = (
882 credentials.IUsernamePassword,
883 credentials.IUsernameHashedPassword,
884 )
885
886 def __init__(self, dbm):
887 """
888 @type dbm: L{DirDBM <dirdbm.DirDBM>}
889 @param dbm: An authentication database.
890 """
891 self.dirdbm = dbm
892
893 def requestAvatarId(self, c):
894 """
895 Authenticate a user and, if successful, return their username.
896
897 @type c: L{IUsernamePassword <credentials.IUsernamePassword>} or
898 L{IUsernameHashedPassword <credentials.IUsernameHashedPassword>}
899 provider.
900 @param c: Credentials.
901
902 @rtype: L{bytes}
903 @return: A string which identifies an user.
904
905 @raise UnauthorizedLogin: When the credentials check fails.
906 """
907 if c.username in self.dirdbm:
908 if c.checkPassword(self.dirdbm[c.username]):
909 return c.username
910 raise UnauthorizedLogin()
911
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/twisted/mail/maildir.py b/src/twisted/mail/maildir.py
--- a/src/twisted/mail/maildir.py
+++ b/src/twisted/mail/maildir.py
@@ -126,7 +126,7 @@
@type kw: L{dict}
@param kw: Keyword arguments for L{FileMessage.__init__}.
"""
- header = "Delivered-To: %s\n" % address
+ header = b"Delivered-To: %s\n" % address
fp.write(header)
self.size = len(header)
mail.FileMessage.__init__(self, fp, *a, **kw)
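
A quick sanity check of the patched line (a standalone sketch, not part of the record; the address value is invented) confirms the header is now written as bytes:

```python
import io

address = b"alice@example.com"            # the docstring says the address is bytes
header = b"Delivered-To: %s\n" % address  # bytes %-formatting (Python 3.5+, PEP 461)

fp = io.BytesIO()                         # stands in for a file opened in binary mode
fp.write(header)
fp.write(b"Subject: hello\n")             # lines delivered by FileMessage are bytes too
print(fp.getvalue())
```

Note that `AbstractMaildirDomain.startMessage` in the file above still opens the temporary file with `open(filename, "w")` (text mode), so the caller must supply a binary-mode file object for these bytes writes to succeed; that change is outside the scope of this diff.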
| {"golden_diff": "diff --git a/src/twisted/mail/maildir.py b/src/twisted/mail/maildir.py\n--- a/src/twisted/mail/maildir.py\n+++ b/src/twisted/mail/maildir.py\n@@ -126,7 +126,7 @@\n @type kw: L{dict}\n @param kw: Keyword arguments for L{FileMessage.__init__}.\n \"\"\"\n- header = \"Delivered-To: %s\\n\" % address\n+ header = b\"Delivered-To: %s\\n\" % address\n fp.write(header)\n self.size = len(header)\n mail.FileMessage.__init__(self, fp, *a, **kw)\n", "issue": "MaildirMessage cannot work with a file in text mode nor in binary mode\n|[<img alt=\"vmario's avatar\" src=\"https://avatars.githubusercontent.com/u/62034?s=50\" width=\"50\" height=\"50\">](https://github.com/vmario)| @vmario reported|\n|-|-|\n|Trac ID|trac#10244|\n|Type|defect|\n|Created|2021-07-30 10:16:47Z|\n\nI've tried to use `MaildirMessage` object (Twisted v21.7.0) with file opened in text mode but I've got this:\n\n```\n2021-07-30T12:05:59+0200 [ESMTP (TLSMemoryBIOProtocol),0,10.127.1.50] Unhandled Error\n Traceback (most recent call last):\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/protocols/policies.py\", line 109, in dataReceived\n self.wrappedProtocol.dataReceived(data)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/protocols/basic.py\", line 439, in dataReceived\n self.lineReceived(line)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py\", line 501, in lineReceived\n return getattr(self, \"state_\" + self.mode)(line)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py\", line 515, in state_COMMAND\n method(b\"\")\n --- <exception caught here> ---\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py\", line 690, in do_DATA\n msg.lineReceived(rcvdhdr)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/maildir.py\", line 143, in lineReceived\n mail.FileMessage.lineReceived(self, line)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/mail.py\", line 434, in lineReceived\n self.fp.write(line + b\"\\n\")\n builtins.TypeError: write() argument must be str, not bytes\n```\n\nThis is clear because you cannot write bytes to the text file. 
Unfortunately with file in binary mode I've got this:\n\n```\n2021-07-30T12:03:10+0200 [ESMTP (TLSMemoryBIOProtocol),0,10.127.1.50] Unhandled Error\n Traceback (most recent call last):\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/protocols/policies.py\", line 109, in dataReceived\n self.wrappedProtocol.dataReceived(data)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/protocols/basic.py\", line 439, in dataReceived\n self.lineReceived(line)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py\", line 501, in lineReceived\n return getattr(self, \"state_\" + self.mode)(line)\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py\", line 515, in state_COMMAND\n method(b\"\")\n --- <exception caught here> ---\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/smtp.py\", line 687, in do_DATA\n msg = msgFunc()\n File \"/home/user/Kodowanie/Git/RKPProxy/smtp/client.py\", line 80, in <lambda>\n return lambda: LocalMessageFactory(user, self.usersDirPath).message()\n File \"/home/user/Kodowanie/Git/RKPProxy/smtp/client.py\", line 217, in message\n return maildir.MaildirMessage(\n File \"/home/user/.local/lib/python3.8/site-packages/twisted/mail/maildir.py\", line 132, in __init__\n fp.write(header)\n builtins.TypeError: a bytes-like object is required, not 'str'\n```\n\n`MaildirMessage` writes header to the file but this header is string not bytes.\n\n<details><summary>Searchable metadata</summary>\n\n```\ntrac-id__10244 10244\ntype__defect defect\nreporter__vmario vmario\npriority__normal normal\nmilestone__None None\nbranch__ \nbranch_author__ \nstatus__new new\nresolution__None None\ncomponent__mail mail\nkeywords__None None\ntime__1627640207974143 1627640207974143\nchangetime__1656140124886537 1656140124886537\nversion__None None\nowner__vmario vmario\n\n```\n</details>\n\n", "before_files": [{"content": "# -*- test-case-name: twisted.mail.test.test_mail -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\n\"\"\"\nMaildir-style mailbox support.\n\"\"\"\n\nimport io\nimport os\nimport socket\nimport stat\nfrom hashlib import md5\nfrom typing import IO\n\nfrom zope.interface import implementer\n\nfrom twisted.cred import checkers, credentials, portal\nfrom twisted.cred.error import UnauthorizedLogin\nfrom twisted.internet import defer, interfaces, reactor\nfrom twisted.mail import mail, pop3, smtp\nfrom twisted.persisted import dirdbm\nfrom twisted.protocols import basic\nfrom twisted.python import failure, log\n\nINTERNAL_ERROR = \"\"\"\\\nFrom: Twisted.mail Internals\nSubject: An Error Occurred\n\n An internal server error has occurred. 
Please contact the\n server administrator.\n\"\"\"\n\n\nclass _MaildirNameGenerator:\n \"\"\"\n A utility class to generate a unique maildir name.\n\n @type n: L{int}\n @ivar n: A counter used to generate unique integers.\n\n @type p: L{int}\n @ivar p: The ID of the current process.\n\n @type s: L{bytes}\n @ivar s: A representation of the hostname.\n\n @ivar _clock: See C{clock} parameter of L{__init__}.\n \"\"\"\n\n n = 0\n p = os.getpid()\n s = socket.gethostname().replace(\"/\", r\"\\057\").replace(\":\", r\"\\072\")\n\n def __init__(self, clock):\n \"\"\"\n @type clock: L{IReactorTime <interfaces.IReactorTime>} provider\n @param clock: A reactor which will be used to learn the current time.\n \"\"\"\n self._clock = clock\n\n def generate(self):\n \"\"\"\n Generate a string which is intended to be unique across all calls to\n this function (across all processes, reboots, etc).\n\n Strings returned by earlier calls to this method will compare less\n than strings returned by later calls as long as the clock provided\n doesn't go backwards.\n\n @rtype: L{bytes}\n @return: A unique string.\n \"\"\"\n self.n = self.n + 1\n t = self._clock.seconds()\n seconds = str(int(t))\n microseconds = \"%07d\" % (int((t - int(t)) * 10e6),)\n return f\"{seconds}.M{microseconds}P{self.p}Q{self.n}.{self.s}\"\n\n\n_generateMaildirName = _MaildirNameGenerator(reactor).generate\n\n\ndef initializeMaildir(dir):\n \"\"\"\n Create a maildir user directory if it doesn't already exist.\n\n @type dir: L{bytes}\n @param dir: The path name for a user directory.\n \"\"\"\n dir = os.fsdecode(dir)\n if not os.path.isdir(dir):\n os.mkdir(dir, 0o700)\n for subdir in [\"new\", \"cur\", \"tmp\", \".Trash\"]:\n os.mkdir(os.path.join(dir, subdir), 0o700)\n for subdir in [\"new\", \"cur\", \"tmp\"]:\n os.mkdir(os.path.join(dir, \".Trash\", subdir), 0o700)\n # touch\n open(os.path.join(dir, \".Trash\", \"maildirfolder\"), \"w\").close()\n\n\nclass MaildirMessage(mail.FileMessage):\n \"\"\"\n A message receiver which adds a header and delivers a message to a file\n whose name includes the size of the message.\n\n @type size: L{int}\n @ivar size: The number of octets in the message.\n \"\"\"\n\n size = None\n\n def __init__(self, address, fp, *a, **kw):\n \"\"\"\n @type address: L{bytes}\n @param address: The address of the message recipient.\n\n @type fp: file-like object\n @param fp: The file in which to store the message while it is being\n received.\n\n @type a: 2-L{tuple} of (0) L{bytes}, (1) L{bytes}\n @param a: Positional arguments for L{FileMessage.__init__}.\n\n @type kw: L{dict}\n @param kw: Keyword arguments for L{FileMessage.__init__}.\n \"\"\"\n header = \"Delivered-To: %s\\n\" % address\n fp.write(header)\n self.size = len(header)\n mail.FileMessage.__init__(self, fp, *a, **kw)\n\n def lineReceived(self, line):\n \"\"\"\n Write a line to the file.\n\n @type line: L{bytes}\n @param line: A received line.\n \"\"\"\n mail.FileMessage.lineReceived(self, line)\n self.size += len(line) + 1\n\n def eomReceived(self):\n \"\"\"\n At the end of message, rename the file holding the message to its final\n name concatenated with the size of the file.\n\n @rtype: L{Deferred <defer.Deferred>} which successfully results in\n L{bytes}\n @return: A deferred which returns the name of the file holding the\n message.\n \"\"\"\n self.finalName = self.finalName + \",S=%d\" % self.size\n return mail.FileMessage.eomReceived(self)\n\n\n@implementer(mail.IAliasableDomain)\nclass AbstractMaildirDomain:\n \"\"\"\n An abstract maildir-backed 
domain.\n\n @type alias: L{None} or L{dict} mapping\n L{bytes} to L{AliasBase}\n @ivar alias: A mapping of username to alias.\n\n @ivar root: See L{__init__}.\n \"\"\"\n\n alias = None\n root = None\n\n def __init__(self, service, root):\n \"\"\"\n @type service: L{MailService}\n @param service: An email service.\n\n @type root: L{bytes}\n @param root: The maildir root directory.\n \"\"\"\n self.root = root\n\n def userDirectory(self, user):\n \"\"\"\n Return the maildir directory for a user.\n\n @type user: L{bytes}\n @param user: A username.\n\n @rtype: L{bytes} or L{None}\n @return: The user's mail directory for a valid user. Otherwise,\n L{None}.\n \"\"\"\n return None\n\n def setAliasGroup(self, alias):\n \"\"\"\n Set the group of defined aliases for this domain.\n\n @type alias: L{dict} mapping L{bytes} to L{IAlias} provider.\n @param alias: A mapping of domain name to alias.\n \"\"\"\n self.alias = alias\n\n def exists(self, user, memo=None):\n \"\"\"\n Check whether a user exists in this domain or an alias of it.\n\n @type user: L{User}\n @param user: A user.\n\n @type memo: L{None} or L{dict} of L{AliasBase}\n @param memo: A record of the addresses already considered while\n resolving aliases. The default value should be used by all\n external code.\n\n @rtype: no-argument callable which returns L{IMessage <smtp.IMessage>}\n provider.\n @return: A function which takes no arguments and returns a message\n receiver for the user.\n\n @raises SMTPBadRcpt: When the given user does not exist in this domain\n or an alias of it.\n \"\"\"\n if self.userDirectory(user.dest.local) is not None:\n return lambda: self.startMessage(user)\n try:\n a = self.alias[user.dest.local]\n except BaseException:\n raise smtp.SMTPBadRcpt(user)\n else:\n aliases = a.resolve(self.alias, memo)\n if aliases:\n return lambda: aliases\n log.err(\"Bad alias configuration: \" + str(user))\n raise smtp.SMTPBadRcpt(user)\n\n def startMessage(self, user):\n \"\"\"\n Create a maildir message for a user.\n\n @type user: L{bytes}\n @param user: A username.\n\n @rtype: L{MaildirMessage}\n @return: A message receiver for this user.\n \"\"\"\n if isinstance(user, str):\n name, domain = user.split(\"@\", 1)\n else:\n name, domain = user.dest.local, user.dest.domain\n dir = self.userDirectory(name)\n fname = _generateMaildirName()\n filename = os.path.join(dir, \"tmp\", fname)\n fp = open(filename, \"w\")\n return MaildirMessage(\n f\"{name}@{domain}\", fp, filename, os.path.join(dir, \"new\", fname)\n )\n\n def willRelay(self, user, protocol):\n \"\"\"\n Check whether this domain will relay.\n\n @type user: L{Address}\n @param user: The destination address.\n\n @type protocol: L{SMTP}\n @param protocol: The protocol over which the message to be relayed is\n being received.\n\n @rtype: L{bool}\n @return: An indication of whether this domain will relay the message to\n the destination.\n \"\"\"\n return False\n\n def addUser(self, user, password):\n \"\"\"\n Add a user to this domain.\n\n Subclasses should override this method.\n\n @type user: L{bytes}\n @param user: A username.\n\n @type password: L{bytes}\n @param password: A password.\n \"\"\"\n raise NotImplementedError\n\n def getCredentialsCheckers(self):\n \"\"\"\n Return credentials checkers for this domain.\n\n Subclasses should override this method.\n\n @rtype: L{list} of L{ICredentialsChecker\n <checkers.ICredentialsChecker>} provider\n @return: Credentials checkers for this domain.\n \"\"\"\n raise NotImplementedError\n\n\n@implementer(interfaces.IConsumer)\nclass 
_MaildirMailboxAppendMessageTask:\n \"\"\"\n A task which adds a message to a maildir mailbox.\n\n @ivar mbox: See L{__init__}.\n\n @type defer: L{Deferred <defer.Deferred>} which successfully returns\n L{None}\n @ivar defer: A deferred which fires when the task has completed.\n\n @type opencall: L{IDelayedCall <interfaces.IDelayedCall>} provider or\n L{None}\n @ivar opencall: A scheduled call to L{prodProducer}.\n\n @type msg: file-like object\n @ivar msg: The message to add.\n\n @type tmpname: L{bytes}\n @ivar tmpname: The pathname of the temporary file holding the message while\n it is being transferred.\n\n @type fh: file\n @ivar fh: The new maildir file.\n\n @type filesender: L{FileSender <basic.FileSender>}\n @ivar filesender: A file sender which sends the message.\n\n @type myproducer: L{IProducer <interfaces.IProducer>}\n @ivar myproducer: The registered producer.\n\n @type streaming: L{bool}\n @ivar streaming: Indicates whether the registered producer provides a\n streaming interface.\n \"\"\"\n\n osopen = staticmethod(os.open)\n oswrite = staticmethod(os.write)\n osclose = staticmethod(os.close)\n osrename = staticmethod(os.rename)\n\n def __init__(self, mbox, msg):\n \"\"\"\n @type mbox: L{MaildirMailbox}\n @param mbox: A maildir mailbox.\n\n @type msg: L{bytes} or file-like object\n @param msg: The message to add.\n \"\"\"\n self.mbox = mbox\n self.defer = defer.Deferred()\n self.openCall = None\n if not hasattr(msg, \"read\"):\n msg = io.BytesIO(msg)\n self.msg = msg\n\n def startUp(self):\n \"\"\"\n Start transferring the message to the mailbox.\n \"\"\"\n self.createTempFile()\n if self.fh != -1:\n self.filesender = basic.FileSender()\n self.filesender.beginFileTransfer(self.msg, self)\n\n def registerProducer(self, producer, streaming):\n \"\"\"\n Register a producer and start asking it for data if it is\n non-streaming.\n\n @type producer: L{IProducer <interfaces.IProducer>}\n @param producer: A producer.\n\n @type streaming: L{bool}\n @param streaming: A flag indicating whether the producer provides a\n streaming interface.\n \"\"\"\n self.myproducer = producer\n self.streaming = streaming\n if not streaming:\n self.prodProducer()\n\n def prodProducer(self):\n \"\"\"\n Repeatedly prod a non-streaming producer to produce data.\n \"\"\"\n self.openCall = None\n if self.myproducer is not None:\n self.openCall = reactor.callLater(0, self.prodProducer)\n self.myproducer.resumeProducing()\n\n def unregisterProducer(self):\n \"\"\"\n Finish transferring the message to the mailbox.\n \"\"\"\n self.myproducer = None\n self.streaming = None\n self.osclose(self.fh)\n self.moveFileToNew()\n\n def write(self, data):\n \"\"\"\n Write data to the maildir file.\n\n @type data: L{bytes}\n @param data: Data to be written to the file.\n \"\"\"\n try:\n self.oswrite(self.fh, data)\n except BaseException:\n self.fail()\n\n def fail(self, err=None):\n \"\"\"\n Fire the deferred to indicate the task completed with a failure.\n\n @type err: L{Failure <failure.Failure>}\n @param err: The error that occurred.\n \"\"\"\n if err is None:\n err = failure.Failure()\n if self.openCall is not None:\n self.openCall.cancel()\n self.defer.errback(err)\n self.defer = None\n\n def moveFileToNew(self):\n \"\"\"\n Place the message in the I{new/} directory, add it to the mailbox and\n fire the deferred to indicate that the task has completed\n successfully.\n \"\"\"\n while True:\n newname = os.path.join(self.mbox.path, \"new\", _generateMaildirName())\n try:\n self.osrename(self.tmpname, newname)\n break\n 
except OSError as e:\n (err, estr) = e.args\n import errno\n\n # if the newname exists, retry with a new newname.\n if err != errno.EEXIST:\n self.fail()\n newname = None\n break\n if newname is not None:\n self.mbox.list.append(newname)\n self.defer.callback(None)\n self.defer = None\n\n def createTempFile(self):\n \"\"\"\n Create a temporary file to hold the message as it is being transferred.\n \"\"\"\n attr = (\n os.O_RDWR\n | os.O_CREAT\n | os.O_EXCL\n | getattr(os, \"O_NOINHERIT\", 0)\n | getattr(os, \"O_NOFOLLOW\", 0)\n )\n tries = 0\n self.fh = -1\n while True:\n self.tmpname = os.path.join(self.mbox.path, \"tmp\", _generateMaildirName())\n try:\n self.fh = self.osopen(self.tmpname, attr, 0o600)\n return None\n except OSError:\n tries += 1\n if tries > 500:\n self.defer.errback(\n RuntimeError(\n \"Could not create tmp file for %s\" % self.mbox.path\n )\n )\n self.defer = None\n return None\n\n\nclass MaildirMailbox(pop3.Mailbox):\n \"\"\"\n A maildir-backed mailbox.\n\n @ivar path: See L{__init__}.\n\n @type list: L{list} of L{int} or 2-L{tuple} of (0) file-like object,\n (1) L{bytes}\n @ivar list: Information about the messages in the mailbox. For undeleted\n messages, the file containing the message and the\n full path name of the file are stored. Deleted messages are indicated\n by 0.\n\n @type deleted: L{dict} mapping 2-L{tuple} of (0) file-like object,\n (1) L{bytes} to L{bytes}\n @type deleted: A mapping of the information about a file before it was\n deleted to the full path name of the deleted file in the I{.Trash/}\n subfolder.\n \"\"\"\n\n AppendFactory = _MaildirMailboxAppendMessageTask\n\n def __init__(self, path):\n \"\"\"\n @type path: L{bytes}\n @param path: The directory name for a maildir mailbox.\n \"\"\"\n self.path = path\n self.list = []\n self.deleted = {}\n initializeMaildir(path)\n for name in (\"cur\", \"new\"):\n for file in os.listdir(os.path.join(path, name)):\n self.list.append((file, os.path.join(path, name, file)))\n self.list.sort()\n self.list = [e[1] for e in self.list]\n\n def listMessages(self, i=None):\n \"\"\"\n Retrieve the size of a message, or, if none is specified, the size of\n each message in the mailbox.\n\n @type i: L{int} or L{None}\n @param i: The 0-based index of a message.\n\n @rtype: L{int} or L{list} of L{int}\n @return: The number of octets in the specified message, or, if an index\n is not specified, a list of the number of octets for all messages\n in the mailbox. 
Any value which corresponds to a deleted message\n is set to 0.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n if i is None:\n ret = []\n for mess in self.list:\n if mess:\n ret.append(os.stat(mess)[stat.ST_SIZE])\n else:\n ret.append(0)\n return ret\n return self.list[i] and os.stat(self.list[i])[stat.ST_SIZE] or 0\n\n def getMessage(self, i):\n \"\"\"\n Retrieve a file-like object with the contents of a message.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @rtype: file-like object\n @return: A file containing the message.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n return open(self.list[i])\n\n def getUidl(self, i):\n \"\"\"\n Get a unique identifier for a message.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @rtype: L{bytes}\n @return: A string of printable characters uniquely identifying the\n message for all time.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n # Returning the actual filename is a mistake. Hash it.\n base = os.path.basename(self.list[i])\n return md5(base).hexdigest()\n\n def deleteMessage(self, i):\n \"\"\"\n Mark a message for deletion.\n\n Move the message to the I{.Trash/} subfolder so it can be undeleted\n by an administrator.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n trashFile = os.path.join(\n self.path, \".Trash\", \"cur\", os.path.basename(self.list[i])\n )\n os.rename(self.list[i], trashFile)\n self.deleted[self.list[i]] = trashFile\n self.list[i] = 0\n\n def undeleteMessages(self):\n \"\"\"\n Undelete all messages marked for deletion.\n\n Move each message marked for deletion from the I{.Trash/} subfolder back\n to its original position.\n \"\"\"\n for (real, trash) in self.deleted.items():\n try:\n os.rename(trash, real)\n except OSError as e:\n (err, estr) = e.args\n import errno\n\n # If the file has been deleted from disk, oh well!\n if err != errno.ENOENT:\n raise\n # This is a pass\n else:\n try:\n self.list[self.list.index(0)] = real\n except ValueError:\n self.list.append(real)\n self.deleted.clear()\n\n def appendMessage(self, txt):\n \"\"\"\n Add a message to the mailbox.\n\n @type txt: L{bytes} or file-like object\n @param txt: A message to add.\n\n @rtype: L{Deferred <defer.Deferred>}\n @return: A deferred which fires when the message has been added to\n the mailbox.\n \"\"\"\n task = self.AppendFactory(self, txt)\n result = task.defer\n task.startUp()\n return result\n\n\n@implementer(pop3.IMailbox)\nclass StringListMailbox:\n \"\"\"\n An in-memory mailbox.\n\n @ivar msgs: See L{__init__}.\n\n @type _delete: L{set} of L{int}\n @ivar _delete: The indices of messages which have been marked for deletion.\n \"\"\"\n\n def __init__(self, msgs):\n \"\"\"\n @type msgs: L{list} of L{bytes}\n @param msgs: The contents of each message in the mailbox.\n \"\"\"\n self.msgs = msgs\n self._delete = set()\n\n def listMessages(self, i=None):\n \"\"\"\n Retrieve the size of a message, or, if none is specified, the size of\n each message in the mailbox.\n\n @type i: L{int} or L{None}\n @param i: The 0-based index of a message.\n\n @rtype: L{int} or L{list} of L{int}\n @return: The number of octets in the specified message, or, if an index\n is not specified, a list of the number of octets in each message in\n the mailbox. 
Any value which corresponds to a deleted message is\n set to 0.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n if i is None:\n return [self.listMessages(msg) for msg in range(len(self.msgs))]\n if i in self._delete:\n return 0\n return len(self.msgs[i])\n\n def getMessage(self, i: int) -> IO[bytes]:\n \"\"\"\n Return an in-memory file-like object with the contents of a message.\n\n @param i: The 0-based index of a message.\n\n @return: An in-memory file-like object containing the message.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n return io.BytesIO(self.msgs[i])\n\n def getUidl(self, i):\n \"\"\"\n Get a unique identifier for a message.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @rtype: L{bytes}\n @return: A hash of the contents of the message at the given index.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n return md5(self.msgs[i]).hexdigest()\n\n def deleteMessage(self, i):\n \"\"\"\n Mark a message for deletion.\n\n @type i: L{int}\n @param i: The 0-based index of a message to delete.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n self._delete.add(i)\n\n def undeleteMessages(self):\n \"\"\"\n Undelete any messages which have been marked for deletion.\n \"\"\"\n self._delete = set()\n\n def sync(self):\n \"\"\"\n Discard the contents of any messages marked for deletion.\n \"\"\"\n for index in self._delete:\n self.msgs[index] = \"\"\n self._delete = set()\n\n\n@implementer(portal.IRealm)\nclass MaildirDirdbmDomain(AbstractMaildirDomain):\n \"\"\"\n A maildir-backed domain where membership is checked with a\n L{DirDBM <dirdbm.DirDBM>} database.\n\n The directory structure of a MaildirDirdbmDomain is:\n\n /passwd <-- a DirDBM directory\n\n /USER/{cur, new, del} <-- each user has these three directories\n\n @ivar postmaster: See L{__init__}.\n\n @type dbm: L{DirDBM <dirdbm.DirDBM>}\n @ivar dbm: The authentication database for the domain.\n \"\"\"\n\n portal = None\n _credcheckers = None\n\n def __init__(self, service, root, postmaster=0):\n \"\"\"\n @type service: L{MailService}\n @param service: An email service.\n\n @type root: L{bytes}\n @param root: The maildir root directory.\n\n @type postmaster: L{bool}\n @param postmaster: A flag indicating whether non-existent addresses\n should be forwarded to the postmaster (C{True}) or\n bounced (C{False}).\n \"\"\"\n root = os.fsencode(root)\n AbstractMaildirDomain.__init__(self, service, root)\n dbm = os.path.join(root, b\"passwd\")\n if not os.path.exists(dbm):\n os.makedirs(dbm)\n self.dbm = dirdbm.open(dbm)\n self.postmaster = postmaster\n\n def userDirectory(self, name):\n \"\"\"\n Return the path to a user's mail directory.\n\n @type name: L{bytes}\n @param name: A username.\n\n @rtype: L{bytes} or L{None}\n @return: The path to the user's mail directory for a valid user. For\n an invalid user, the path to the postmaster's mailbox if bounces\n are redirected there. 
Otherwise, L{None}.\n \"\"\"\n if name not in self.dbm:\n if not self.postmaster:\n return None\n name = \"postmaster\"\n dir = os.path.join(self.root, name)\n if not os.path.exists(dir):\n initializeMaildir(dir)\n return dir\n\n def addUser(self, user, password):\n \"\"\"\n Add a user to this domain by adding an entry in the authentication\n database and initializing the user's mail directory.\n\n @type user: L{bytes}\n @param user: A username.\n\n @type password: L{bytes}\n @param password: A password.\n \"\"\"\n self.dbm[user] = password\n # Ensure it is initialized\n self.userDirectory(user)\n\n def getCredentialsCheckers(self):\n \"\"\"\n Return credentials checkers for this domain.\n\n @rtype: L{list} of L{ICredentialsChecker\n <checkers.ICredentialsChecker>} provider\n @return: Credentials checkers for this domain.\n \"\"\"\n if self._credcheckers is None:\n self._credcheckers = [DirdbmDatabase(self.dbm)]\n return self._credcheckers\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n \"\"\"\n Get the mailbox for an authenticated user.\n\n The mailbox for the authenticated user will be returned only if the\n given interfaces include L{IMailbox <pop3.IMailbox>}. Requests for\n anonymous access will be met with a mailbox containing a message\n indicating that an internal error has occurred.\n\n @type avatarId: L{bytes} or C{twisted.cred.checkers.ANONYMOUS}\n @param avatarId: A string which identifies a user or an object which\n signals a request for anonymous access.\n\n @type mind: L{None}\n @param mind: Unused.\n\n @type interfaces: n-L{tuple} of C{zope.interface.Interface}\n @param interfaces: A group of interfaces, one of which the avatar\n must support.\n\n @rtype: 3-L{tuple} of (0) L{IMailbox <pop3.IMailbox>},\n (1) L{IMailbox <pop3.IMailbox>} provider, (2) no-argument\n callable\n @return: A tuple of the supported interface, a mailbox, and a\n logout function.\n\n @raise NotImplementedError: When the given interfaces do not include\n L{IMailbox <pop3.IMailbox>}.\n \"\"\"\n if pop3.IMailbox not in interfaces:\n raise NotImplementedError(\"No interface\")\n if avatarId == checkers.ANONYMOUS:\n mbox = StringListMailbox([INTERNAL_ERROR])\n else:\n mbox = MaildirMailbox(os.path.join(self.root, avatarId))\n\n return (pop3.IMailbox, mbox, lambda: None)\n\n\n@implementer(checkers.ICredentialsChecker)\nclass DirdbmDatabase:\n \"\"\"\n A credentials checker which authenticates users out of a\n L{DirDBM <dirdbm.DirDBM>} database.\n\n @type dirdbm: L{DirDBM <dirdbm.DirDBM>}\n @ivar dirdbm: An authentication database.\n \"\"\"\n\n # credentialInterfaces is not used by the class\n credentialInterfaces = (\n credentials.IUsernamePassword,\n credentials.IUsernameHashedPassword,\n )\n\n def __init__(self, dbm):\n \"\"\"\n @type dbm: L{DirDBM <dirdbm.DirDBM>}\n @param dbm: An authentication database.\n \"\"\"\n self.dirdbm = dbm\n\n def requestAvatarId(self, c):\n \"\"\"\n Authenticate a user and, if successful, return their username.\n\n @type c: L{IUsernamePassword <credentials.IUsernamePassword>} or\n L{IUsernameHashedPassword <credentials.IUsernameHashedPassword>}\n provider.\n @param c: Credentials.\n\n @rtype: L{bytes}\n @return: A string which identifies an user.\n\n @raise UnauthorizedLogin: When the credentials check fails.\n \"\"\"\n if c.username in self.dirdbm:\n if c.checkPassword(self.dirdbm[c.username]):\n return c.username\n raise UnauthorizedLogin()\n", "path": "src/twisted/mail/maildir.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.mail.test.test_mail 
-*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\n\"\"\"\nMaildir-style mailbox support.\n\"\"\"\n\nimport io\nimport os\nimport socket\nimport stat\nfrom hashlib import md5\nfrom typing import IO\n\nfrom zope.interface import implementer\n\nfrom twisted.cred import checkers, credentials, portal\nfrom twisted.cred.error import UnauthorizedLogin\nfrom twisted.internet import defer, interfaces, reactor\nfrom twisted.mail import mail, pop3, smtp\nfrom twisted.persisted import dirdbm\nfrom twisted.protocols import basic\nfrom twisted.python import failure, log\n\nINTERNAL_ERROR = \"\"\"\\\nFrom: Twisted.mail Internals\nSubject: An Error Occurred\n\n An internal server error has occurred. Please contact the\n server administrator.\n\"\"\"\n\n\nclass _MaildirNameGenerator:\n \"\"\"\n A utility class to generate a unique maildir name.\n\n @type n: L{int}\n @ivar n: A counter used to generate unique integers.\n\n @type p: L{int}\n @ivar p: The ID of the current process.\n\n @type s: L{bytes}\n @ivar s: A representation of the hostname.\n\n @ivar _clock: See C{clock} parameter of L{__init__}.\n \"\"\"\n\n n = 0\n p = os.getpid()\n s = socket.gethostname().replace(\"/\", r\"\\057\").replace(\":\", r\"\\072\")\n\n def __init__(self, clock):\n \"\"\"\n @type clock: L{IReactorTime <interfaces.IReactorTime>} provider\n @param clock: A reactor which will be used to learn the current time.\n \"\"\"\n self._clock = clock\n\n def generate(self):\n \"\"\"\n Generate a string which is intended to be unique across all calls to\n this function (across all processes, reboots, etc).\n\n Strings returned by earlier calls to this method will compare less\n than strings returned by later calls as long as the clock provided\n doesn't go backwards.\n\n @rtype: L{bytes}\n @return: A unique string.\n \"\"\"\n self.n = self.n + 1\n t = self._clock.seconds()\n seconds = str(int(t))\n microseconds = \"%07d\" % (int((t - int(t)) * 10e6),)\n return f\"{seconds}.M{microseconds}P{self.p}Q{self.n}.{self.s}\"\n\n\n_generateMaildirName = _MaildirNameGenerator(reactor).generate\n\n\ndef initializeMaildir(dir):\n \"\"\"\n Create a maildir user directory if it doesn't already exist.\n\n @type dir: L{bytes}\n @param dir: The path name for a user directory.\n \"\"\"\n dir = os.fsdecode(dir)\n if not os.path.isdir(dir):\n os.mkdir(dir, 0o700)\n for subdir in [\"new\", \"cur\", \"tmp\", \".Trash\"]:\n os.mkdir(os.path.join(dir, subdir), 0o700)\n for subdir in [\"new\", \"cur\", \"tmp\"]:\n os.mkdir(os.path.join(dir, \".Trash\", subdir), 0o700)\n # touch\n open(os.path.join(dir, \".Trash\", \"maildirfolder\"), \"w\").close()\n\n\nclass MaildirMessage(mail.FileMessage):\n \"\"\"\n A message receiver which adds a header and delivers a message to a file\n whose name includes the size of the message.\n\n @type size: L{int}\n @ivar size: The number of octets in the message.\n \"\"\"\n\n size = None\n\n def __init__(self, address, fp, *a, **kw):\n \"\"\"\n @type address: L{bytes}\n @param address: The address of the message recipient.\n\n @type fp: file-like object\n @param fp: The file in which to store the message while it is being\n received.\n\n @type a: 2-L{tuple} of (0) L{bytes}, (1) L{bytes}\n @param a: Positional arguments for L{FileMessage.__init__}.\n\n @type kw: L{dict}\n @param kw: Keyword arguments for L{FileMessage.__init__}.\n \"\"\"\n header = b\"Delivered-To: %s\\n\" % address\n fp.write(header)\n self.size = len(header)\n mail.FileMessage.__init__(self, fp, *a, **kw)\n\n def lineReceived(self, 
line):\n \"\"\"\n Write a line to the file.\n\n @type line: L{bytes}\n @param line: A received line.\n \"\"\"\n mail.FileMessage.lineReceived(self, line)\n self.size += len(line) + 1\n\n def eomReceived(self):\n \"\"\"\n At the end of message, rename the file holding the message to its final\n name concatenated with the size of the file.\n\n @rtype: L{Deferred <defer.Deferred>} which successfully results in\n L{bytes}\n @return: A deferred which returns the name of the file holding the\n message.\n \"\"\"\n self.finalName = self.finalName + \",S=%d\" % self.size\n return mail.FileMessage.eomReceived(self)\n\n\n@implementer(mail.IAliasableDomain)\nclass AbstractMaildirDomain:\n \"\"\"\n An abstract maildir-backed domain.\n\n @type alias: L{None} or L{dict} mapping\n L{bytes} to L{AliasBase}\n @ivar alias: A mapping of username to alias.\n\n @ivar root: See L{__init__}.\n \"\"\"\n\n alias = None\n root = None\n\n def __init__(self, service, root):\n \"\"\"\n @type service: L{MailService}\n @param service: An email service.\n\n @type root: L{bytes}\n @param root: The maildir root directory.\n \"\"\"\n self.root = root\n\n def userDirectory(self, user):\n \"\"\"\n Return the maildir directory for a user.\n\n @type user: L{bytes}\n @param user: A username.\n\n @rtype: L{bytes} or L{None}\n @return: The user's mail directory for a valid user. Otherwise,\n L{None}.\n \"\"\"\n return None\n\n def setAliasGroup(self, alias):\n \"\"\"\n Set the group of defined aliases for this domain.\n\n @type alias: L{dict} mapping L{bytes} to L{IAlias} provider.\n @param alias: A mapping of domain name to alias.\n \"\"\"\n self.alias = alias\n\n def exists(self, user, memo=None):\n \"\"\"\n Check whether a user exists in this domain or an alias of it.\n\n @type user: L{User}\n @param user: A user.\n\n @type memo: L{None} or L{dict} of L{AliasBase}\n @param memo: A record of the addresses already considered while\n resolving aliases. 
The default value should be used by all\n external code.\n\n @rtype: no-argument callable which returns L{IMessage <smtp.IMessage>}\n provider.\n @return: A function which takes no arguments and returns a message\n receiver for the user.\n\n @raises SMTPBadRcpt: When the given user does not exist in this domain\n or an alias of it.\n \"\"\"\n if self.userDirectory(user.dest.local) is not None:\n return lambda: self.startMessage(user)\n try:\n a = self.alias[user.dest.local]\n except BaseException:\n raise smtp.SMTPBadRcpt(user)\n else:\n aliases = a.resolve(self.alias, memo)\n if aliases:\n return lambda: aliases\n log.err(\"Bad alias configuration: \" + str(user))\n raise smtp.SMTPBadRcpt(user)\n\n def startMessage(self, user):\n \"\"\"\n Create a maildir message for a user.\n\n @type user: L{bytes}\n @param user: A username.\n\n @rtype: L{MaildirMessage}\n @return: A message receiver for this user.\n \"\"\"\n if isinstance(user, str):\n name, domain = user.split(\"@\", 1)\n else:\n name, domain = user.dest.local, user.dest.domain\n dir = self.userDirectory(name)\n fname = _generateMaildirName()\n filename = os.path.join(dir, \"tmp\", fname)\n fp = open(filename, \"w\")\n return MaildirMessage(\n f\"{name}@{domain}\", fp, filename, os.path.join(dir, \"new\", fname)\n )\n\n def willRelay(self, user, protocol):\n \"\"\"\n Check whether this domain will relay.\n\n @type user: L{Address}\n @param user: The destination address.\n\n @type protocol: L{SMTP}\n @param protocol: The protocol over which the message to be relayed is\n being received.\n\n @rtype: L{bool}\n @return: An indication of whether this domain will relay the message to\n the destination.\n \"\"\"\n return False\n\n def addUser(self, user, password):\n \"\"\"\n Add a user to this domain.\n\n Subclasses should override this method.\n\n @type user: L{bytes}\n @param user: A username.\n\n @type password: L{bytes}\n @param password: A password.\n \"\"\"\n raise NotImplementedError\n\n def getCredentialsCheckers(self):\n \"\"\"\n Return credentials checkers for this domain.\n\n Subclasses should override this method.\n\n @rtype: L{list} of L{ICredentialsChecker\n <checkers.ICredentialsChecker>} provider\n @return: Credentials checkers for this domain.\n \"\"\"\n raise NotImplementedError\n\n\n@implementer(interfaces.IConsumer)\nclass _MaildirMailboxAppendMessageTask:\n \"\"\"\n A task which adds a message to a maildir mailbox.\n\n @ivar mbox: See L{__init__}.\n\n @type defer: L{Deferred <defer.Deferred>} which successfully returns\n L{None}\n @ivar defer: A deferred which fires when the task has completed.\n\n @type opencall: L{IDelayedCall <interfaces.IDelayedCall>} provider or\n L{None}\n @ivar opencall: A scheduled call to L{prodProducer}.\n\n @type msg: file-like object\n @ivar msg: The message to add.\n\n @type tmpname: L{bytes}\n @ivar tmpname: The pathname of the temporary file holding the message while\n it is being transferred.\n\n @type fh: file\n @ivar fh: The new maildir file.\n\n @type filesender: L{FileSender <basic.FileSender>}\n @ivar filesender: A file sender which sends the message.\n\n @type myproducer: L{IProducer <interfaces.IProducer>}\n @ivar myproducer: The registered producer.\n\n @type streaming: L{bool}\n @ivar streaming: Indicates whether the registered producer provides a\n streaming interface.\n \"\"\"\n\n osopen = staticmethod(os.open)\n oswrite = staticmethod(os.write)\n osclose = staticmethod(os.close)\n osrename = staticmethod(os.rename)\n\n def __init__(self, mbox, msg):\n \"\"\"\n @type mbox: 
L{MaildirMailbox}\n @param mbox: A maildir mailbox.\n\n @type msg: L{bytes} or file-like object\n @param msg: The message to add.\n \"\"\"\n self.mbox = mbox\n self.defer = defer.Deferred()\n self.openCall = None\n if not hasattr(msg, \"read\"):\n msg = io.BytesIO(msg)\n self.msg = msg\n\n def startUp(self):\n \"\"\"\n Start transferring the message to the mailbox.\n \"\"\"\n self.createTempFile()\n if self.fh != -1:\n self.filesender = basic.FileSender()\n self.filesender.beginFileTransfer(self.msg, self)\n\n def registerProducer(self, producer, streaming):\n \"\"\"\n Register a producer and start asking it for data if it is\n non-streaming.\n\n @type producer: L{IProducer <interfaces.IProducer>}\n @param producer: A producer.\n\n @type streaming: L{bool}\n @param streaming: A flag indicating whether the producer provides a\n streaming interface.\n \"\"\"\n self.myproducer = producer\n self.streaming = streaming\n if not streaming:\n self.prodProducer()\n\n def prodProducer(self):\n \"\"\"\n Repeatedly prod a non-streaming producer to produce data.\n \"\"\"\n self.openCall = None\n if self.myproducer is not None:\n self.openCall = reactor.callLater(0, self.prodProducer)\n self.myproducer.resumeProducing()\n\n def unregisterProducer(self):\n \"\"\"\n Finish transferring the message to the mailbox.\n \"\"\"\n self.myproducer = None\n self.streaming = None\n self.osclose(self.fh)\n self.moveFileToNew()\n\n def write(self, data):\n \"\"\"\n Write data to the maildir file.\n\n @type data: L{bytes}\n @param data: Data to be written to the file.\n \"\"\"\n try:\n self.oswrite(self.fh, data)\n except BaseException:\n self.fail()\n\n def fail(self, err=None):\n \"\"\"\n Fire the deferred to indicate the task completed with a failure.\n\n @type err: L{Failure <failure.Failure>}\n @param err: The error that occurred.\n \"\"\"\n if err is None:\n err = failure.Failure()\n if self.openCall is not None:\n self.openCall.cancel()\n self.defer.errback(err)\n self.defer = None\n\n def moveFileToNew(self):\n \"\"\"\n Place the message in the I{new/} directory, add it to the mailbox and\n fire the deferred to indicate that the task has completed\n successfully.\n \"\"\"\n while True:\n newname = os.path.join(self.mbox.path, \"new\", _generateMaildirName())\n try:\n self.osrename(self.tmpname, newname)\n break\n except OSError as e:\n (err, estr) = e.args\n import errno\n\n # if the newname exists, retry with a new newname.\n if err != errno.EEXIST:\n self.fail()\n newname = None\n break\n if newname is not None:\n self.mbox.list.append(newname)\n self.defer.callback(None)\n self.defer = None\n\n def createTempFile(self):\n \"\"\"\n Create a temporary file to hold the message as it is being transferred.\n \"\"\"\n attr = (\n os.O_RDWR\n | os.O_CREAT\n | os.O_EXCL\n | getattr(os, \"O_NOINHERIT\", 0)\n | getattr(os, \"O_NOFOLLOW\", 0)\n )\n tries = 0\n self.fh = -1\n while True:\n self.tmpname = os.path.join(self.mbox.path, \"tmp\", _generateMaildirName())\n try:\n self.fh = self.osopen(self.tmpname, attr, 0o600)\n return None\n except OSError:\n tries += 1\n if tries > 500:\n self.defer.errback(\n RuntimeError(\n \"Could not create tmp file for %s\" % self.mbox.path\n )\n )\n self.defer = None\n return None\n\n\nclass MaildirMailbox(pop3.Mailbox):\n \"\"\"\n A maildir-backed mailbox.\n\n @ivar path: See L{__init__}.\n\n @type list: L{list} of L{int} or 2-L{tuple} of (0) file-like object,\n (1) L{bytes}\n @ivar list: Information about the messages in the mailbox. 
For undeleted\n messages, the file containing the message and the\n full path name of the file are stored. Deleted messages are indicated\n by 0.\n\n @type deleted: L{dict} mapping 2-L{tuple} of (0) file-like object,\n (1) L{bytes} to L{bytes}\n @type deleted: A mapping of the information about a file before it was\n deleted to the full path name of the deleted file in the I{.Trash/}\n subfolder.\n \"\"\"\n\n AppendFactory = _MaildirMailboxAppendMessageTask\n\n def __init__(self, path):\n \"\"\"\n @type path: L{bytes}\n @param path: The directory name for a maildir mailbox.\n \"\"\"\n self.path = path\n self.list = []\n self.deleted = {}\n initializeMaildir(path)\n for name in (\"cur\", \"new\"):\n for file in os.listdir(os.path.join(path, name)):\n self.list.append((file, os.path.join(path, name, file)))\n self.list.sort()\n self.list = [e[1] for e in self.list]\n\n def listMessages(self, i=None):\n \"\"\"\n Retrieve the size of a message, or, if none is specified, the size of\n each message in the mailbox.\n\n @type i: L{int} or L{None}\n @param i: The 0-based index of a message.\n\n @rtype: L{int} or L{list} of L{int}\n @return: The number of octets in the specified message, or, if an index\n is not specified, a list of the number of octets for all messages\n in the mailbox. Any value which corresponds to a deleted message\n is set to 0.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n if i is None:\n ret = []\n for mess in self.list:\n if mess:\n ret.append(os.stat(mess)[stat.ST_SIZE])\n else:\n ret.append(0)\n return ret\n return self.list[i] and os.stat(self.list[i])[stat.ST_SIZE] or 0\n\n def getMessage(self, i):\n \"\"\"\n Retrieve a file-like object with the contents of a message.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @rtype: file-like object\n @return: A file containing the message.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n return open(self.list[i])\n\n def getUidl(self, i):\n \"\"\"\n Get a unique identifier for a message.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @rtype: L{bytes}\n @return: A string of printable characters uniquely identifying the\n message for all time.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n # Returning the actual filename is a mistake. 
Hash it.\n base = os.path.basename(self.list[i])\n return md5(base).hexdigest()\n\n def deleteMessage(self, i):\n \"\"\"\n Mark a message for deletion.\n\n Move the message to the I{.Trash/} subfolder so it can be undeleted\n by an administrator.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n trashFile = os.path.join(\n self.path, \".Trash\", \"cur\", os.path.basename(self.list[i])\n )\n os.rename(self.list[i], trashFile)\n self.deleted[self.list[i]] = trashFile\n self.list[i] = 0\n\n def undeleteMessages(self):\n \"\"\"\n Undelete all messages marked for deletion.\n\n Move each message marked for deletion from the I{.Trash/} subfolder back\n to its original position.\n \"\"\"\n for (real, trash) in self.deleted.items():\n try:\n os.rename(trash, real)\n except OSError as e:\n (err, estr) = e.args\n import errno\n\n # If the file has been deleted from disk, oh well!\n if err != errno.ENOENT:\n raise\n # This is a pass\n else:\n try:\n self.list[self.list.index(0)] = real\n except ValueError:\n self.list.append(real)\n self.deleted.clear()\n\n def appendMessage(self, txt):\n \"\"\"\n Add a message to the mailbox.\n\n @type txt: L{bytes} or file-like object\n @param txt: A message to add.\n\n @rtype: L{Deferred <defer.Deferred>}\n @return: A deferred which fires when the message has been added to\n the mailbox.\n \"\"\"\n task = self.AppendFactory(self, txt)\n result = task.defer\n task.startUp()\n return result\n\n\n@implementer(pop3.IMailbox)\nclass StringListMailbox:\n \"\"\"\n An in-memory mailbox.\n\n @ivar msgs: See L{__init__}.\n\n @type _delete: L{set} of L{int}\n @ivar _delete: The indices of messages which have been marked for deletion.\n \"\"\"\n\n def __init__(self, msgs):\n \"\"\"\n @type msgs: L{list} of L{bytes}\n @param msgs: The contents of each message in the mailbox.\n \"\"\"\n self.msgs = msgs\n self._delete = set()\n\n def listMessages(self, i=None):\n \"\"\"\n Retrieve the size of a message, or, if none is specified, the size of\n each message in the mailbox.\n\n @type i: L{int} or L{None}\n @param i: The 0-based index of a message.\n\n @rtype: L{int} or L{list} of L{int}\n @return: The number of octets in the specified message, or, if an index\n is not specified, a list of the number of octets in each message in\n the mailbox. 
Any value which corresponds to a deleted message is\n set to 0.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n if i is None:\n return [self.listMessages(msg) for msg in range(len(self.msgs))]\n if i in self._delete:\n return 0\n return len(self.msgs[i])\n\n def getMessage(self, i: int) -> IO[bytes]:\n \"\"\"\n Return an in-memory file-like object with the contents of a message.\n\n @param i: The 0-based index of a message.\n\n @return: An in-memory file-like object containing the message.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n return io.BytesIO(self.msgs[i])\n\n def getUidl(self, i):\n \"\"\"\n Get a unique identifier for a message.\n\n @type i: L{int}\n @param i: The 0-based index of a message.\n\n @rtype: L{bytes}\n @return: A hash of the contents of the message at the given index.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n return md5(self.msgs[i]).hexdigest()\n\n def deleteMessage(self, i):\n \"\"\"\n Mark a message for deletion.\n\n @type i: L{int}\n @param i: The 0-based index of a message to delete.\n\n @raise IndexError: When the index does not correspond to a message in\n the mailbox.\n \"\"\"\n self._delete.add(i)\n\n def undeleteMessages(self):\n \"\"\"\n Undelete any messages which have been marked for deletion.\n \"\"\"\n self._delete = set()\n\n def sync(self):\n \"\"\"\n Discard the contents of any messages marked for deletion.\n \"\"\"\n for index in self._delete:\n self.msgs[index] = \"\"\n self._delete = set()\n\n\n@implementer(portal.IRealm)\nclass MaildirDirdbmDomain(AbstractMaildirDomain):\n \"\"\"\n A maildir-backed domain where membership is checked with a\n L{DirDBM <dirdbm.DirDBM>} database.\n\n The directory structure of a MaildirDirdbmDomain is:\n\n /passwd <-- a DirDBM directory\n\n /USER/{cur, new, del} <-- each user has these three directories\n\n @ivar postmaster: See L{__init__}.\n\n @type dbm: L{DirDBM <dirdbm.DirDBM>}\n @ivar dbm: The authentication database for the domain.\n \"\"\"\n\n portal = None\n _credcheckers = None\n\n def __init__(self, service, root, postmaster=0):\n \"\"\"\n @type service: L{MailService}\n @param service: An email service.\n\n @type root: L{bytes}\n @param root: The maildir root directory.\n\n @type postmaster: L{bool}\n @param postmaster: A flag indicating whether non-existent addresses\n should be forwarded to the postmaster (C{True}) or\n bounced (C{False}).\n \"\"\"\n root = os.fsencode(root)\n AbstractMaildirDomain.__init__(self, service, root)\n dbm = os.path.join(root, b\"passwd\")\n if not os.path.exists(dbm):\n os.makedirs(dbm)\n self.dbm = dirdbm.open(dbm)\n self.postmaster = postmaster\n\n def userDirectory(self, name):\n \"\"\"\n Return the path to a user's mail directory.\n\n @type name: L{bytes}\n @param name: A username.\n\n @rtype: L{bytes} or L{None}\n @return: The path to the user's mail directory for a valid user. For\n an invalid user, the path to the postmaster's mailbox if bounces\n are redirected there. 
Otherwise, L{None}.\n \"\"\"\n if name not in self.dbm:\n if not self.postmaster:\n return None\n name = \"postmaster\"\n dir = os.path.join(self.root, name)\n if not os.path.exists(dir):\n initializeMaildir(dir)\n return dir\n\n def addUser(self, user, password):\n \"\"\"\n Add a user to this domain by adding an entry in the authentication\n database and initializing the user's mail directory.\n\n @type user: L{bytes}\n @param user: A username.\n\n @type password: L{bytes}\n @param password: A password.\n \"\"\"\n self.dbm[user] = password\n # Ensure it is initialized\n self.userDirectory(user)\n\n def getCredentialsCheckers(self):\n \"\"\"\n Return credentials checkers for this domain.\n\n @rtype: L{list} of L{ICredentialsChecker\n <checkers.ICredentialsChecker>} provider\n @return: Credentials checkers for this domain.\n \"\"\"\n if self._credcheckers is None:\n self._credcheckers = [DirdbmDatabase(self.dbm)]\n return self._credcheckers\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n \"\"\"\n Get the mailbox for an authenticated user.\n\n The mailbox for the authenticated user will be returned only if the\n given interfaces include L{IMailbox <pop3.IMailbox>}. Requests for\n anonymous access will be met with a mailbox containing a message\n indicating that an internal error has occurred.\n\n @type avatarId: L{bytes} or C{twisted.cred.checkers.ANONYMOUS}\n @param avatarId: A string which identifies a user or an object which\n signals a request for anonymous access.\n\n @type mind: L{None}\n @param mind: Unused.\n\n @type interfaces: n-L{tuple} of C{zope.interface.Interface}\n @param interfaces: A group of interfaces, one of which the avatar\n must support.\n\n @rtype: 3-L{tuple} of (0) L{IMailbox <pop3.IMailbox>},\n (1) L{IMailbox <pop3.IMailbox>} provider, (2) no-argument\n callable\n @return: A tuple of the supported interface, a mailbox, and a\n logout function.\n\n @raise NotImplementedError: When the given interfaces do not include\n L{IMailbox <pop3.IMailbox>}.\n \"\"\"\n if pop3.IMailbox not in interfaces:\n raise NotImplementedError(\"No interface\")\n if avatarId == checkers.ANONYMOUS:\n mbox = StringListMailbox([INTERNAL_ERROR])\n else:\n mbox = MaildirMailbox(os.path.join(self.root, avatarId))\n\n return (pop3.IMailbox, mbox, lambda: None)\n\n\n@implementer(checkers.ICredentialsChecker)\nclass DirdbmDatabase:\n \"\"\"\n A credentials checker which authenticates users out of a\n L{DirDBM <dirdbm.DirDBM>} database.\n\n @type dirdbm: L{DirDBM <dirdbm.DirDBM>}\n @ivar dirdbm: An authentication database.\n \"\"\"\n\n # credentialInterfaces is not used by the class\n credentialInterfaces = (\n credentials.IUsernamePassword,\n credentials.IUsernameHashedPassword,\n )\n\n def __init__(self, dbm):\n \"\"\"\n @type dbm: L{DirDBM <dirdbm.DirDBM>}\n @param dbm: An authentication database.\n \"\"\"\n self.dirdbm = dbm\n\n def requestAvatarId(self, c):\n \"\"\"\n Authenticate a user and, if successful, return their username.\n\n @type c: L{IUsernamePassword <credentials.IUsernamePassword>} or\n L{IUsernameHashedPassword <credentials.IUsernameHashedPassword>}\n provider.\n @param c: Credentials.\n\n @rtype: L{bytes}\n @return: A string which identifies an user.\n\n @raise UnauthorizedLogin: When the credentials check fails.\n \"\"\"\n if c.username in self.dirdbm:\n if c.checkPassword(self.dirdbm[c.username]):\n return c.username\n raise UnauthorizedLogin()\n", "path": "src/twisted/mail/maildir.py"}]} |
gh_patches_debug_1331 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2345 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OR failing since at least 2018-05-19
OR has been failing since 2018-05-19
Based on automated runs it appears that OR has not run successfully in 6 days (2018-05-19).
```
08:02:19 INFO billy: Save person Julie Fahey
08:02:19 INFO billy: Save person Tim Knopp
08:02:19 INFO billy: Save person Laurie Monnes Anderson
08:02:19 INFO billy: Save person Ginny Burdick
08:02:19 INFO billy: Save person Betsy Johnson
08:02:19 INFO billy: Save person Cedric Hayden
08:02:19 INFO billy: Save person Brad Witt
08:02:19 INFO billy: Save person Alissa Keny-Guyer
08:02:19 INFO billy: Save person Diego Hernandez
08:02:19 INFO billy: Save person Margaret Doherty
08:02:19 INFO billy: Save committee Sole Proprietors
08:02:19 INFO billy: Save bill lower 2018 Special Session: HB4301
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save bill lower 2018 Special Session: HCR301
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save vote 2018 Special Session upper: HCR 301 'Rules suspended. Final reading. Carried by Steiner Hayward. Adopted.'
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save vote 2018 Special Session lower: HB 4301 'Rules suspended. Third reading. Carried by Barnhart. Passed.'
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save vote 2018 Special Session upper: HB 4301 'Expands availability of elective reduced personal income tax rate for certain pass-through income to taxpayers doing business as sole proprietors.'
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save vote 2018 Special Session upper: HCR 301 'Adjourns sine die 2018 special session of Seventy-ninth Legislative Assembly.'
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save vote 2018 Special Session lower: HCR 301 'Rules suspended. Read. Carried by Williamson. Adopted.'
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:19 INFO billy: Save vote 2018 Special Session upper: HB 4301 'Rules suspended. Third reading. Carried by Beyer. Passed.'
08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']
08:02:21 INFO billy: billy-update abbr=or
actions=import,report
types=bills,legislators,votes,committees,alldata
sessions=2018 Regular Session
terms=2017-2018
08:02:21 INFO billy: Finished importing 90 legislator files.
08:02:21 INFO billy: imported 6 vote files
Traceback (most recent call last):
File "/opt/openstates/venv-billy//bin/billy-update", line 11, in <module>
load_entry_point('billy', 'console_scripts', 'billy-update')()
File "/opt/openstates/venv-billy/src/billy/billy/bin/update.py", line 413, in main
import_report = _do_imports(abbrev, args)
File "/opt/openstates/venv-billy/src/billy/billy/bin/update.py", line 152, in _do_imports
report['bills'] = import_bills(abbrev, settings.BILLY_DATA_DIR)
File "/opt/openstates/venv-billy/src/billy/billy/importers/bills.py", line 413, in import_bills
ret = import_bill(data, votes, categorizer)
File "/opt/openstates/venv-billy/src/billy/billy/importers/bills.py", line 231, in import_bill
match_sponsor_ids(abbr, data)
File "/opt/openstates/venv-billy/src/billy/billy/importers/bills.py", line 37, in match_sponsor_ids
sponsor['name'])
File "/opt/openstates/venv-billy/src/billy/billy/importers/names.py", line 26, in get_legislator_id
raise Exception("bad session: " + session)
Exception: bad session: 2018 Special Session
```
Visit http://bobsled.openstates.org for more info.
--- END ISSUE ---
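The schema warnings and the final traceback share one root cause: the scraper now produces bills and votes tagged `2018 Special Session`, but Oregon's billy metadata does not list that session under any term, so the importer cannot map it to a term and aborts. The sketch below is an approximation reconstructed from the traceback and the metadata file that follows — it is not billy's actual `names.py` — but it shows the lookup that raises `bad session`:

```python
# Simplified reconstruction (assumption): billy resolves a bill's session to a
# legislative term by scanning metadata['terms']; an unknown session raises.
metadata = {
    'terms': [
        {'name': '2017-2018',
         'sessions': ['2017 Regular Session', '2018 Regular Session'],
         'start_year': 2017, 'end_year': 2018},
    ],
}

def term_for_session(metadata, session):
    """Return the name of the term whose 'sessions' list contains `session`."""
    for term in metadata['terms']:
        if session in term['sessions']:
            return term['name']
    # Mirrors the error raised in billy/importers/names.py per the traceback.
    raise Exception("bad session: " + session)

print(term_for_session(metadata, '2018 Regular Session'))  # -> 2017-2018

try:
    term_for_session(metadata, '2018 Special Session')
except Exception as exc:
    print(exc)  # -> bad session: 2018 Special Session
```

The enumeration listed in the validation warnings matches exactly the sessions declared across the terms, which suggests that list is also derived from the terms; registering the new session under the appropriate term should therefore clear both the warnings and the exception.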
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `billy_metadata/or.py`
Content:
```
1 import re
2 import datetime
3 import lxml.html
4
5 metadata = dict(
6 name='Oregon',
7 abbreviation='or',
8 capitol_timezone='America/Los_Angeles',
9 legislature_name='Oregon Legislative Assembly',
10 legislature_url='http://www.leg.state.or.us/',
11 chambers = {
12 'upper': {'name': 'Senate', 'title': 'Senator'},
13 'lower': {'name': 'House', 'title': 'Representative'},
14 },
15 terms=[
16 {'name': '2007-2008',
17 'sessions': ['2007 Regular Session',
18 '2008 Special Session' ],
19 'start_year': 2007, 'end_year': 2008},
20 {'name': '2009-2010',
21 'sessions': ['2009 Regular Session',
22 '2010 Special Session' ],
23 'start_year': 2009, 'end_year': 2010},
24 {'name': '2011-2012',
25 'sessions': ['2011 Regular Session',
26 '2012 Regular Session',
27 '2012 Special Session' ],
28 'start_year': 2011, 'end_year': 2012},
29 {'name': '2013-2014',
30 'sessions': ['2013 Regular Session',
31 '2013 Special Session',
32 '2014 Regular Session'],
33 'start_year': 2013, 'end_year': 2014},
34 {'name': '2015-2016',
35 'sessions': ['2015 Regular Session',
36 '2016 Regular Session',],
37 'start_year': 2015, 'end_year': 2016},
38 {'name': '2017-2018',
39 'sessions': ['2017 Regular Session',
40 '2018 Regular Session'],
41 'start_year': 2017, 'end_year': 2018},
42 ],
43 session_details={
44 '2007 Regular Session': {
45 'display_name': '2007 Regular Session',
46 '_scraped_name': '2007 Regular Session',
47 'slug': '2007 Regular Session',
48 },
49 '2008 Special Session': {
50 'display_name': '2008 Special Session',
51 '_scraped_name': '2008 Special Session',
52 'slug': '2008 Special Session',
53 },
54 '2009 Regular Session': {
55 'display_name': '2009 Regular Session',
56 '_scraped_name': '2009 Regular Session',
57 'slug': '2009 Regular Session',
58 },
59 '2010 Special Session': {
60 'display_name': '2010 Special Session',
61 '_scraped_name': '2010 Special Session',
62 'slug': '2010 Special Session',
63 },
64 '2011 Regular Session': {
65 'display_name': '2011 Regular Session',
66 '_scraped_name': '2011 Regular Session',
67 'slug': '2011 Regular Session',
68 },
69 '2012 Regular Session': {
70 'display_name': '2012 Regular Session',
71 '_scraped_name': '2012 Regular Session',
72 'slug': '2012 Regular Session',
73 },
74 '2012 Special Session' : {
75 'display_name': '2012 Speical Session',
76 '_scraped_name': '2012 Special Session',
77 'slug': '2012 Special Session',
78 },
79 '2013 Regular Session': {
80 'display_name': '2013 Regular Session',
81 '_scraped_name': '2013 Regular Session',
82 'slug': '2013 Regular Session',
83 },
84 '2013 Special Session': {
85 'display_name': '2013 Special Session',
86 '_scraped_name': '2013 Special Session',
87 'slug': '2013 Special Session',
88 },
89 '2014 Regular Session': {
90 'display_name': '2014 Regular Session',
91 '_scraped_name': '2014 Regular Session',
92 'slug': '2014 Regular Session',
93 },
94 '2015 Regular Session': {
95 'display_name': '2015 Regular Session',
96 '_scraped_name': '2015 Regular Session',
97 'slug': '2015 Regular Session',
98 },
99 '2016 Regular Session': {
100 'display_name': '2016 Regular Session',
101 '_scraped_name': '2016 Regular Session',
102 'slug': '2016 Regular Session',
103 },
104 '2017 Regular Session': {
105 'display_name': '2017 Regular Session',
106 '_scraped_name': '2017 Regular Session',
107 'slug': '2017 Regular Session',
108 'start_date': datetime.date(2017, 2, 1),
109 'end_date': datetime.date(2017, 7, 10),
110 },
111 '2018 Regular Session': {
112 'display_name': '2018 Regular Session',
113 '_scraped_name': '2018 Regular Session',
114 'slug': '2018 Regular Session',
115 },
116 '2018 Special Session': {
117 'display_name': '2018 Special Session',
118 '_scraped_name': '2018 1st Special Session',
119 'slug': '2018 Special Session',
120 },
121 },
122 _ignored_scraped_sessions=['Today',
123 '2015-2016 Interim',
124 '2013 1st Special Session',
125 '2012 1st Special Session',
126 '2013 - 2014 Interim',
127 '2011 - 2012 Interim',
128 '2009 - 2010 Interim',
129 '2007 - 2008 Interim'],
130 feature_flags=[],
131 )
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/billy_metadata/or.py b/billy_metadata/or.py
--- a/billy_metadata/or.py
+++ b/billy_metadata/or.py
@@ -37,7 +37,8 @@
'start_year': 2015, 'end_year': 2016},
{'name': '2017-2018',
'sessions': ['2017 Regular Session',
- '2018 Regular Session'],
+ '2018 Regular Session',
+ '2018 Special Session',],
'start_year': 2017, 'end_year': 2018},
],
session_details={
| {"golden_diff": "diff --git a/billy_metadata/or.py b/billy_metadata/or.py\n--- a/billy_metadata/or.py\n+++ b/billy_metadata/or.py\n@@ -37,7 +37,8 @@\n 'start_year': 2015, 'end_year': 2016},\n {'name': '2017-2018',\n 'sessions': ['2017 Regular Session',\n- '2018 Regular Session'],\n+ '2018 Regular Session',\n+ '2018 Special Session',],\n 'start_year': 2017, 'end_year': 2018},\n ],\n session_details={\n", "issue": "OR failing since at least 2018-05-19\nOR has been failing since 2018-05-19\n\nBased on automated runs it appears that OR has not run successfully in 6 days (2018-05-19).\n\n\n```\n 08:02:19 INFO billy: Save person Julie Fahey\n08:02:19 INFO billy: Save person Tim Knopp\n08:02:19 INFO billy: Save person Laurie Monnes Anderson\n08:02:19 INFO billy: Save person Ginny Burdick\n08:02:19 INFO billy: Save person Betsy Johnson\n08:02:19 INFO billy: Save person Cedric Hayden\n08:02:19 INFO billy: Save person Brad Witt\n08:02:19 INFO billy: Save person Alissa Keny-Guyer\n08:02:19 INFO billy: Save person Diego Hernandez\n08:02:19 INFO billy: Save person Margaret Doherty\n08:02:19 INFO billy: Save committee Sole Proprietors\n08:02:19 INFO billy: Save bill lower 2018 Special Session: HB4301\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save bill lower 2018 Special Session: HCR301\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save vote 2018 Special Session upper: HCR 301 'Rules suspended. Final reading. Carried by Steiner Hayward. Adopted.'\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save vote 2018 Special Session lower: HB 4301 'Rules suspended. Third reading. Carried by Barnhart. 
Passed.'\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save vote 2018 Special Session upper: HB 4301 'Expands availability of elective reduced personal income tax rate for certain pass-through income to taxpayers doing business as sole proprietors.'\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save vote 2018 Special Session upper: HCR 301 'Adjourns sine die 2018 special session of Seventy-ninth Legislative Assembly.'\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save vote 2018 Special Session lower: HCR 301 'Rules suspended. Read. Carried by Williamson. Adopted.'\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:19 INFO billy: Save vote 2018 Special Session upper: HB 4301 'Rules suspended. Third reading. Carried by Beyer. 
Passed.'\n08:02:19 WARNING billy: Value u'2018 Special Session' for field '<obj>.session' is not in the enumeration: ['2007 Regular Session', '2008 Special Session', '2009 Regular Session', '2010 Special Session', '2011 Regular Session', '2012 Regular Session', '2012 Special Session', '2013 Regular Session', '2013 Special Session', '2014 Regular Session', '2015 Regular Session', '2016 Regular Session', '2017 Regular Session', '2018 Regular Session']\n08:02:21 INFO billy: billy-update abbr=or\n actions=import,report\n types=bills,legislators,votes,committees,alldata\n sessions=2018 Regular Session\n terms=2017-2018\n08:02:21 INFO billy: Finished importing 90 legislator files.\n08:02:21 INFO billy: imported 6 vote files\nTraceback (most recent call last):\n File \"/opt/openstates/venv-billy//bin/billy-update\", line 11, in <module>\n load_entry_point('billy', 'console_scripts', 'billy-update')()\n File \"/opt/openstates/venv-billy/src/billy/billy/bin/update.py\", line 413, in main\n import_report = _do_imports(abbrev, args)\n File \"/opt/openstates/venv-billy/src/billy/billy/bin/update.py\", line 152, in _do_imports\n report['bills'] = import_bills(abbrev, settings.BILLY_DATA_DIR)\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/bills.py\", line 413, in import_bills\n ret = import_bill(data, votes, categorizer)\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/bills.py\", line 231, in import_bill\n match_sponsor_ids(abbr, data)\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/bills.py\", line 37, in match_sponsor_ids\n sponsor['name'])\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/names.py\", line 26, in get_legislator_id\n raise Exception(\"bad session: \" + session)\nException: bad session: 2018 Special Session\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import re\nimport datetime\nimport lxml.html\n\nmetadata = dict(\n name='Oregon',\n abbreviation='or',\n capitol_timezone='America/Los_Angeles',\n legislature_name='Oregon Legislative Assembly',\n legislature_url='http://www.leg.state.or.us/',\n chambers = {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n terms=[\n {'name': '2007-2008',\n 'sessions': ['2007 Regular Session',\n '2008 Special Session' ],\n 'start_year': 2007, 'end_year': 2008},\n {'name': '2009-2010',\n 'sessions': ['2009 Regular Session',\n '2010 Special Session' ],\n 'start_year': 2009, 'end_year': 2010},\n {'name': '2011-2012',\n 'sessions': ['2011 Regular Session',\n '2012 Regular Session',\n '2012 Special Session' ],\n 'start_year': 2011, 'end_year': 2012},\n {'name': '2013-2014',\n 'sessions': ['2013 Regular Session',\n '2013 Special Session',\n '2014 Regular Session'],\n 'start_year': 2013, 'end_year': 2014},\n {'name': '2015-2016',\n 'sessions': ['2015 Regular Session',\n '2016 Regular Session',],\n 'start_year': 2015, 'end_year': 2016},\n {'name': '2017-2018',\n 'sessions': ['2017 Regular Session',\n '2018 Regular Session'],\n 'start_year': 2017, 'end_year': 2018},\n ],\n session_details={\n '2007 Regular Session': {\n 'display_name': '2007 Regular Session',\n '_scraped_name': '2007 Regular Session',\n 'slug': '2007 Regular Session',\n },\n '2008 Special Session': {\n 'display_name': '2008 Special Session',\n '_scraped_name': '2008 Special Session',\n 'slug': '2008 Special Session',\n },\n '2009 Regular Session': {\n 'display_name': '2009 Regular Session',\n '_scraped_name': '2009 Regular 
Session',\n 'slug': '2009 Regular Session',\n },\n '2010 Special Session': {\n 'display_name': '2010 Special Session',\n '_scraped_name': '2010 Special Session',\n 'slug': '2010 Special Session',\n },\n '2011 Regular Session': {\n 'display_name': '2011 Regular Session',\n '_scraped_name': '2011 Regular Session',\n 'slug': '2011 Regular Session',\n },\n '2012 Regular Session': {\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012 Regular Session',\n 'slug': '2012 Regular Session',\n },\n '2012 Special Session' : {\n 'display_name': '2012 Speical Session',\n '_scraped_name': '2012 Special Session',\n 'slug': '2012 Special Session',\n },\n '2013 Regular Session': {\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013 Regular Session',\n 'slug': '2013 Regular Session',\n },\n '2013 Special Session': {\n 'display_name': '2013 Special Session',\n '_scraped_name': '2013 Special Session',\n 'slug': '2013 Special Session',\n },\n '2014 Regular Session': {\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014 Regular Session',\n 'slug': '2014 Regular Session',\n },\n '2015 Regular Session': {\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015 Regular Session',\n 'slug': '2015 Regular Session',\n },\n '2016 Regular Session': {\n 'display_name': '2016 Regular Session',\n '_scraped_name': '2016 Regular Session',\n 'slug': '2016 Regular Session',\n },\n '2017 Regular Session': {\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017 Regular Session',\n 'slug': '2017 Regular Session',\n 'start_date': datetime.date(2017, 2, 1),\n 'end_date': datetime.date(2017, 7, 10),\n },\n '2018 Regular Session': {\n 'display_name': '2018 Regular Session',\n '_scraped_name': '2018 Regular Session',\n 'slug': '2018 Regular Session',\n },\n '2018 Special Session': {\n 'display_name': '2018 Special Session',\n '_scraped_name': '2018 1st Special Session',\n 'slug': '2018 Special Session',\n },\n },\n _ignored_scraped_sessions=['Today',\n '2015-2016 Interim',\n '2013 1st Special Session',\n '2012 1st Special Session',\n '2013 - 2014 Interim',\n '2011 - 2012 Interim',\n '2009 - 2010 Interim',\n '2007 - 2008 Interim'],\n feature_flags=[],\n)\n", "path": "billy_metadata/or.py"}], "after_files": [{"content": "import re\nimport datetime\nimport lxml.html\n\nmetadata = dict(\n name='Oregon',\n abbreviation='or',\n capitol_timezone='America/Los_Angeles',\n legislature_name='Oregon Legislative Assembly',\n legislature_url='http://www.leg.state.or.us/',\n chambers = {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n terms=[\n {'name': '2007-2008',\n 'sessions': ['2007 Regular Session',\n '2008 Special Session' ],\n 'start_year': 2007, 'end_year': 2008},\n {'name': '2009-2010',\n 'sessions': ['2009 Regular Session',\n '2010 Special Session' ],\n 'start_year': 2009, 'end_year': 2010},\n {'name': '2011-2012',\n 'sessions': ['2011 Regular Session',\n '2012 Regular Session',\n '2012 Special Session' ],\n 'start_year': 2011, 'end_year': 2012},\n {'name': '2013-2014',\n 'sessions': ['2013 Regular Session',\n '2013 Special Session',\n '2014 Regular Session'],\n 'start_year': 2013, 'end_year': 2014},\n {'name': '2015-2016',\n 'sessions': ['2015 Regular Session',\n '2016 Regular Session',],\n 'start_year': 2015, 'end_year': 2016},\n {'name': '2017-2018',\n 'sessions': ['2017 Regular Session',\n '2018 Regular Session',\n '2018 Special Session',],\n 'start_year': 2017, 'end_year': 2018},\n ],\n 
session_details={\n '2007 Regular Session': {\n 'display_name': '2007 Regular Session',\n '_scraped_name': '2007 Regular Session',\n 'slug': '2007 Regular Session',\n },\n '2008 Special Session': {\n 'display_name': '2008 Special Session',\n '_scraped_name': '2008 Special Session',\n 'slug': '2008 Special Session',\n },\n '2009 Regular Session': {\n 'display_name': '2009 Regular Session',\n '_scraped_name': '2009 Regular Session',\n 'slug': '2009 Regular Session',\n },\n '2010 Special Session': {\n 'display_name': '2010 Special Session',\n '_scraped_name': '2010 Special Session',\n 'slug': '2010 Special Session',\n },\n '2011 Regular Session': {\n 'display_name': '2011 Regular Session',\n '_scraped_name': '2011 Regular Session',\n 'slug': '2011 Regular Session',\n },\n '2012 Regular Session': {\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012 Regular Session',\n 'slug': '2012 Regular Session',\n },\n '2012 Special Session' : {\n 'display_name': '2012 Speical Session',\n '_scraped_name': '2012 Special Session',\n 'slug': '2012 Special Session',\n },\n '2013 Regular Session': {\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013 Regular Session',\n 'slug': '2013 Regular Session',\n },\n '2013 Special Session': {\n 'display_name': '2013 Special Session',\n '_scraped_name': '2013 Special Session',\n 'slug': '2013 Special Session',\n },\n '2014 Regular Session': {\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014 Regular Session',\n 'slug': '2014 Regular Session',\n },\n '2015 Regular Session': {\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015 Regular Session',\n 'slug': '2015 Regular Session',\n },\n '2016 Regular Session': {\n 'display_name': '2016 Regular Session',\n '_scraped_name': '2016 Regular Session',\n 'slug': '2016 Regular Session',\n },\n '2017 Regular Session': {\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017 Regular Session',\n 'slug': '2017 Regular Session',\n 'start_date': datetime.date(2017, 2, 1),\n 'end_date': datetime.date(2017, 7, 10),\n },\n '2018 Regular Session': {\n 'display_name': '2018 Regular Session',\n '_scraped_name': '2018 Regular Session',\n 'slug': '2018 Regular Session',\n },\n '2018 Special Session': {\n 'display_name': '2018 Special Session',\n '_scraped_name': '2018 1st Special Session',\n 'slug': '2018 Special Session',\n },\n },\n _ignored_scraped_sessions=['Today',\n '2015-2016 Interim',\n '2013 1st Special Session',\n '2012 1st Special Session',\n '2013 - 2014 Interim',\n '2011 - 2012 Interim',\n '2009 - 2010 Interim',\n '2007 - 2008 Interim'],\n feature_flags=[],\n)\n", "path": "billy_metadata/or.py"}]} |
gh_patches_debug_1332 | rasdani/github-patches | git_diff | translate__pootle-6486 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No critical checks are displayed after sending translation
If you send a translation that has critical checks, the editor correctly keeps you in the unit and displays a red background, but the critical errors are not displayed, so the translator has to reload to know which critical errors were reported or to mute them:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_store/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import calendar
10 import unicodedata
11 from collections import OrderedDict
12
13 from translate.lang import data
14
15 from django import forms
16 from django.conf import settings
17 from django.core.exceptions import ObjectDoesNotExist
18 from django.http import Http404, QueryDict
19 from django.shortcuts import get_object_or_404, redirect
20 from django.template import loader
21 from django.utils.functional import cached_property
22 from django.utils.lru_cache import lru_cache
23 from django.utils.translation import to_locale
24 from django.utils.translation.trans_real import parse_accept_lang_header
25 from django.views.decorators.http import require_http_methods
26 from django.views.generic import FormView
27
28 from pootle.core.delegate import review, search_backend
29 from pootle.core.exceptions import Http400
30 from pootle.core.http import JsonResponse, JsonResponseBadRequest
31 from pootle.core.utils import dateformat
32 from pootle.core.views import PootleJSON
33 from pootle.core.views.decorators import requires_permission, set_permissions
34 from pootle.core.views.mixins import GatherContextMixin, PootleJSONMixin
35 from pootle.i18n.dates import timesince
36 from pootle.i18n.gettext import ugettext as _
37 from pootle_app.models.permissions import check_user_permission
38 from pootle_language.models import Language
39 from pootle_misc.util import ajax_required
40
41 from .decorators import get_unit_context
42 from .forms import (
43 SubmitForm, SuggestionReviewForm, SuggestionSubmitForm, UnitSearchForm,
44 unit_comment_form_factory, unit_form_factory)
45 from .models import Suggestion, Unit
46 from .templatetags.store_tags import pluralize_source, pluralize_target
47 from .unit.results import GroupedResults
48 from .unit.timeline import Timeline
49 from .util import find_altsrcs
50
51
52 def get_alt_src_langs(request, user, translation_project):
53 language = translation_project.language
54 project = translation_project.project
55 source_language = project.source_language
56 langs = list(
57 user.alt_src_langs.filter(
58 translationproject__project=project))
59 if langs:
60 return langs
61 accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
62 for accept_lang, __ in parse_accept_lang_header(accept):
63 if accept_lang == '*':
64 continue
65 normalized = to_locale(
66 data.normalize_code(
67 data.simplify_to_common(accept_lang)))
68 code = to_locale(accept_lang)
69 is_source_lang = any(
70 langcode in ('en', 'en_US', source_language.code, language.code)
71 for langcode in [code, normalized])
72 if is_source_lang:
73 continue
74
75 langs = list(
76 Language.objects.filter(
77 code__in=(normalized, code),
78 translationproject__project=project))
79 if langs:
80 return langs
81
82
83 #
84 # Views used with XMLHttpRequest requests.
85 #
86
87 def _filter_ctx_units(units_qs, unit, how_many, gap=0):
88 """Returns ``how_many``*2 units that are before and after ``index``."""
89 result = {'before': [], 'after': []}
90
91 if how_many and unit.index - gap > 0:
92 before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \
93 .order_by('-index')[gap:how_many+gap]
94 result['before'] = _build_units_list(before, reverse=True)
95 result['before'].reverse()
96
97 # FIXME: can we avoid this query if length is known?
98 if how_many:
99 after = units_qs.filter(store=unit.store_id,
100 index__gt=unit.index)[gap:how_many+gap]
101 result['after'] = _build_units_list(after)
102
103 return result
104
105
106 def _prepare_unit(unit):
107 """Constructs a dictionary with relevant `unit` data."""
108 return {
109 'id': unit.id,
110 'url': unit.get_translate_url(),
111 'isfuzzy': unit.isfuzzy(),
112 'source': [source[1] for source in pluralize_source(unit)],
113 'target': [target[1] for target in pluralize_target(unit)],
114 }
115
116
117 def _build_units_list(units, reverse=False):
118 """Given a list/queryset of units, builds a list with the unit data
119 contained in a dictionary ready to be returned as JSON.
120
121 :return: A list with unit id, source, and target texts. In case of
122 having plural forms, a title for the plural form is also provided.
123 """
124 return_units = []
125
126 for unit in iter(units):
127 return_units.append(_prepare_unit(unit))
128
129 return return_units
130
131
132 def _get_critical_checks_snippet(request, unit):
133 """Retrieves the critical checks snippet.
134
135 :param request: an `HttpRequest` object
136 :param unit: a `Unit` instance for which critical checks need to be
137 rendered.
138 :return: rendered HTML snippet with the failing checks, or `None` if
139 there are no critical failing checks.
140 """
141 if not unit.has_critical_checks():
142 return None
143
144 can_review = check_user_permission(request.user, 'review',
145 unit.store.parent)
146 ctx = {
147 'canreview': can_review,
148 'unit': unit,
149 }
150 template = loader.get_template('editor/units/xhr_checks.html')
151 return template.render(context=ctx, request=request)
152
153
154 @ajax_required
155 def get_units(request, **kwargs_):
156 """Gets source and target texts and its metadata.
157
158 :return: A JSON-encoded string containing the source and target texts
159 grouped by the store they belong to.
160
161 The optional `count` GET parameter defines the chunk size to
162 consider. The user's preference will be used by default.
163
164 When the `initial` GET parameter is present, a sorted list of
165 the result set ids will be returned too.
166 """
167 search_form = UnitSearchForm(request.GET, user=request.user)
168
169 if not search_form.is_valid():
170 errors = search_form.errors.as_data()
171 if "path" in errors:
172 for error in errors["path"]:
173 if error.code == "max_length":
174 raise Http400(_('Path too long.'))
175 elif error.code == "required":
176 raise Http400(_('Arguments missing.'))
177 raise Http404(forms.ValidationError(search_form.errors).messages)
178
179 total, start, end, units_qs = search_backend.get(Unit)(
180 request.user, **search_form.cleaned_data).search()
181 return JsonResponse(
182 {'start': start,
183 'end': end,
184 'total': total,
185 'unitGroups': GroupedResults(units_qs).data})
186
187
188 @ajax_required
189 @get_unit_context('view')
190 def get_more_context(request, unit, **kwargs_):
191 """Retrieves more context units.
192
193 :return: An object in JSON notation that contains the source and target
194 texts for units that are in the context of unit ``uid``.
195 """
196 store = request.store
197 json = {}
198 gap = int(request.GET.get('gap', 0))
199 qty = int(request.GET.get('qty', 1))
200
201 json["ctx"] = _filter_ctx_units(store.units, unit, qty, gap)
202 return JsonResponse(json)
203
204
205 @ajax_required
206 @require_http_methods(['POST', 'DELETE'])
207 @get_unit_context('translate')
208 def comment(request, unit, **kwargs_):
209 """Dispatches the comment action according to the HTTP verb."""
210 if request.method == 'DELETE':
211 return delete_comment(request, unit)
212 elif request.method == 'POST':
213 return save_comment(request, unit)
214
215
216 def delete_comment(request, unit, **kwargs_):
217 """Deletes a comment by blanking its contents and records a new
218 submission.
219 """
220 unit.change.commented_by = None
221 unit.change.commented_on = None
222
223 language = request.translation_project.language
224 comment_form_class = unit_comment_form_factory(language)
225 form = comment_form_class({}, instance=unit, request=request)
226
227 if form.is_valid():
228 form.save()
229 return JsonResponse({})
230
231 return JsonResponseBadRequest({'msg': _("Failed to remove comment.")})
232
233
234 def save_comment(request, unit):
235 """Stores a new comment for the given ``unit``.
236
237 :return: If the form validates, the cleaned comment is returned.
238 An error message is returned otherwise.
239 """
240
241 language = request.translation_project.language
242 form = unit_comment_form_factory(language)(request.POST, instance=unit,
243 request=request)
244
245 if form.is_valid():
246 form.save()
247
248 user = request.user
249 directory = unit.store.parent
250
251 ctx = {
252 'unit': unit,
253 'language': language,
254 'cantranslate': check_user_permission(user, 'translate',
255 directory),
256 'cansuggest': check_user_permission(user, 'suggest', directory),
257 }
258 t = loader.get_template('editor/units/xhr_comment.html')
259
260 return JsonResponse({'comment': t.render(context=ctx,
261 request=request)})
262
263 return JsonResponseBadRequest({'msg': _("Comment submission failed.")})
264
265
266 class PootleUnitJSON(PootleJSON):
267 model = Unit
268 pk_url_kwarg = "uid"
269
270 @cached_property
271 def permission_context(self):
272 self.object = self.get_object()
273 return self.store.parent
274
275 @property
276 def pootle_path(self):
277 return self.store.pootle_path
278
279 @cached_property
280 def tp(self):
281 return self.store.translation_project
282
283 @cached_property
284 def store(self):
285 return self.object.store
286
287 @cached_property
288 def source_language(self):
289 return self.project.source_language
290
291 @cached_property
292 def directory(self):
293 return self.store.parent
294
295 @lru_cache()
296 def get_object(self):
297 return super(PootleUnitJSON, self).get_object()
298
299
300 class UnitTimelineJSON(PootleUnitJSON):
301
302 model = Unit
303 pk_url_kwarg = "uid"
304
305 template_name = 'editor/units/xhr_timeline.html'
306
307 @property
308 def language(self):
309 return self.object.store.translation_project.language
310
311 @cached_property
312 def permission_context(self):
313 self.object = self.get_object()
314 return self.project.directory
315
316 @property
317 def project(self):
318 return self.object.store.translation_project.project
319
320 @property
321 def timeline(self):
322 return Timeline(self.object)
323
324 def get_context_data(self, *args, **kwargs):
325 return dict(
326 event_groups=self.timeline.grouped_events(),
327 language=self.language)
328
329 def get_queryset(self):
330 return Unit.objects.get_translatable(self.request.user).select_related(
331 "change",
332 "store__translation_project__language",
333 "store__translation_project__project__directory")
334
335 def get_response_data(self, context):
336 return {
337 'uid': self.object.id,
338 'event_groups': self.get_event_groups_data(context),
339 'timeline': self.render_timeline(context)}
340
341 def render_timeline(self, context):
342 return loader.get_template(self.template_name).render(context=context)
343
344 def get_event_groups_data(self, context):
345 result = []
346 for event_group in context['event_groups']:
347 display_dt = event_group['datetime']
348 if display_dt is not None:
349 display_dt = dateformat.format(display_dt)
350 iso_dt = event_group['datetime'].isoformat()
351 relative_time = timesince(
352 calendar.timegm(event_group['datetime'].timetuple()),
353 self.request_lang)
354 else:
355 iso_dt = None
356 relative_time = None
357 result.append({
358 "display_datetime": display_dt,
359 "iso_datetime": iso_dt,
360 "relative_time": relative_time,
361 "via_upload": event_group.get('via_upload', False),
362 })
363 return result
364
365
366 CHARACTERS_NAMES = OrderedDict(
367 (
368 # Code Display name
369 (8204, 'ZWNJ'),
370 (8205, 'ZWJ'),
371 (8206, 'LRM'),
372 (8207, 'RLM'),
373 (8234, 'LRE'),
374 (8235, 'RLE'),
375 (8236, 'PDF'),
376 (8237, 'LRO'),
377 (8238, 'RLO'),
378 )
379 )
380
381 CHARACTERS = u"".join([unichr(index) for index in CHARACTERS_NAMES.keys()])
382
383
384 class UnitEditJSON(PootleUnitJSON):
385
386 @property
387 def special_characters(self):
388 if self.language.direction == "rtl":
389 # Inject some extra special characters for RTL languages.
390 language_specialchars = CHARACTERS
391 # Do not repeat special chars.
392 language_specialchars += u"".join(
393 [c for c in self.language.specialchars if c not in CHARACTERS])
394 else:
395 language_specialchars = self.language.specialchars
396
397 special_chars = []
398 for specialchar in language_specialchars:
399 code = ord(specialchar)
400 special_chars.append({
401 'display': CHARACTERS_NAMES.get(code, specialchar),
402 'code': code,
403 'hex_code': "U+" + hex(code)[2:].upper(), # Like U+200C
404 'name': unicodedata.name(specialchar, ''),
405 })
406 return special_chars
407
408 def get_edit_template(self):
409 if self.project.is_terminology or self.store.has_terminology:
410 return loader.get_template('editor/units/term_edit.html')
411 return loader.get_template('editor/units/edit.html')
412
413 def render_edit_template(self, context):
414 return self.get_edit_template().render(context=context,
415 request=self.request)
416
417 def get_source_nplurals(self):
418 if self.object.hasplural():
419 return len(self.object.source.strings)
420 return None
421
422 def get_target_nplurals(self):
423 source_nplurals = self.get_source_nplurals()
424 return self.language.nplurals if source_nplurals is not None else 1
425
426 def get_unit_values(self):
427 target_nplurals = self.get_target_nplurals()
428 unit_values = [value for value in self.object.target_f.strings]
429 if len(unit_values) < target_nplurals:
430 return unit_values + ((target_nplurals - len(unit_values)) * [''])
431 return unit_values
432
433 def get_unit_edit_form(self):
434 form_class = unit_form_factory(self.language,
435 self.get_source_nplurals(),
436 self.request)
437 return form_class(instance=self.object, request=self.request)
438
439 def get_unit_comment_form(self):
440 comment_form_class = unit_comment_form_factory(self.language)
441 return comment_form_class({}, instance=self.object, request=self.request)
442
443 @lru_cache()
444 def get_alt_srcs(self):
445 if self.request.user.is_anonymous:
446 return []
447 return find_altsrcs(
448 self.object,
449 get_alt_src_langs(self.request, self.request.user, self.tp),
450 store=self.store,
451 project=self.project)
452
453 def get_queryset(self):
454 return Unit.objects.get_translatable(self.request.user).select_related(
455 "change",
456 "change__submitted_by",
457 "store",
458 "store__filetype",
459 "store__parent",
460 "store__translation_project",
461 "store__translation_project__project",
462 "store__translation_project__project__directory",
463 "store__translation_project__project__source_language",
464 "store__translation_project__language")
465
466 def get_sources(self):
467 sources = {
468 unit.language_code: unit.target.strings
469 for unit in self.get_alt_srcs()}
470 sources[self.source_language.code] = self.object.source_f.strings
471 return sources
472
473 def get_context_data(self, *args, **kwargs):
474 priority = (
475 self.store.priority
476 if 'virtualfolder' in settings.INSTALLED_APPS
477 else None)
478 suggestions = self.object.get_suggestions()
479 latest_target_submission = self.object.get_latest_target_submission()
480 accepted_suggestion = None
481 if latest_target_submission is not None:
482 accepted_suggestion = latest_target_submission.suggestion
483 return {
484 'unit': self.object,
485 'accepted_suggestion': accepted_suggestion,
486 'form': self.get_unit_edit_form(),
487 'comment_form': self.get_unit_comment_form(),
488 'priority': priority,
489 'store': self.store,
490 'directory': self.directory,
491 'user': self.request.user,
492 'project': self.project,
493 'language': self.language,
494 'special_characters': self.special_characters,
495 'source_language': self.source_language,
496 'cantranslate': check_user_permission(self.request.user,
497 "translate",
498 self.directory),
499 'cantranslatexlang': check_user_permission(self.request.user,
500 "administrate",
501 self.project.directory),
502 'cansuggest': check_user_permission(self.request.user,
503 "suggest",
504 self.directory),
505 'canreview': check_user_permission(self.request.user,
506 "review",
507 self.directory),
508 'has_admin_access': check_user_permission(self.request.user,
509 'administrate',
510 self.directory),
511 'altsrcs': {x.id: x.data for x in self.get_alt_srcs()},
512 'unit_values': self.get_unit_values(),
513 'target_nplurals': self.get_target_nplurals(),
514 'has_plurals': self.object.hasplural(),
515 'filetype': self.object.store.filetype.name,
516 'suggestions': suggestions,
517 'suggestions_dict': {x.id: dict(id=x.id, target=x.target.strings)
518 for x in suggestions},
519 "critical_checks": list(
520 self.object.get_critical_qualitychecks()),
521 "warning_checks": list(
522 self.object.get_warning_qualitychecks()),
523 "terms": self.object.get_terminology()}
524
525 def get_response_data(self, context):
526 return {
527 'editor': self.render_edit_template(context),
528 'tm_suggestions': self.object.get_tm_suggestions(),
529 'is_obsolete': self.object.isobsolete(),
530 'sources': self.get_sources()}
531
532
533 @get_unit_context('view')
534 def permalink_redirect(request, unit):
535 return redirect(request.build_absolute_uri(unit.get_translate_url()))
536
537
538 @ajax_required
539 @get_unit_context('suggest')
540 def suggest(request, unit, **kwargs_):
541 """Processes translation suggestions and stores them in the database.
542
543 :return: An object in JSON notation that contains the previous and last
544 units for the unit next to unit ``uid``.
545 """
546 json = {}
547
548 translation_project = request.translation_project
549 language = translation_project.language
550
551 if unit.hasplural():
552 snplurals = len(unit.source.strings)
553 else:
554 snplurals = None
555
556 form_class = unit_form_factory(language, snplurals, request)
557 form = form_class(request.POST, instance=unit, request=request)
558
559 unit_target = unit.target
560 if form.is_valid():
561 target = form.cleaned_data["target_f"]
562 if target and target != unit_target:
563 unit = Unit.objects.get(id=unit.id)
564 review.get(Suggestion)().add(
565 unit,
566 form.cleaned_data['target_f'],
567 user=request.user)
568
569 if not request.user.is_anonymous:
570 json['user_score'] = request.user.public_score
571
572 return JsonResponse(json)
573
574 return JsonResponseBadRequest({'msg': _("Failed to process suggestion.")})
575
576
577 class UnitSuggestionJSON(PootleJSONMixin, GatherContextMixin, FormView):
578
579 action = "accept"
580 form_class = SuggestionReviewForm
581 http_method_names = ['post', 'delete']
582
583 @property
584 def permission_context(self):
585 return self.get_object().unit.store.parent
586
587 @set_permissions
588 @requires_permission("view")
589 def dispatch(self, request, *args, **kwargs):
590 # get funky with the request 8/
591 return super(UnitSuggestionJSON, self).dispatch(request, *args, **kwargs)
592
593 @lru_cache()
594 def get_object(self):
595 return get_object_or_404(
596 Suggestion.objects.select_related(
597 "unit",
598 "unit__store",
599 "unit__store__parent",
600 "unit__change",
601 "state"),
602 unit_id=self.request.resolver_match.kwargs["uid"],
603 id=self.request.resolver_match.kwargs["sugg_id"])
604
605 def get_form_kwargs(self, **kwargs):
606 comment = (
607 QueryDict(self.request.body).get("comment")
608 if self.action == "reject"
609 else self.request.POST.get("comment"))
610 is_fuzzy = (
611 QueryDict(self.request.body).get("is_fuzzy")
612 if self.action == "reject"
613 else self.request.POST.get("is_fuzzy"))
614 return dict(
615 target_object=self.get_object(),
616 request_user=self.request.user,
617 data=dict(
618 is_fuzzy=is_fuzzy,
619 comment=comment,
620 action=self.action))
621
622 def delete(self, request, *args, **kwargs):
623 self.action = "reject"
624 return self.post(request, *args, **kwargs)
625
626 def get_context_data(self, *args, **kwargs):
627 ctx = super(UnitSuggestionJSON, self).get_context_data(*args, **kwargs)
628 form = ctx["form"]
629 if form.is_valid():
630 result = dict(
631 udbid=form.target_object.unit.id,
632 sugid=form.target_object.id,
633 user_score=self.request.user.public_score)
634 if form.cleaned_data["action"] == "accept":
635 result.update(
636 dict(
637 newtargets=[
638 target
639 for target
640 in form.target_object.unit.target.strings],
641 checks=_get_critical_checks_snippet(
642 self.request,
643 form.target_object.unit)))
644 return result
645
646 def form_valid(self, form):
647 form.save()
648 return self.render_to_response(
649 self.get_context_data(form=form))
650
651 def form_invalid(self, form):
652 if form.non_field_errors():
653 raise Http404
654 raise Http400(form.errors)
655
656
657 @ajax_required
658 @get_unit_context('review')
659 def toggle_qualitycheck(request, unit, check_id, **kwargs_):
660 try:
661 unit.toggle_qualitycheck(check_id, 'mute' in request.POST, request.user)
662 except ObjectDoesNotExist:
663 raise Http404
664
665 return JsonResponse({})
666
667
668 class UnitSubmitJSON(UnitSuggestionJSON):
669
670 @set_permissions
671 @requires_permission("translate")
672 def dispatch(self, request, *args, **kwargs):
673 # get funky with the request 8/
674 return super(UnitSuggestionJSON, self).dispatch(request, *args, **kwargs)
675
676 @property
677 def form_class(self):
678 if self.get_suggestion():
679 return SuggestionSubmitForm
680 return SubmitForm
681
682 @property
683 def permission_context(self):
684 return self.get_object().store.parent
685
686 @lru_cache()
687 def get_object(self):
688 return get_object_or_404(
689 Unit.objects.select_related(
690 "store",
691 "change",
692 "store__parent",
693 "store__translation_project",
694 "store__filetype",
695 "store__translation_project__language",
696 "store__translation_project__project",
697 "store__data",
698 "store__translation_project__data"),
699 id=self.request.resolver_match.kwargs["uid"])
700
701 @lru_cache()
702 def get_suggestion(self):
703 if "suggestion" in self.request.POST:
704 return get_object_or_404(
705 Suggestion,
706 unit_id=self.get_object().id,
707 id=self.request.POST["suggestion"])
708
709 def get_form_kwargs(self, **kwargs):
710 kwargs = dict(
711 unit=self.get_object(),
712 request_user=self.request.user,
713 data=self.request.POST)
714 if self.get_suggestion():
715 kwargs["target_object"] = self.get_suggestion()
716 return kwargs
717
718 def get_context_data(self, *args, **kwargs):
719 ctx = super(UnitSuggestionJSON, self).get_context_data(*args, **kwargs)
720 form = ctx["form"]
721 if form.is_valid():
722 form.unit.refresh_from_db()
723 result = dict(
724 checks=_get_critical_checks_snippet(self.request, form.unit),
725 user_score=self.request.user.public_score,
726 newtargets=[target for target in form.unit.target.strings])
727 return result
728
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_store/views.py b/pootle/apps/pootle_store/views.py
--- a/pootle/apps/pootle_store/views.py
+++ b/pootle/apps/pootle_store/views.py
@@ -146,6 +146,8 @@
ctx = {
'canreview': can_review,
'unit': unit,
+ 'critical_checks': list(unit.get_critical_qualitychecks()),
+ 'warning_checks': list(unit.get_warning_qualitychecks()),
}
template = loader.get_template('editor/units/xhr_checks.html')
return template.render(context=ctx, request=request)
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/views.py b/pootle/apps/pootle_store/views.py\n--- a/pootle/apps/pootle_store/views.py\n+++ b/pootle/apps/pootle_store/views.py\n@@ -146,6 +146,8 @@\n ctx = {\n 'canreview': can_review,\n 'unit': unit,\n+ 'critical_checks': list(unit.get_critical_qualitychecks()),\n+ 'warning_checks': list(unit.get_warning_qualitychecks()),\n }\n template = loader.get_template('editor/units/xhr_checks.html')\n return template.render(context=ctx, request=request)\n", "issue": "No critical checks are displayed after sending translation\nIf you send a translation that has critical checks the editor correctly keeps you in the unit and displays a red background, but the critical errors are not displayed, so the translator has to reload to know which critical errors were reported or to mute them:\r\n\r\n\r\n\r\n\nNo critical checks are displayed after sending translation\nIf you send a translation that has critical checks the editor correctly keeps you in the unit and displays a red background, but the critical errors are not displayed, so the translator has to reload to know which critical errors were reported or to mute them:\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport calendar\nimport unicodedata\nfrom collections import OrderedDict\n\nfrom translate.lang import data\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, QueryDict\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template import loader\nfrom django.utils.functional import cached_property\nfrom django.utils.lru_cache import lru_cache\nfrom django.utils.translation import to_locale\nfrom django.utils.translation.trans_real import parse_accept_lang_header\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic import FormView\n\nfrom pootle.core.delegate import review, search_backend\nfrom pootle.core.exceptions import Http400\nfrom pootle.core.http import JsonResponse, JsonResponseBadRequest\nfrom pootle.core.utils import dateformat\nfrom pootle.core.views import PootleJSON\nfrom pootle.core.views.decorators import requires_permission, set_permissions\nfrom pootle.core.views.mixins import GatherContextMixin, PootleJSONMixin\nfrom pootle.i18n.dates import timesince\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_app.models.permissions import check_user_permission\nfrom pootle_language.models import Language\nfrom pootle_misc.util import ajax_required\n\nfrom .decorators import get_unit_context\nfrom .forms import (\n SubmitForm, SuggestionReviewForm, SuggestionSubmitForm, UnitSearchForm,\n unit_comment_form_factory, unit_form_factory)\nfrom .models import Suggestion, Unit\nfrom .templatetags.store_tags import pluralize_source, pluralize_target\nfrom .unit.results import GroupedResults\nfrom .unit.timeline import Timeline\nfrom .util import find_altsrcs\n\n\ndef get_alt_src_langs(request, user, translation_project):\n language = translation_project.language\n project = translation_project.project\n source_language = project.source_language\n langs = list(\n user.alt_src_langs.filter(\n translationproject__project=project))\n if langs:\n 
return langs\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in parse_accept_lang_header(accept):\n if accept_lang == '*':\n continue\n normalized = to_locale(\n data.normalize_code(\n data.simplify_to_common(accept_lang)))\n code = to_locale(accept_lang)\n is_source_lang = any(\n langcode in ('en', 'en_US', source_language.code, language.code)\n for langcode in [code, normalized])\n if is_source_lang:\n continue\n\n langs = list(\n Language.objects.filter(\n code__in=(normalized, code),\n translationproject__project=project))\n if langs:\n return langs\n\n\n#\n# Views used with XMLHttpRequest requests.\n#\n\ndef _filter_ctx_units(units_qs, unit, how_many, gap=0):\n \"\"\"Returns ``how_many``*2 units that are before and after ``index``.\"\"\"\n result = {'before': [], 'after': []}\n\n if how_many and unit.index - gap > 0:\n before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \\\n .order_by('-index')[gap:how_many+gap]\n result['before'] = _build_units_list(before, reverse=True)\n result['before'].reverse()\n\n # FIXME: can we avoid this query if length is known?\n if how_many:\n after = units_qs.filter(store=unit.store_id,\n index__gt=unit.index)[gap:how_many+gap]\n result['after'] = _build_units_list(after)\n\n return result\n\n\ndef _prepare_unit(unit):\n \"\"\"Constructs a dictionary with relevant `unit` data.\"\"\"\n return {\n 'id': unit.id,\n 'url': unit.get_translate_url(),\n 'isfuzzy': unit.isfuzzy(),\n 'source': [source[1] for source in pluralize_source(unit)],\n 'target': [target[1] for target in pluralize_target(unit)],\n }\n\n\ndef _build_units_list(units, reverse=False):\n \"\"\"Given a list/queryset of units, builds a list with the unit data\n contained in a dictionary ready to be returned as JSON.\n\n :return: A list with unit id, source, and target texts. In case of\n having plural forms, a title for the plural form is also provided.\n \"\"\"\n return_units = []\n\n for unit in iter(units):\n return_units.append(_prepare_unit(unit))\n\n return return_units\n\n\ndef _get_critical_checks_snippet(request, unit):\n \"\"\"Retrieves the critical checks snippet.\n\n :param request: an `HttpRequest` object\n :param unit: a `Unit` instance for which critical checks need to be\n rendered.\n :return: rendered HTML snippet with the failing checks, or `None` if\n there are no critical failing checks.\n \"\"\"\n if not unit.has_critical_checks():\n return None\n\n can_review = check_user_permission(request.user, 'review',\n unit.store.parent)\n ctx = {\n 'canreview': can_review,\n 'unit': unit,\n }\n template = loader.get_template('editor/units/xhr_checks.html')\n return template.render(context=ctx, request=request)\n\n\n@ajax_required\ndef get_units(request, **kwargs_):\n \"\"\"Gets source and target texts and its metadata.\n\n :return: A JSON-encoded string containing the source and target texts\n grouped by the store they belong to.\n\n The optional `count` GET parameter defines the chunk size to\n consider. 
The user's preference will be used by default.\n\n When the `initial` GET parameter is present, a sorted list of\n the result set ids will be returned too.\n \"\"\"\n search_form = UnitSearchForm(request.GET, user=request.user)\n\n if not search_form.is_valid():\n errors = search_form.errors.as_data()\n if \"path\" in errors:\n for error in errors[\"path\"]:\n if error.code == \"max_length\":\n raise Http400(_('Path too long.'))\n elif error.code == \"required\":\n raise Http400(_('Arguments missing.'))\n raise Http404(forms.ValidationError(search_form.errors).messages)\n\n total, start, end, units_qs = search_backend.get(Unit)(\n request.user, **search_form.cleaned_data).search()\n return JsonResponse(\n {'start': start,\n 'end': end,\n 'total': total,\n 'unitGroups': GroupedResults(units_qs).data})\n\n\n@ajax_required\n@get_unit_context('view')\ndef get_more_context(request, unit, **kwargs_):\n \"\"\"Retrieves more context units.\n\n :return: An object in JSON notation that contains the source and target\n texts for units that are in the context of unit ``uid``.\n \"\"\"\n store = request.store\n json = {}\n gap = int(request.GET.get('gap', 0))\n qty = int(request.GET.get('qty', 1))\n\n json[\"ctx\"] = _filter_ctx_units(store.units, unit, qty, gap)\n return JsonResponse(json)\n\n\n@ajax_required\n@require_http_methods(['POST', 'DELETE'])\n@get_unit_context('translate')\ndef comment(request, unit, **kwargs_):\n \"\"\"Dispatches the comment action according to the HTTP verb.\"\"\"\n if request.method == 'DELETE':\n return delete_comment(request, unit)\n elif request.method == 'POST':\n return save_comment(request, unit)\n\n\ndef delete_comment(request, unit, **kwargs_):\n \"\"\"Deletes a comment by blanking its contents and records a new\n submission.\n \"\"\"\n unit.change.commented_by = None\n unit.change.commented_on = None\n\n language = request.translation_project.language\n comment_form_class = unit_comment_form_factory(language)\n form = comment_form_class({}, instance=unit, request=request)\n\n if form.is_valid():\n form.save()\n return JsonResponse({})\n\n return JsonResponseBadRequest({'msg': _(\"Failed to remove comment.\")})\n\n\ndef save_comment(request, unit):\n \"\"\"Stores a new comment for the given ``unit``.\n\n :return: If the form validates, the cleaned comment is returned.\n An error message is returned otherwise.\n \"\"\"\n\n language = request.translation_project.language\n form = unit_comment_form_factory(language)(request.POST, instance=unit,\n request=request)\n\n if form.is_valid():\n form.save()\n\n user = request.user\n directory = unit.store.parent\n\n ctx = {\n 'unit': unit,\n 'language': language,\n 'cantranslate': check_user_permission(user, 'translate',\n directory),\n 'cansuggest': check_user_permission(user, 'suggest', directory),\n }\n t = loader.get_template('editor/units/xhr_comment.html')\n\n return JsonResponse({'comment': t.render(context=ctx,\n request=request)})\n\n return JsonResponseBadRequest({'msg': _(\"Comment submission failed.\")})\n\n\nclass PootleUnitJSON(PootleJSON):\n model = Unit\n pk_url_kwarg = \"uid\"\n\n @cached_property\n def permission_context(self):\n self.object = self.get_object()\n return self.store.parent\n\n @property\n def pootle_path(self):\n return self.store.pootle_path\n\n @cached_property\n def tp(self):\n return self.store.translation_project\n\n @cached_property\n def store(self):\n return self.object.store\n\n @cached_property\n def source_language(self):\n return self.project.source_language\n\n 
@cached_property\n def directory(self):\n return self.store.parent\n\n @lru_cache()\n def get_object(self):\n return super(PootleUnitJSON, self).get_object()\n\n\nclass UnitTimelineJSON(PootleUnitJSON):\n\n model = Unit\n pk_url_kwarg = \"uid\"\n\n template_name = 'editor/units/xhr_timeline.html'\n\n @property\n def language(self):\n return self.object.store.translation_project.language\n\n @cached_property\n def permission_context(self):\n self.object = self.get_object()\n return self.project.directory\n\n @property\n def project(self):\n return self.object.store.translation_project.project\n\n @property\n def timeline(self):\n return Timeline(self.object)\n\n def get_context_data(self, *args, **kwargs):\n return dict(\n event_groups=self.timeline.grouped_events(),\n language=self.language)\n\n def get_queryset(self):\n return Unit.objects.get_translatable(self.request.user).select_related(\n \"change\",\n \"store__translation_project__language\",\n \"store__translation_project__project__directory\")\n\n def get_response_data(self, context):\n return {\n 'uid': self.object.id,\n 'event_groups': self.get_event_groups_data(context),\n 'timeline': self.render_timeline(context)}\n\n def render_timeline(self, context):\n return loader.get_template(self.template_name).render(context=context)\n\n def get_event_groups_data(self, context):\n result = []\n for event_group in context['event_groups']:\n display_dt = event_group['datetime']\n if display_dt is not None:\n display_dt = dateformat.format(display_dt)\n iso_dt = event_group['datetime'].isoformat()\n relative_time = timesince(\n calendar.timegm(event_group['datetime'].timetuple()),\n self.request_lang)\n else:\n iso_dt = None\n relative_time = None\n result.append({\n \"display_datetime\": display_dt,\n \"iso_datetime\": iso_dt,\n \"relative_time\": relative_time,\n \"via_upload\": event_group.get('via_upload', False),\n })\n return result\n\n\nCHARACTERS_NAMES = OrderedDict(\n (\n # Code Display name\n (8204, 'ZWNJ'),\n (8205, 'ZWJ'),\n (8206, 'LRM'),\n (8207, 'RLM'),\n (8234, 'LRE'),\n (8235, 'RLE'),\n (8236, 'PDF'),\n (8237, 'LRO'),\n (8238, 'RLO'),\n )\n)\n\nCHARACTERS = u\"\".join([unichr(index) for index in CHARACTERS_NAMES.keys()])\n\n\nclass UnitEditJSON(PootleUnitJSON):\n\n @property\n def special_characters(self):\n if self.language.direction == \"rtl\":\n # Inject some extra special characters for RTL languages.\n language_specialchars = CHARACTERS\n # Do not repeat special chars.\n language_specialchars += u\"\".join(\n [c for c in self.language.specialchars if c not in CHARACTERS])\n else:\n language_specialchars = self.language.specialchars\n\n special_chars = []\n for specialchar in language_specialchars:\n code = ord(specialchar)\n special_chars.append({\n 'display': CHARACTERS_NAMES.get(code, specialchar),\n 'code': code,\n 'hex_code': \"U+\" + hex(code)[2:].upper(), # Like U+200C\n 'name': unicodedata.name(specialchar, ''),\n })\n return special_chars\n\n def get_edit_template(self):\n if self.project.is_terminology or self.store.has_terminology:\n return loader.get_template('editor/units/term_edit.html')\n return loader.get_template('editor/units/edit.html')\n\n def render_edit_template(self, context):\n return self.get_edit_template().render(context=context,\n request=self.request)\n\n def get_source_nplurals(self):\n if self.object.hasplural():\n return len(self.object.source.strings)\n return None\n\n def get_target_nplurals(self):\n source_nplurals = self.get_source_nplurals()\n return self.language.nplurals if 
source_nplurals is not None else 1\n\n def get_unit_values(self):\n target_nplurals = self.get_target_nplurals()\n unit_values = [value for value in self.object.target_f.strings]\n if len(unit_values) < target_nplurals:\n return unit_values + ((target_nplurals - len(unit_values)) * [''])\n return unit_values\n\n def get_unit_edit_form(self):\n form_class = unit_form_factory(self.language,\n self.get_source_nplurals(),\n self.request)\n return form_class(instance=self.object, request=self.request)\n\n def get_unit_comment_form(self):\n comment_form_class = unit_comment_form_factory(self.language)\n return comment_form_class({}, instance=self.object, request=self.request)\n\n @lru_cache()\n def get_alt_srcs(self):\n if self.request.user.is_anonymous:\n return []\n return find_altsrcs(\n self.object,\n get_alt_src_langs(self.request, self.request.user, self.tp),\n store=self.store,\n project=self.project)\n\n def get_queryset(self):\n return Unit.objects.get_translatable(self.request.user).select_related(\n \"change\",\n \"change__submitted_by\",\n \"store\",\n \"store__filetype\",\n \"store__parent\",\n \"store__translation_project\",\n \"store__translation_project__project\",\n \"store__translation_project__project__directory\",\n \"store__translation_project__project__source_language\",\n \"store__translation_project__language\")\n\n def get_sources(self):\n sources = {\n unit.language_code: unit.target.strings\n for unit in self.get_alt_srcs()}\n sources[self.source_language.code] = self.object.source_f.strings\n return sources\n\n def get_context_data(self, *args, **kwargs):\n priority = (\n self.store.priority\n if 'virtualfolder' in settings.INSTALLED_APPS\n else None)\n suggestions = self.object.get_suggestions()\n latest_target_submission = self.object.get_latest_target_submission()\n accepted_suggestion = None\n if latest_target_submission is not None:\n accepted_suggestion = latest_target_submission.suggestion\n return {\n 'unit': self.object,\n 'accepted_suggestion': accepted_suggestion,\n 'form': self.get_unit_edit_form(),\n 'comment_form': self.get_unit_comment_form(),\n 'priority': priority,\n 'store': self.store,\n 'directory': self.directory,\n 'user': self.request.user,\n 'project': self.project,\n 'language': self.language,\n 'special_characters': self.special_characters,\n 'source_language': self.source_language,\n 'cantranslate': check_user_permission(self.request.user,\n \"translate\",\n self.directory),\n 'cantranslatexlang': check_user_permission(self.request.user,\n \"administrate\",\n self.project.directory),\n 'cansuggest': check_user_permission(self.request.user,\n \"suggest\",\n self.directory),\n 'canreview': check_user_permission(self.request.user,\n \"review\",\n self.directory),\n 'has_admin_access': check_user_permission(self.request.user,\n 'administrate',\n self.directory),\n 'altsrcs': {x.id: x.data for x in self.get_alt_srcs()},\n 'unit_values': self.get_unit_values(),\n 'target_nplurals': self.get_target_nplurals(),\n 'has_plurals': self.object.hasplural(),\n 'filetype': self.object.store.filetype.name,\n 'suggestions': suggestions,\n 'suggestions_dict': {x.id: dict(id=x.id, target=x.target.strings)\n for x in suggestions},\n \"critical_checks\": list(\n self.object.get_critical_qualitychecks()),\n \"warning_checks\": list(\n self.object.get_warning_qualitychecks()),\n \"terms\": self.object.get_terminology()}\n\n def get_response_data(self, context):\n return {\n 'editor': self.render_edit_template(context),\n 'tm_suggestions': 
self.object.get_tm_suggestions(),\n 'is_obsolete': self.object.isobsolete(),\n 'sources': self.get_sources()}\n\n\n@get_unit_context('view')\ndef permalink_redirect(request, unit):\n return redirect(request.build_absolute_uri(unit.get_translate_url()))\n\n\n@ajax_required\n@get_unit_context('suggest')\ndef suggest(request, unit, **kwargs_):\n \"\"\"Processes translation suggestions and stores them in the database.\n\n :return: An object in JSON notation that contains the previous and last\n units for the unit next to unit ``uid``.\n \"\"\"\n json = {}\n\n translation_project = request.translation_project\n language = translation_project.language\n\n if unit.hasplural():\n snplurals = len(unit.source.strings)\n else:\n snplurals = None\n\n form_class = unit_form_factory(language, snplurals, request)\n form = form_class(request.POST, instance=unit, request=request)\n\n unit_target = unit.target\n if form.is_valid():\n target = form.cleaned_data[\"target_f\"]\n if target and target != unit_target:\n unit = Unit.objects.get(id=unit.id)\n review.get(Suggestion)().add(\n unit,\n form.cleaned_data['target_f'],\n user=request.user)\n\n if not request.user.is_anonymous:\n json['user_score'] = request.user.public_score\n\n return JsonResponse(json)\n\n return JsonResponseBadRequest({'msg': _(\"Failed to process suggestion.\")})\n\n\nclass UnitSuggestionJSON(PootleJSONMixin, GatherContextMixin, FormView):\n\n action = \"accept\"\n form_class = SuggestionReviewForm\n http_method_names = ['post', 'delete']\n\n @property\n def permission_context(self):\n return self.get_object().unit.store.parent\n\n @set_permissions\n @requires_permission(\"view\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(UnitSuggestionJSON, self).dispatch(request, *args, **kwargs)\n\n @lru_cache()\n def get_object(self):\n return get_object_or_404(\n Suggestion.objects.select_related(\n \"unit\",\n \"unit__store\",\n \"unit__store__parent\",\n \"unit__change\",\n \"state\"),\n unit_id=self.request.resolver_match.kwargs[\"uid\"],\n id=self.request.resolver_match.kwargs[\"sugg_id\"])\n\n def get_form_kwargs(self, **kwargs):\n comment = (\n QueryDict(self.request.body).get(\"comment\")\n if self.action == \"reject\"\n else self.request.POST.get(\"comment\"))\n is_fuzzy = (\n QueryDict(self.request.body).get(\"is_fuzzy\")\n if self.action == \"reject\"\n else self.request.POST.get(\"is_fuzzy\"))\n return dict(\n target_object=self.get_object(),\n request_user=self.request.user,\n data=dict(\n is_fuzzy=is_fuzzy,\n comment=comment,\n action=self.action))\n\n def delete(self, request, *args, **kwargs):\n self.action = \"reject\"\n return self.post(request, *args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(UnitSuggestionJSON, self).get_context_data(*args, **kwargs)\n form = ctx[\"form\"]\n if form.is_valid():\n result = dict(\n udbid=form.target_object.unit.id,\n sugid=form.target_object.id,\n user_score=self.request.user.public_score)\n if form.cleaned_data[\"action\"] == \"accept\":\n result.update(\n dict(\n newtargets=[\n target\n for target\n in form.target_object.unit.target.strings],\n checks=_get_critical_checks_snippet(\n self.request,\n form.target_object.unit)))\n return result\n\n def form_valid(self, form):\n form.save()\n return self.render_to_response(\n self.get_context_data(form=form))\n\n def form_invalid(self, form):\n if form.non_field_errors():\n raise Http404\n raise 
Http400(form.errors)\n\n\n@ajax_required\n@get_unit_context('review')\ndef toggle_qualitycheck(request, unit, check_id, **kwargs_):\n try:\n unit.toggle_qualitycheck(check_id, 'mute' in request.POST, request.user)\n except ObjectDoesNotExist:\n raise Http404\n\n return JsonResponse({})\n\n\nclass UnitSubmitJSON(UnitSuggestionJSON):\n\n @set_permissions\n @requires_permission(\"translate\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(UnitSuggestionJSON, self).dispatch(request, *args, **kwargs)\n\n @property\n def form_class(self):\n if self.get_suggestion():\n return SuggestionSubmitForm\n return SubmitForm\n\n @property\n def permission_context(self):\n return self.get_object().store.parent\n\n @lru_cache()\n def get_object(self):\n return get_object_or_404(\n Unit.objects.select_related(\n \"store\",\n \"change\",\n \"store__parent\",\n \"store__translation_project\",\n \"store__filetype\",\n \"store__translation_project__language\",\n \"store__translation_project__project\",\n \"store__data\",\n \"store__translation_project__data\"),\n id=self.request.resolver_match.kwargs[\"uid\"])\n\n @lru_cache()\n def get_suggestion(self):\n if \"suggestion\" in self.request.POST:\n return get_object_or_404(\n Suggestion,\n unit_id=self.get_object().id,\n id=self.request.POST[\"suggestion\"])\n\n def get_form_kwargs(self, **kwargs):\n kwargs = dict(\n unit=self.get_object(),\n request_user=self.request.user,\n data=self.request.POST)\n if self.get_suggestion():\n kwargs[\"target_object\"] = self.get_suggestion()\n return kwargs\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(UnitSuggestionJSON, self).get_context_data(*args, **kwargs)\n form = ctx[\"form\"]\n if form.is_valid():\n form.unit.refresh_from_db()\n result = dict(\n checks=_get_critical_checks_snippet(self.request, form.unit),\n user_score=self.request.user.public_score,\n newtargets=[target for target in form.unit.target.strings])\n return result\n", "path": "pootle/apps/pootle_store/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport calendar\nimport unicodedata\nfrom collections import OrderedDict\n\nfrom translate.lang import data\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, QueryDict\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template import loader\nfrom django.utils.functional import cached_property\nfrom django.utils.lru_cache import lru_cache\nfrom django.utils.translation import to_locale\nfrom django.utils.translation.trans_real import parse_accept_lang_header\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic import FormView\n\nfrom pootle.core.delegate import review, search_backend\nfrom pootle.core.exceptions import Http400\nfrom pootle.core.http import JsonResponse, JsonResponseBadRequest\nfrom pootle.core.utils import dateformat\nfrom pootle.core.views import PootleJSON\nfrom pootle.core.views.decorators import requires_permission, set_permissions\nfrom pootle.core.views.mixins import GatherContextMixin, PootleJSONMixin\nfrom pootle.i18n.dates import timesince\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_app.models.permissions import check_user_permission\nfrom pootle_language.models import Language\nfrom pootle_misc.util import ajax_required\n\nfrom .decorators import get_unit_context\nfrom .forms import (\n SubmitForm, SuggestionReviewForm, SuggestionSubmitForm, UnitSearchForm,\n unit_comment_form_factory, unit_form_factory)\nfrom .models import Suggestion, Unit\nfrom .templatetags.store_tags import pluralize_source, pluralize_target\nfrom .unit.results import GroupedResults\nfrom .unit.timeline import Timeline\nfrom .util import find_altsrcs\n\n\ndef get_alt_src_langs(request, user, translation_project):\n language = translation_project.language\n project = translation_project.project\n source_language = project.source_language\n langs = list(\n user.alt_src_langs.filter(\n translationproject__project=project))\n if langs:\n return langs\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in parse_accept_lang_header(accept):\n if accept_lang == '*':\n continue\n normalized = to_locale(\n data.normalize_code(\n data.simplify_to_common(accept_lang)))\n code = to_locale(accept_lang)\n is_source_lang = any(\n langcode in ('en', 'en_US', source_language.code, language.code)\n for langcode in [code, normalized])\n if is_source_lang:\n continue\n\n langs = list(\n Language.objects.filter(\n code__in=(normalized, code),\n translationproject__project=project))\n if langs:\n return langs\n\n\n#\n# Views used with XMLHttpRequest requests.\n#\n\ndef _filter_ctx_units(units_qs, unit, how_many, gap=0):\n \"\"\"Returns ``how_many``*2 units that are before and after ``index``.\"\"\"\n result = {'before': [], 'after': []}\n\n if how_many and unit.index - gap > 0:\n before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \\\n .order_by('-index')[gap:how_many+gap]\n result['before'] = _build_units_list(before, reverse=True)\n result['before'].reverse()\n\n # FIXME: can we avoid this query if length is known?\n if how_many:\n after = units_qs.filter(store=unit.store_id,\n index__gt=unit.index)[gap:how_many+gap]\n result['after'] = _build_units_list(after)\n\n return result\n\n\ndef _prepare_unit(unit):\n \"\"\"Constructs a dictionary with relevant `unit` data.\"\"\"\n return {\n 
'id': unit.id,\n 'url': unit.get_translate_url(),\n 'isfuzzy': unit.isfuzzy(),\n 'source': [source[1] for source in pluralize_source(unit)],\n 'target': [target[1] for target in pluralize_target(unit)],\n }\n\n\ndef _build_units_list(units, reverse=False):\n \"\"\"Given a list/queryset of units, builds a list with the unit data\n contained in a dictionary ready to be returned as JSON.\n\n :return: A list with unit id, source, and target texts. In case of\n having plural forms, a title for the plural form is also provided.\n \"\"\"\n return_units = []\n\n for unit in iter(units):\n return_units.append(_prepare_unit(unit))\n\n return return_units\n\n\ndef _get_critical_checks_snippet(request, unit):\n \"\"\"Retrieves the critical checks snippet.\n\n :param request: an `HttpRequest` object\n :param unit: a `Unit` instance for which critical checks need to be\n rendered.\n :return: rendered HTML snippet with the failing checks, or `None` if\n there are no critical failing checks.\n \"\"\"\n if not unit.has_critical_checks():\n return None\n\n can_review = check_user_permission(request.user, 'review',\n unit.store.parent)\n ctx = {\n 'canreview': can_review,\n 'unit': unit,\n 'critical_checks': list(unit.get_critical_qualitychecks()),\n 'warning_checks': list(unit.get_warning_qualitychecks()),\n }\n template = loader.get_template('editor/units/xhr_checks.html')\n return template.render(context=ctx, request=request)\n\n\n@ajax_required\ndef get_units(request, **kwargs_):\n \"\"\"Gets source and target texts and its metadata.\n\n :return: A JSON-encoded string containing the source and target texts\n grouped by the store they belong to.\n\n The optional `count` GET parameter defines the chunk size to\n consider. The user's preference will be used by default.\n\n When the `initial` GET parameter is present, a sorted list of\n the result set ids will be returned too.\n \"\"\"\n search_form = UnitSearchForm(request.GET, user=request.user)\n\n if not search_form.is_valid():\n errors = search_form.errors.as_data()\n if \"path\" in errors:\n for error in errors[\"path\"]:\n if error.code == \"max_length\":\n raise Http400(_('Path too long.'))\n elif error.code == \"required\":\n raise Http400(_('Arguments missing.'))\n raise Http404(forms.ValidationError(search_form.errors).messages)\n\n total, start, end, units_qs = search_backend.get(Unit)(\n request.user, **search_form.cleaned_data).search()\n return JsonResponse(\n {'start': start,\n 'end': end,\n 'total': total,\n 'unitGroups': GroupedResults(units_qs).data})\n\n\n@ajax_required\n@get_unit_context('view')\ndef get_more_context(request, unit, **kwargs_):\n \"\"\"Retrieves more context units.\n\n :return: An object in JSON notation that contains the source and target\n texts for units that are in the context of unit ``uid``.\n \"\"\"\n store = request.store\n json = {}\n gap = int(request.GET.get('gap', 0))\n qty = int(request.GET.get('qty', 1))\n\n json[\"ctx\"] = _filter_ctx_units(store.units, unit, qty, gap)\n return JsonResponse(json)\n\n\n@ajax_required\n@require_http_methods(['POST', 'DELETE'])\n@get_unit_context('translate')\ndef comment(request, unit, **kwargs_):\n \"\"\"Dispatches the comment action according to the HTTP verb.\"\"\"\n if request.method == 'DELETE':\n return delete_comment(request, unit)\n elif request.method == 'POST':\n return save_comment(request, unit)\n\n\ndef delete_comment(request, unit, **kwargs_):\n \"\"\"Deletes a comment by blanking its contents and records a new\n submission.\n \"\"\"\n unit.change.commented_by = 
None\n unit.change.commented_on = None\n\n language = request.translation_project.language\n comment_form_class = unit_comment_form_factory(language)\n form = comment_form_class({}, instance=unit, request=request)\n\n if form.is_valid():\n form.save()\n return JsonResponse({})\n\n return JsonResponseBadRequest({'msg': _(\"Failed to remove comment.\")})\n\n\ndef save_comment(request, unit):\n \"\"\"Stores a new comment for the given ``unit``.\n\n :return: If the form validates, the cleaned comment is returned.\n An error message is returned otherwise.\n \"\"\"\n\n language = request.translation_project.language\n form = unit_comment_form_factory(language)(request.POST, instance=unit,\n request=request)\n\n if form.is_valid():\n form.save()\n\n user = request.user\n directory = unit.store.parent\n\n ctx = {\n 'unit': unit,\n 'language': language,\n 'cantranslate': check_user_permission(user, 'translate',\n directory),\n 'cansuggest': check_user_permission(user, 'suggest', directory),\n }\n t = loader.get_template('editor/units/xhr_comment.html')\n\n return JsonResponse({'comment': t.render(context=ctx,\n request=request)})\n\n return JsonResponseBadRequest({'msg': _(\"Comment submission failed.\")})\n\n\nclass PootleUnitJSON(PootleJSON):\n model = Unit\n pk_url_kwarg = \"uid\"\n\n @cached_property\n def permission_context(self):\n self.object = self.get_object()\n return self.store.parent\n\n @property\n def pootle_path(self):\n return self.store.pootle_path\n\n @cached_property\n def tp(self):\n return self.store.translation_project\n\n @cached_property\n def store(self):\n return self.object.store\n\n @cached_property\n def source_language(self):\n return self.project.source_language\n\n @cached_property\n def directory(self):\n return self.store.parent\n\n @lru_cache()\n def get_object(self):\n return super(PootleUnitJSON, self).get_object()\n\n\nclass UnitTimelineJSON(PootleUnitJSON):\n\n model = Unit\n pk_url_kwarg = \"uid\"\n\n template_name = 'editor/units/xhr_timeline.html'\n\n @property\n def language(self):\n return self.object.store.translation_project.language\n\n @cached_property\n def permission_context(self):\n self.object = self.get_object()\n return self.project.directory\n\n @property\n def project(self):\n return self.object.store.translation_project.project\n\n @property\n def timeline(self):\n return Timeline(self.object)\n\n def get_context_data(self, *args, **kwargs):\n return dict(\n event_groups=self.timeline.grouped_events(),\n language=self.language)\n\n def get_queryset(self):\n return Unit.objects.get_translatable(self.request.user).select_related(\n \"change\",\n \"store__translation_project__language\",\n \"store__translation_project__project__directory\")\n\n def get_response_data(self, context):\n return {\n 'uid': self.object.id,\n 'event_groups': self.get_event_groups_data(context),\n 'timeline': self.render_timeline(context)}\n\n def render_timeline(self, context):\n return loader.get_template(self.template_name).render(context=context)\n\n def get_event_groups_data(self, context):\n result = []\n for event_group in context['event_groups']:\n display_dt = event_group['datetime']\n if display_dt is not None:\n display_dt = dateformat.format(display_dt)\n iso_dt = event_group['datetime'].isoformat()\n relative_time = timesince(\n calendar.timegm(event_group['datetime'].timetuple()),\n self.request_lang)\n else:\n iso_dt = None\n relative_time = None\n result.append({\n \"display_datetime\": display_dt,\n \"iso_datetime\": iso_dt,\n \"relative_time\": 
relative_time,\n \"via_upload\": event_group.get('via_upload', False),\n })\n return result\n\n\nCHARACTERS_NAMES = OrderedDict(\n (\n # Code Display name\n (8204, 'ZWNJ'),\n (8205, 'ZWJ'),\n (8206, 'LRM'),\n (8207, 'RLM'),\n (8234, 'LRE'),\n (8235, 'RLE'),\n (8236, 'PDF'),\n (8237, 'LRO'),\n (8238, 'RLO'),\n )\n)\n\nCHARACTERS = u\"\".join([unichr(index) for index in CHARACTERS_NAMES.keys()])\n\n\nclass UnitEditJSON(PootleUnitJSON):\n\n @property\n def special_characters(self):\n if self.language.direction == \"rtl\":\n # Inject some extra special characters for RTL languages.\n language_specialchars = CHARACTERS\n # Do not repeat special chars.\n language_specialchars += u\"\".join(\n [c for c in self.language.specialchars if c not in CHARACTERS])\n else:\n language_specialchars = self.language.specialchars\n\n special_chars = []\n for specialchar in language_specialchars:\n code = ord(specialchar)\n special_chars.append({\n 'display': CHARACTERS_NAMES.get(code, specialchar),\n 'code': code,\n 'hex_code': \"U+\" + hex(code)[2:].upper(), # Like U+200C\n 'name': unicodedata.name(specialchar, ''),\n })\n return special_chars\n\n def get_edit_template(self):\n if self.project.is_terminology or self.store.has_terminology:\n return loader.get_template('editor/units/term_edit.html')\n return loader.get_template('editor/units/edit.html')\n\n def render_edit_template(self, context):\n return self.get_edit_template().render(context=context,\n request=self.request)\n\n def get_source_nplurals(self):\n if self.object.hasplural():\n return len(self.object.source.strings)\n return None\n\n def get_target_nplurals(self):\n source_nplurals = self.get_source_nplurals()\n return self.language.nplurals if source_nplurals is not None else 1\n\n def get_unit_values(self):\n target_nplurals = self.get_target_nplurals()\n unit_values = [value for value in self.object.target_f.strings]\n if len(unit_values) < target_nplurals:\n return unit_values + ((target_nplurals - len(unit_values)) * [''])\n return unit_values\n\n def get_unit_edit_form(self):\n form_class = unit_form_factory(self.language,\n self.get_source_nplurals(),\n self.request)\n return form_class(instance=self.object, request=self.request)\n\n def get_unit_comment_form(self):\n comment_form_class = unit_comment_form_factory(self.language)\n return comment_form_class({}, instance=self.object, request=self.request)\n\n @lru_cache()\n def get_alt_srcs(self):\n if self.request.user.is_anonymous:\n return []\n return find_altsrcs(\n self.object,\n get_alt_src_langs(self.request, self.request.user, self.tp),\n store=self.store,\n project=self.project)\n\n def get_queryset(self):\n return Unit.objects.get_translatable(self.request.user).select_related(\n \"change\",\n \"change__submitted_by\",\n \"store\",\n \"store__filetype\",\n \"store__parent\",\n \"store__translation_project\",\n \"store__translation_project__project\",\n \"store__translation_project__project__directory\",\n \"store__translation_project__project__source_language\",\n \"store__translation_project__language\")\n\n def get_sources(self):\n sources = {\n unit.language_code: unit.target.strings\n for unit in self.get_alt_srcs()}\n sources[self.source_language.code] = self.object.source_f.strings\n return sources\n\n def get_context_data(self, *args, **kwargs):\n priority = (\n self.store.priority\n if 'virtualfolder' in settings.INSTALLED_APPS\n else None)\n suggestions = self.object.get_suggestions()\n latest_target_submission = self.object.get_latest_target_submission()\n 
accepted_suggestion = None\n if latest_target_submission is not None:\n accepted_suggestion = latest_target_submission.suggestion\n return {\n 'unit': self.object,\n 'accepted_suggestion': accepted_suggestion,\n 'form': self.get_unit_edit_form(),\n 'comment_form': self.get_unit_comment_form(),\n 'priority': priority,\n 'store': self.store,\n 'directory': self.directory,\n 'user': self.request.user,\n 'project': self.project,\n 'language': self.language,\n 'special_characters': self.special_characters,\n 'source_language': self.source_language,\n 'cantranslate': check_user_permission(self.request.user,\n \"translate\",\n self.directory),\n 'cantranslatexlang': check_user_permission(self.request.user,\n \"administrate\",\n self.project.directory),\n 'cansuggest': check_user_permission(self.request.user,\n \"suggest\",\n self.directory),\n 'canreview': check_user_permission(self.request.user,\n \"review\",\n self.directory),\n 'has_admin_access': check_user_permission(self.request.user,\n 'administrate',\n self.directory),\n 'altsrcs': {x.id: x.data for x in self.get_alt_srcs()},\n 'unit_values': self.get_unit_values(),\n 'target_nplurals': self.get_target_nplurals(),\n 'has_plurals': self.object.hasplural(),\n 'filetype': self.object.store.filetype.name,\n 'suggestions': suggestions,\n 'suggestions_dict': {x.id: dict(id=x.id, target=x.target.strings)\n for x in suggestions},\n \"critical_checks\": list(\n self.object.get_critical_qualitychecks()),\n \"warning_checks\": list(\n self.object.get_warning_qualitychecks()),\n \"terms\": self.object.get_terminology()}\n\n def get_response_data(self, context):\n return {\n 'editor': self.render_edit_template(context),\n 'tm_suggestions': self.object.get_tm_suggestions(),\n 'is_obsolete': self.object.isobsolete(),\n 'sources': self.get_sources()}\n\n\n@get_unit_context('view')\ndef permalink_redirect(request, unit):\n return redirect(request.build_absolute_uri(unit.get_translate_url()))\n\n\n@ajax_required\n@get_unit_context('suggest')\ndef suggest(request, unit, **kwargs_):\n \"\"\"Processes translation suggestions and stores them in the database.\n\n :return: An object in JSON notation that contains the previous and last\n units for the unit next to unit ``uid``.\n \"\"\"\n json = {}\n\n translation_project = request.translation_project\n language = translation_project.language\n\n if unit.hasplural():\n snplurals = len(unit.source.strings)\n else:\n snplurals = None\n\n form_class = unit_form_factory(language, snplurals, request)\n form = form_class(request.POST, instance=unit, request=request)\n\n unit_target = unit.target\n if form.is_valid():\n target = form.cleaned_data[\"target_f\"]\n if target and target != unit_target:\n unit = Unit.objects.get(id=unit.id)\n review.get(Suggestion)().add(\n unit,\n form.cleaned_data['target_f'],\n user=request.user)\n\n if not request.user.is_anonymous:\n json['user_score'] = request.user.public_score\n\n return JsonResponse(json)\n\n return JsonResponseBadRequest({'msg': _(\"Failed to process suggestion.\")})\n\n\nclass UnitSuggestionJSON(PootleJSONMixin, GatherContextMixin, FormView):\n\n action = \"accept\"\n form_class = SuggestionReviewForm\n http_method_names = ['post', 'delete']\n\n @property\n def permission_context(self):\n return self.get_object().unit.store.parent\n\n @set_permissions\n @requires_permission(\"view\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(UnitSuggestionJSON, self).dispatch(request, *args, **kwargs)\n\n @lru_cache()\n def 
get_object(self):\n return get_object_or_404(\n Suggestion.objects.select_related(\n \"unit\",\n \"unit__store\",\n \"unit__store__parent\",\n \"unit__change\",\n \"state\"),\n unit_id=self.request.resolver_match.kwargs[\"uid\"],\n id=self.request.resolver_match.kwargs[\"sugg_id\"])\n\n def get_form_kwargs(self, **kwargs):\n comment = (\n QueryDict(self.request.body).get(\"comment\")\n if self.action == \"reject\"\n else self.request.POST.get(\"comment\"))\n is_fuzzy = (\n QueryDict(self.request.body).get(\"is_fuzzy\")\n if self.action == \"reject\"\n else self.request.POST.get(\"is_fuzzy\"))\n return dict(\n target_object=self.get_object(),\n request_user=self.request.user,\n data=dict(\n is_fuzzy=is_fuzzy,\n comment=comment,\n action=self.action))\n\n def delete(self, request, *args, **kwargs):\n self.action = \"reject\"\n return self.post(request, *args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(UnitSuggestionJSON, self).get_context_data(*args, **kwargs)\n form = ctx[\"form\"]\n if form.is_valid():\n result = dict(\n udbid=form.target_object.unit.id,\n sugid=form.target_object.id,\n user_score=self.request.user.public_score)\n if form.cleaned_data[\"action\"] == \"accept\":\n result.update(\n dict(\n newtargets=[\n target\n for target\n in form.target_object.unit.target.strings],\n checks=_get_critical_checks_snippet(\n self.request,\n form.target_object.unit)))\n return result\n\n def form_valid(self, form):\n form.save()\n return self.render_to_response(\n self.get_context_data(form=form))\n\n def form_invalid(self, form):\n if form.non_field_errors():\n raise Http404\n raise Http400(form.errors)\n\n\n@ajax_required\n@get_unit_context('review')\ndef toggle_qualitycheck(request, unit, check_id, **kwargs_):\n try:\n unit.toggle_qualitycheck(check_id, 'mute' in request.POST, request.user)\n except ObjectDoesNotExist:\n raise Http404\n\n return JsonResponse({})\n\n\nclass UnitSubmitJSON(UnitSuggestionJSON):\n\n @set_permissions\n @requires_permission(\"translate\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(UnitSuggestionJSON, self).dispatch(request, *args, **kwargs)\n\n @property\n def form_class(self):\n if self.get_suggestion():\n return SuggestionSubmitForm\n return SubmitForm\n\n @property\n def permission_context(self):\n return self.get_object().store.parent\n\n @lru_cache()\n def get_object(self):\n return get_object_or_404(\n Unit.objects.select_related(\n \"store\",\n \"change\",\n \"store__parent\",\n \"store__translation_project\",\n \"store__filetype\",\n \"store__translation_project__language\",\n \"store__translation_project__project\",\n \"store__data\",\n \"store__translation_project__data\"),\n id=self.request.resolver_match.kwargs[\"uid\"])\n\n @lru_cache()\n def get_suggestion(self):\n if \"suggestion\" in self.request.POST:\n return get_object_or_404(\n Suggestion,\n unit_id=self.get_object().id,\n id=self.request.POST[\"suggestion\"])\n\n def get_form_kwargs(self, **kwargs):\n kwargs = dict(\n unit=self.get_object(),\n request_user=self.request.user,\n data=self.request.POST)\n if self.get_suggestion():\n kwargs[\"target_object\"] = self.get_suggestion()\n return kwargs\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(UnitSuggestionJSON, self).get_context_data(*args, **kwargs)\n form = ctx[\"form\"]\n if form.is_valid():\n form.unit.refresh_from_db()\n result = dict(\n checks=_get_critical_checks_snippet(self.request, form.unit),\n 
user_score=self.request.user.public_score,\n newtargets=[target for target in form.unit.target.strings])\n return result\n", "path": "pootle/apps/pootle_store/views.py"}]} |
gh_patches_debug_1333 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1590 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash with wrong URL
**Describe the bug**:
Crash due to a request arriving with a malformed URL (the port is not numeric):
```
ValueError: Port could not be cast to integer value as '${port}'
File "elasticapm/contrib/django/middleware/__init__.py", line 176, in process_response
elasticapm.set_context(
File "elasticapm/traces.py", line 1155, in set_context
data = data()
File "elasticapm/contrib/django/middleware/__init__.py", line 177, in <lambda>
lambda: self.client.get_data_from_request(request, constants.TRANSACTION), "request"
File "elasticapm/contrib/django/client.py", line 155, in get_data_from_request
result["url"] = get_url_dict(url)
File "elasticapm/utils/__init__.py", line 118, in get_url_dict
port = None if parse_result.port is None else str(parse_result.port)
File "urllib/parse.py", line 175, in port
raise ValueError(message) from None
```
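For context on the root cause, the failure is reproducible with the standard library alone; a minimal sketch (not part of the original report), assuming Python 3:
```python
from urllib.parse import urlparse

# The literal placeholder "${port}" is not numeric, so reading .port raises
# ValueError instead of returning an int or None.
parsed = urlparse("http://${ip}:${port}/")
try:
    print(parsed.port)
except ValueError as exc:
    print("reproduced:", exc)  # e.g. Port could not be cast to integer value as '${port}'
```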
**To Reproduce**
1. Send `${ip}:${port}` as the URL (or any other invalid URL). Here's a copy of the headers that were sent from nginx to our gunicorn server. 
```
request_headers |
{'accept': '*/*',
'connection': 'close',
'host': '${ip}:${port}',
'user-agent': 'curl/7.64.1',
'x-forwarded-for': '152.32.255.215',
'x-forwarded-host': 'mydomain_here',
'x-forwarded-proto': 'https', 'x-real-ip': '152.32.255.215'}
```
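With a Host header of `${ip}:${port}`, the agent reconstructs a raw URI roughly like `http://${ip}:${port}/` and passes it to the helper named in the traceback. A hedged sketch of triggering the same error directly (the exact reconstructed URI is an assumption; `get_url_dict` is the agent helper imported in `elasticapm/contrib/django/client.py`):
```python
from elasticapm.utils import get_url_dict

# Approximation of the value get_data_from_request() builds from the headers
# above; the non-numeric port makes the parse inside get_url_dict() raise
# ValueError, which is what crashes process_response().
get_url_dict("http://${ip}:${port}/")
```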
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.10
- Framework and version: Django/3.2.13
- Agent version: 6.9.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/django/client.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from __future__ import absolute_import
33
34 import django
35 from django.conf import settings as django_settings
36 from django.db import DatabaseError
37 from django.http import HttpRequest
38
39 try:
40 from rest_framework.request import Request as DrfRequest
41 except ImportError:
42 DrfRequest = HttpRequest
43
44 from elasticapm import get_client as _get_client
45 from elasticapm.base import Client
46 from elasticapm.conf import constants
47 from elasticapm.contrib.django.utils import get_raw_uri, iterate_with_template_sources
48 from elasticapm.utils import compat, encoding, get_url_dict
49 from elasticapm.utils.logging import get_logger
50 from elasticapm.utils.module_import import import_string
51 from elasticapm.utils.wsgi import get_environ, get_headers
52
53 __all__ = ("DjangoClient",)
54
55
56 default_client_class = "elasticapm.contrib.django.DjangoClient"
57
58
59 def get_client():
60 """
61 Get an ElasticAPM client.
62
63 :param client:
64 :return:
65 :rtype: elasticapm.base.Client
66 """
67 if _get_client():
68 return _get_client()
69
70 config = getattr(django_settings, "ELASTIC_APM", {})
71 client = config.get("CLIENT", default_client_class)
72 client_class = import_string(client)
73 instance = client_class()
74 # `instance` will already be in elasticapm.base.CLIENT_SINGLETON due to the
75 # `__init__()` for Client
76 return instance
77
78
79 class DjangoClient(Client):
80 logger = get_logger("elasticapm.errors.client.django")
81
82 def __init__(self, config=None, **inline):
83 if config is None:
84 config = getattr(django_settings, "ELASTIC_APM", {})
85 if "framework_name" not in inline:
86 inline["framework_name"] = "django"
87 inline["framework_version"] = django.get_version()
88 super(DjangoClient, self).__init__(config, **inline)
89
90 def get_user_info(self, request):
91 user_info = {}
92
93 if not hasattr(request, "user"):
94 return user_info
95 try:
96 user = request.user
97 if hasattr(user, "is_authenticated"):
98 if callable(user.is_authenticated):
99 user_info["is_authenticated"] = user.is_authenticated()
100 else:
101 user_info["is_authenticated"] = bool(user.is_authenticated)
102 if hasattr(user, "id"):
103 user_info["id"] = encoding.keyword_field(user.id)
104 if hasattr(user, "get_username"):
105 user_info["username"] = encoding.keyword_field(encoding.force_text(user.get_username()))
106 elif hasattr(user, "username"):
107 user_info["username"] = encoding.keyword_field(encoding.force_text(user.username))
108
109 if hasattr(user, "email"):
110 user_info["email"] = encoding.force_text(user.email)
111 except DatabaseError:
112 # If the connection is closed or similar, we'll just skip this
113 return {}
114
115 return user_info
116
117 def get_data_from_request(self, request, event_type):
118 result = {
119 "env": dict(get_environ(request.META)),
120 "method": request.method,
121 "socket": {"remote_address": request.META.get("REMOTE_ADDR")},
122 "cookies": dict(request.COOKIES),
123 }
124 if self.config.capture_headers:
125 request_headers = dict(get_headers(request.META))
126
127 for key, value in request_headers.items():
128 if isinstance(value, (int, float)):
129 request_headers[key] = str(value)
130
131 result["headers"] = request_headers
132
133 if request.method in constants.HTTP_WITH_BODY:
134 capture_body = self.config.capture_body in ("all", event_type)
135 if not capture_body:
136 result["body"] = "[REDACTED]"
137 else:
138 content_type = request.META.get("CONTENT_TYPE")
139 if content_type == "application/x-www-form-urlencoded":
140 data = compat.multidict_to_dict(request.POST)
141 elif content_type and content_type.startswith("multipart/form-data"):
142 data = compat.multidict_to_dict(request.POST)
143 if request.FILES:
144 data["_files"] = {field: file.name for field, file in request.FILES.items()}
145 else:
146 try:
147 data = request.body
148 except Exception as e:
149 self.logger.debug("Can't capture request body: %s", str(e))
150 data = "<unavailable>"
151 if data is not None:
152 result["body"] = data
153
154 url = get_raw_uri(request)
155 result["url"] = get_url_dict(url)
156 return result
157
158 def get_data_from_response(self, response, event_type):
159 result = {"status_code": response.status_code}
160
161 if self.config.capture_headers and hasattr(response, "items"):
162 response_headers = dict(response.items())
163
164 for key, value in response_headers.items():
165 if isinstance(value, (int, float)):
166 response_headers[key] = str(value)
167
168 result["headers"] = response_headers
169
170 return result
171
172 def capture(self, event_type, request=None, **kwargs):
173 if "context" not in kwargs:
174 kwargs["context"] = context = {}
175 else:
176 context = kwargs["context"]
177
178 is_http_request = isinstance(request, (HttpRequest, DrfRequest))
179 if is_http_request:
180 context["request"] = self.get_data_from_request(request, constants.ERROR)
181 context["user"] = self.get_user_info(request)
182
183 result = super(DjangoClient, self).capture(event_type, **kwargs)
184
185 if is_http_request:
186 # attach the elasticapm object to the request
187 request._elasticapm = {"service_name": self.config.service_name, "id": result}
188
189 return result
190
191 def _get_stack_info_for_trace(
192 self,
193 frames,
194 library_frame_context_lines=None,
195 in_app_frame_context_lines=None,
196 with_locals=True,
197 locals_processor_func=None,
198 ):
199 """If the stacktrace originates within the elasticapm module, it will skip
200 frames until some other module comes up."""
201 return list(
202 iterate_with_template_sources(
203 frames,
204 with_locals=with_locals,
205 library_frame_context_lines=library_frame_context_lines,
206 in_app_frame_context_lines=in_app_frame_context_lines,
207 include_paths_re=self.include_paths_re,
208 exclude_paths_re=self.exclude_paths_re,
209 locals_processor_func=locals_processor_func,
210 )
211 )
212
213 def send(self, url, **kwargs):
214 """
215 Serializes and signs ``data`` and passes the payload off to ``send_remote``
216
217 If ``server`` was passed into the constructor, this will serialize the data and pipe it to
218 the server using ``send_remote()``.
219 """
220 if self.config.server_url:
221 return super(DjangoClient, self).send(url, **kwargs)
222 else:
223 self.error_logger.error("No server configured, and elasticapm not installed. Cannot send message")
224 return None
225
226
227 class ProxyClient(object):
228 """
229 A proxy which represents the current client at all times.
230 """
231
232 # introspection support:
233 __members__ = property(lambda x: x.__dir__())
234
235 # Need to pretend to be the wrapped class, for the sake of objects that care
236 # about this (especially in equality tests)
237 __class__ = property(lambda x: get_client().__class__)
238
239 __dict__ = property(lambda o: get_client().__dict__)
240
241 __repr__ = lambda: repr(get_client())
242 __getattr__ = lambda x, o: getattr(get_client(), o)
243 __setattr__ = lambda x, o, v: setattr(get_client(), o, v)
244 __delattr__ = lambda x, o: delattr(get_client(), o)
245
246 __lt__ = lambda x, o: get_client() < o
247 __le__ = lambda x, o: get_client() <= o
248 __eq__ = lambda x, o: get_client() == o
249 __ne__ = lambda x, o: get_client() != o
250 __gt__ = lambda x, o: get_client() > o
251 __ge__ = lambda x, o: get_client() >= o
252 __hash__ = lambda x: hash(get_client())
253 # attributes are currently not callable
254 # __call__ = lambda x, *a, **kw: get_client()(*a, **kw)
255 __nonzero__ = lambda x: bool(get_client())
256 __len__ = lambda x: len(get_client())
257 __getitem__ = lambda x, i: get_client()[i]
258 __iter__ = lambda x: iter(get_client())
259 __contains__ = lambda x, i: i in get_client()
260 __getslice__ = lambda x, i, j: get_client()[i:j]
261 __add__ = lambda x, o: get_client() + o
262 __sub__ = lambda x, o: get_client() - o
263 __mul__ = lambda x, o: get_client() * o
264 __floordiv__ = lambda x, o: get_client() // o
265 __mod__ = lambda x, o: get_client() % o
266 __divmod__ = lambda x, o: get_client().__divmod__(o)
267 __pow__ = lambda x, o: get_client() ** o
268 __lshift__ = lambda x, o: get_client() << o
269 __rshift__ = lambda x, o: get_client() >> o
270 __and__ = lambda x, o: get_client() & o
271 __xor__ = lambda x, o: get_client() ^ o
272 __or__ = lambda x, o: get_client() | o
273 __div__ = lambda x, o: get_client().__div__(o)
274 __truediv__ = lambda x, o: get_client().__truediv__(o)
275 __neg__ = lambda x: -(get_client())
276 __pos__ = lambda x: +(get_client())
277 __abs__ = lambda x: abs(get_client())
278 __invert__ = lambda x: ~(get_client())
279 __complex__ = lambda x: complex(get_client())
280 __int__ = lambda x: int(get_client())
281 __float__ = lambda x: float(get_client())
282 __str__ = lambda x: str(get_client())
283 __unicode__ = lambda x: str(get_client())
284 __oct__ = lambda x: oct(get_client())
285 __hex__ = lambda x: hex(get_client())
286 __index__ = lambda x: get_client().__index__()
287 __coerce__ = lambda x, o: x.__coerce__(x, o)
288 __enter__ = lambda x: x.__enter__()
289 __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
290
291
292 client = ProxyClient()
293
294
295 def _get_installed_apps_paths():
296 """
297 Generate a list of modules in settings.INSTALLED_APPS.
298 """
299 out = set()
300 for app in django_settings.INSTALLED_APPS:
301 out.add(app)
302 return out
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/contrib/django/client.py b/elasticapm/contrib/django/client.py
--- a/elasticapm/contrib/django/client.py
+++ b/elasticapm/contrib/django/client.py
@@ -152,7 +152,10 @@
result["body"] = data
url = get_raw_uri(request)
- result["url"] = get_url_dict(url)
+ try:
+ result["url"] = get_url_dict(url)
+ except ValueError as exc:
+ self.logger.warning(f"URL parsing failed: {exc}")
return result
def get_data_from_response(self, response, event_type):
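The merged fix above wraps the `get_url_dict(url)` call in `try/except ValueError` and downgrades the failure to a warning on the Django client's logger. A standalone sketch approximating that behaviour (the helper name here is hypothetical, not the agent's actual code path):
```python
import logging
from urllib.parse import urlparse

logger = logging.getLogger("elasticapm.errors.client.django")

def safe_url_dict(url):
    """Hypothetical stand-in for the patched call site: swallow unparseable
    URLs instead of letting ValueError escape from the response middleware."""
    try:
        parsed = urlparse(url)
        # Accessing .port is what raises ValueError for a port like '${port}'.
        return {"full": url, "hostname": parsed.hostname, "port": parsed.port}
    except ValueError as exc:
        logger.warning("URL parsing failed: %s", exc)
        return None

safe_url_dict("http://${ip}:${port}/")     # logs a warning, returns None
safe_url_dict("http://example.com:8080/")  # returns a small dict
```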
| {"golden_diff": "diff --git a/elasticapm/contrib/django/client.py b/elasticapm/contrib/django/client.py\n--- a/elasticapm/contrib/django/client.py\n+++ b/elasticapm/contrib/django/client.py\n@@ -152,7 +152,10 @@\n result[\"body\"] = data\n \n url = get_raw_uri(request)\n- result[\"url\"] = get_url_dict(url)\n+ try:\n+ result[\"url\"] = get_url_dict(url)\n+ except ValueError as exc:\n+ self.logger.warning(f\"URL parsing failed: {exc}\")\n return result\n \n def get_data_from_response(self, response, event_type):\n", "issue": "Crash with wrong URL\n**Describe the bug**: \r\n\r\nCrash due to someone sending the wrong URL format:\r\n\r\n```\r\n\r\nValueError: Port could not be cast to integer value as '${port}'\r\n File \"elasticapm/contrib/django/middleware/__init__.py\", line 176, in process_response\r\n elasticapm.set_context(\r\n File \"elasticapm/traces.py\", line 1155, in set_context\r\n data = data()\r\n File \"elasticapm/contrib/django/middleware/__init__.py\", line 177, in <lambda>\r\n lambda: self.client.get_data_from_request(request, constants.TRANSACTION), \"request\"\r\n File \"elasticapm/contrib/django/client.py\", line 155, in get_data_from_request\r\n result[\"url\"] = get_url_dict(url)\r\n File \"elasticapm/utils/__init__.py\", line 118, in get_url_dict\r\n port = None if parse_result.port is None else str(parse_result.port)\r\n File \"urllib/parse.py\", line 175, in port\r\n raise ValueError(message) from None\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. Send `${ip}:${port}` as URL (or any other not valid URL). Here's a copy of the headers that were sent from nginx to our gunicorn server. \r\n\r\n```\r\nrequest_headers | \r\n{'accept': '*/*', \r\n'connection': 'close', \r\n'host': '${ip}:${port}', \r\n'user-agent': 'curl/7.64.1', \r\n'x-forwarded-for': '152.32.255.215', \r\n'x-forwarded-host': 'mydomain_here', \r\n'x-forwarded-proto': 'https', 'x-real-ip': '152.32.255.215'}\r\n```\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.10\r\n- Framework and version: Django/3.2.13\r\n- Agent version: 6.9.1\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport django\nfrom django.conf import settings as django_settings\nfrom django.db import DatabaseError\nfrom django.http import HttpRequest\n\ntry:\n from rest_framework.request import Request as DrfRequest\nexcept ImportError:\n DrfRequest = HttpRequest\n\nfrom elasticapm import get_client as _get_client\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.utils import get_raw_uri, iterate_with_template_sources\nfrom elasticapm.utils import compat, encoding, get_url_dict\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.module_import import import_string\nfrom elasticapm.utils.wsgi import get_environ, get_headers\n\n__all__ = (\"DjangoClient\",)\n\n\ndefault_client_class = \"elasticapm.contrib.django.DjangoClient\"\n\n\ndef get_client():\n \"\"\"\n Get an ElasticAPM client.\n\n :param client:\n :return:\n :rtype: elasticapm.base.Client\n \"\"\"\n if _get_client():\n return _get_client()\n\n config = getattr(django_settings, \"ELASTIC_APM\", {})\n client = config.get(\"CLIENT\", default_client_class)\n client_class = import_string(client)\n instance = client_class()\n # `instance` will already be in elasticapm.base.CLIENT_SINGLETON due to the\n # `__init__()` for Client\n return instance\n\n\nclass DjangoClient(Client):\n logger = get_logger(\"elasticapm.errors.client.django\")\n\n def __init__(self, config=None, **inline):\n if config is None:\n config = getattr(django_settings, \"ELASTIC_APM\", {})\n if \"framework_name\" not in inline:\n inline[\"framework_name\"] = \"django\"\n inline[\"framework_version\"] = django.get_version()\n super(DjangoClient, self).__init__(config, **inline)\n\n def get_user_info(self, request):\n user_info = {}\n\n if not hasattr(request, \"user\"):\n return user_info\n try:\n user = request.user\n if hasattr(user, \"is_authenticated\"):\n if callable(user.is_authenticated):\n user_info[\"is_authenticated\"] = user.is_authenticated()\n else:\n user_info[\"is_authenticated\"] = bool(user.is_authenticated)\n if hasattr(user, \"id\"):\n user_info[\"id\"] = encoding.keyword_field(user.id)\n if hasattr(user, \"get_username\"):\n user_info[\"username\"] = encoding.keyword_field(encoding.force_text(user.get_username()))\n elif hasattr(user, \"username\"):\n user_info[\"username\"] = encoding.keyword_field(encoding.force_text(user.username))\n\n if hasattr(user, \"email\"):\n user_info[\"email\"] = encoding.force_text(user.email)\n except DatabaseError:\n # If the connection is closed or similar, we'll just skip this\n return {}\n\n return user_info\n\n def get_data_from_request(self, request, event_type):\n result = {\n \"env\": dict(get_environ(request.META)),\n \"method\": request.method,\n \"socket\": {\"remote_address\": request.META.get(\"REMOTE_ADDR\")},\n \"cookies\": dict(request.COOKIES),\n }\n if self.config.capture_headers:\n request_headers = dict(get_headers(request.META))\n\n for key, value in request_headers.items():\n if isinstance(value, (int, float)):\n request_headers[key] 
= str(value)\n\n result[\"headers\"] = request_headers\n\n if request.method in constants.HTTP_WITH_BODY:\n capture_body = self.config.capture_body in (\"all\", event_type)\n if not capture_body:\n result[\"body\"] = \"[REDACTED]\"\n else:\n content_type = request.META.get(\"CONTENT_TYPE\")\n if content_type == \"application/x-www-form-urlencoded\":\n data = compat.multidict_to_dict(request.POST)\n elif content_type and content_type.startswith(\"multipart/form-data\"):\n data = compat.multidict_to_dict(request.POST)\n if request.FILES:\n data[\"_files\"] = {field: file.name for field, file in request.FILES.items()}\n else:\n try:\n data = request.body\n except Exception as e:\n self.logger.debug(\"Can't capture request body: %s\", str(e))\n data = \"<unavailable>\"\n if data is not None:\n result[\"body\"] = data\n\n url = get_raw_uri(request)\n result[\"url\"] = get_url_dict(url)\n return result\n\n def get_data_from_response(self, response, event_type):\n result = {\"status_code\": response.status_code}\n\n if self.config.capture_headers and hasattr(response, \"items\"):\n response_headers = dict(response.items())\n\n for key, value in response_headers.items():\n if isinstance(value, (int, float)):\n response_headers[key] = str(value)\n\n result[\"headers\"] = response_headers\n\n return result\n\n def capture(self, event_type, request=None, **kwargs):\n if \"context\" not in kwargs:\n kwargs[\"context\"] = context = {}\n else:\n context = kwargs[\"context\"]\n\n is_http_request = isinstance(request, (HttpRequest, DrfRequest))\n if is_http_request:\n context[\"request\"] = self.get_data_from_request(request, constants.ERROR)\n context[\"user\"] = self.get_user_info(request)\n\n result = super(DjangoClient, self).capture(event_type, **kwargs)\n\n if is_http_request:\n # attach the elasticapm object to the request\n request._elasticapm = {\"service_name\": self.config.service_name, \"id\": result}\n\n return result\n\n def _get_stack_info_for_trace(\n self,\n frames,\n library_frame_context_lines=None,\n in_app_frame_context_lines=None,\n with_locals=True,\n locals_processor_func=None,\n ):\n \"\"\"If the stacktrace originates within the elasticapm module, it will skip\n frames until some other module comes up.\"\"\"\n return list(\n iterate_with_template_sources(\n frames,\n with_locals=with_locals,\n library_frame_context_lines=library_frame_context_lines,\n in_app_frame_context_lines=in_app_frame_context_lines,\n include_paths_re=self.include_paths_re,\n exclude_paths_re=self.exclude_paths_re,\n locals_processor_func=locals_processor_func,\n )\n )\n\n def send(self, url, **kwargs):\n \"\"\"\n Serializes and signs ``data`` and passes the payload off to ``send_remote``\n\n If ``server`` was passed into the constructor, this will serialize the data and pipe it to\n the server using ``send_remote()``.\n \"\"\"\n if self.config.server_url:\n return super(DjangoClient, self).send(url, **kwargs)\n else:\n self.error_logger.error(\"No server configured, and elasticapm not installed. 
Cannot send message\")\n return None\n\n\nclass ProxyClient(object):\n \"\"\"\n A proxy which represents the current client at all times.\n \"\"\"\n\n # introspection support:\n __members__ = property(lambda x: x.__dir__())\n\n # Need to pretend to be the wrapped class, for the sake of objects that care\n # about this (especially in equality tests)\n __class__ = property(lambda x: get_client().__class__)\n\n __dict__ = property(lambda o: get_client().__dict__)\n\n __repr__ = lambda: repr(get_client())\n __getattr__ = lambda x, o: getattr(get_client(), o)\n __setattr__ = lambda x, o, v: setattr(get_client(), o, v)\n __delattr__ = lambda x, o: delattr(get_client(), o)\n\n __lt__ = lambda x, o: get_client() < o\n __le__ = lambda x, o: get_client() <= o\n __eq__ = lambda x, o: get_client() == o\n __ne__ = lambda x, o: get_client() != o\n __gt__ = lambda x, o: get_client() > o\n __ge__ = lambda x, o: get_client() >= o\n __hash__ = lambda x: hash(get_client())\n # attributes are currently not callable\n # __call__ = lambda x, *a, **kw: get_client()(*a, **kw)\n __nonzero__ = lambda x: bool(get_client())\n __len__ = lambda x: len(get_client())\n __getitem__ = lambda x, i: get_client()[i]\n __iter__ = lambda x: iter(get_client())\n __contains__ = lambda x, i: i in get_client()\n __getslice__ = lambda x, i, j: get_client()[i:j]\n __add__ = lambda x, o: get_client() + o\n __sub__ = lambda x, o: get_client() - o\n __mul__ = lambda x, o: get_client() * o\n __floordiv__ = lambda x, o: get_client() // o\n __mod__ = lambda x, o: get_client() % o\n __divmod__ = lambda x, o: get_client().__divmod__(o)\n __pow__ = lambda x, o: get_client() ** o\n __lshift__ = lambda x, o: get_client() << o\n __rshift__ = lambda x, o: get_client() >> o\n __and__ = lambda x, o: get_client() & o\n __xor__ = lambda x, o: get_client() ^ o\n __or__ = lambda x, o: get_client() | o\n __div__ = lambda x, o: get_client().__div__(o)\n __truediv__ = lambda x, o: get_client().__truediv__(o)\n __neg__ = lambda x: -(get_client())\n __pos__ = lambda x: +(get_client())\n __abs__ = lambda x: abs(get_client())\n __invert__ = lambda x: ~(get_client())\n __complex__ = lambda x: complex(get_client())\n __int__ = lambda x: int(get_client())\n __float__ = lambda x: float(get_client())\n __str__ = lambda x: str(get_client())\n __unicode__ = lambda x: str(get_client())\n __oct__ = lambda x: oct(get_client())\n __hex__ = lambda x: hex(get_client())\n __index__ = lambda x: get_client().__index__()\n __coerce__ = lambda x, o: x.__coerce__(x, o)\n __enter__ = lambda x: x.__enter__()\n __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)\n\n\nclient = ProxyClient()\n\n\ndef _get_installed_apps_paths():\n \"\"\"\n Generate a list of modules in settings.INSTALLED_APPS.\n \"\"\"\n out = set()\n for app in django_settings.INSTALLED_APPS:\n out.add(app)\n return out\n", "path": "elasticapm/contrib/django/client.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other 
materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport django\nfrom django.conf import settings as django_settings\nfrom django.db import DatabaseError\nfrom django.http import HttpRequest\n\ntry:\n from rest_framework.request import Request as DrfRequest\nexcept ImportError:\n DrfRequest = HttpRequest\n\nfrom elasticapm import get_client as _get_client\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.utils import get_raw_uri, iterate_with_template_sources\nfrom elasticapm.utils import compat, encoding, get_url_dict\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.module_import import import_string\nfrom elasticapm.utils.wsgi import get_environ, get_headers\n\n__all__ = (\"DjangoClient\",)\n\n\ndefault_client_class = \"elasticapm.contrib.django.DjangoClient\"\n\n\ndef get_client():\n \"\"\"\n Get an ElasticAPM client.\n\n :param client:\n :return:\n :rtype: elasticapm.base.Client\n \"\"\"\n if _get_client():\n return _get_client()\n\n config = getattr(django_settings, \"ELASTIC_APM\", {})\n client = config.get(\"CLIENT\", default_client_class)\n client_class = import_string(client)\n instance = client_class()\n # `instance` will already be in elasticapm.base.CLIENT_SINGLETON due to the\n # `__init__()` for Client\n return instance\n\n\nclass DjangoClient(Client):\n logger = get_logger(\"elasticapm.errors.client.django\")\n\n def __init__(self, config=None, **inline):\n if config is None:\n config = getattr(django_settings, \"ELASTIC_APM\", {})\n if \"framework_name\" not in inline:\n inline[\"framework_name\"] = \"django\"\n inline[\"framework_version\"] = django.get_version()\n super(DjangoClient, self).__init__(config, **inline)\n\n def get_user_info(self, request):\n user_info = {}\n\n if not hasattr(request, \"user\"):\n return user_info\n try:\n user = request.user\n if hasattr(user, \"is_authenticated\"):\n if callable(user.is_authenticated):\n user_info[\"is_authenticated\"] = user.is_authenticated()\n else:\n user_info[\"is_authenticated\"] = bool(user.is_authenticated)\n if hasattr(user, \"id\"):\n user_info[\"id\"] = encoding.keyword_field(user.id)\n if hasattr(user, \"get_username\"):\n user_info[\"username\"] = encoding.keyword_field(encoding.force_text(user.get_username()))\n elif hasattr(user, \"username\"):\n user_info[\"username\"] = encoding.keyword_field(encoding.force_text(user.username))\n\n if hasattr(user, \"email\"):\n user_info[\"email\"] = encoding.force_text(user.email)\n except DatabaseError:\n # If the connection is closed or similar, 
we'll just skip this\n return {}\n\n return user_info\n\n def get_data_from_request(self, request, event_type):\n result = {\n \"env\": dict(get_environ(request.META)),\n \"method\": request.method,\n \"socket\": {\"remote_address\": request.META.get(\"REMOTE_ADDR\")},\n \"cookies\": dict(request.COOKIES),\n }\n if self.config.capture_headers:\n request_headers = dict(get_headers(request.META))\n\n for key, value in request_headers.items():\n if isinstance(value, (int, float)):\n request_headers[key] = str(value)\n\n result[\"headers\"] = request_headers\n\n if request.method in constants.HTTP_WITH_BODY:\n capture_body = self.config.capture_body in (\"all\", event_type)\n if not capture_body:\n result[\"body\"] = \"[REDACTED]\"\n else:\n content_type = request.META.get(\"CONTENT_TYPE\")\n if content_type == \"application/x-www-form-urlencoded\":\n data = compat.multidict_to_dict(request.POST)\n elif content_type and content_type.startswith(\"multipart/form-data\"):\n data = compat.multidict_to_dict(request.POST)\n if request.FILES:\n data[\"_files\"] = {field: file.name for field, file in request.FILES.items()}\n else:\n try:\n data = request.body\n except Exception as e:\n self.logger.debug(\"Can't capture request body: %s\", str(e))\n data = \"<unavailable>\"\n if data is not None:\n result[\"body\"] = data\n\n url = get_raw_uri(request)\n try:\n result[\"url\"] = get_url_dict(url)\n except ValueError as exc:\n self.logger.warning(f\"URL parsing failed: {exc}\")\n return result\n\n def get_data_from_response(self, response, event_type):\n result = {\"status_code\": response.status_code}\n\n if self.config.capture_headers and hasattr(response, \"items\"):\n response_headers = dict(response.items())\n\n for key, value in response_headers.items():\n if isinstance(value, (int, float)):\n response_headers[key] = str(value)\n\n result[\"headers\"] = response_headers\n\n return result\n\n def capture(self, event_type, request=None, **kwargs):\n if \"context\" not in kwargs:\n kwargs[\"context\"] = context = {}\n else:\n context = kwargs[\"context\"]\n\n is_http_request = isinstance(request, (HttpRequest, DrfRequest))\n if is_http_request:\n context[\"request\"] = self.get_data_from_request(request, constants.ERROR)\n context[\"user\"] = self.get_user_info(request)\n\n result = super(DjangoClient, self).capture(event_type, **kwargs)\n\n if is_http_request:\n # attach the elasticapm object to the request\n request._elasticapm = {\"service_name\": self.config.service_name, \"id\": result}\n\n return result\n\n def _get_stack_info_for_trace(\n self,\n frames,\n library_frame_context_lines=None,\n in_app_frame_context_lines=None,\n with_locals=True,\n locals_processor_func=None,\n ):\n \"\"\"If the stacktrace originates within the elasticapm module, it will skip\n frames until some other module comes up.\"\"\"\n return list(\n iterate_with_template_sources(\n frames,\n with_locals=with_locals,\n library_frame_context_lines=library_frame_context_lines,\n in_app_frame_context_lines=in_app_frame_context_lines,\n include_paths_re=self.include_paths_re,\n exclude_paths_re=self.exclude_paths_re,\n locals_processor_func=locals_processor_func,\n )\n )\n\n def send(self, url, **kwargs):\n \"\"\"\n Serializes and signs ``data`` and passes the payload off to ``send_remote``\n\n If ``server`` was passed into the constructor, this will serialize the data and pipe it to\n the server using ``send_remote()``.\n \"\"\"\n if self.config.server_url:\n return super(DjangoClient, self).send(url, **kwargs)\n else:\n 
self.error_logger.error(\"No server configured, and elasticapm not installed. Cannot send message\")\n return None\n\n\nclass ProxyClient(object):\n \"\"\"\n A proxy which represents the current client at all times.\n \"\"\"\n\n # introspection support:\n __members__ = property(lambda x: x.__dir__())\n\n # Need to pretend to be the wrapped class, for the sake of objects that care\n # about this (especially in equality tests)\n __class__ = property(lambda x: get_client().__class__)\n\n __dict__ = property(lambda o: get_client().__dict__)\n\n __repr__ = lambda: repr(get_client())\n __getattr__ = lambda x, o: getattr(get_client(), o)\n __setattr__ = lambda x, o, v: setattr(get_client(), o, v)\n __delattr__ = lambda x, o: delattr(get_client(), o)\n\n __lt__ = lambda x, o: get_client() < o\n __le__ = lambda x, o: get_client() <= o\n __eq__ = lambda x, o: get_client() == o\n __ne__ = lambda x, o: get_client() != o\n __gt__ = lambda x, o: get_client() > o\n __ge__ = lambda x, o: get_client() >= o\n __hash__ = lambda x: hash(get_client())\n # attributes are currently not callable\n # __call__ = lambda x, *a, **kw: get_client()(*a, **kw)\n __nonzero__ = lambda x: bool(get_client())\n __len__ = lambda x: len(get_client())\n __getitem__ = lambda x, i: get_client()[i]\n __iter__ = lambda x: iter(get_client())\n __contains__ = lambda x, i: i in get_client()\n __getslice__ = lambda x, i, j: get_client()[i:j]\n __add__ = lambda x, o: get_client() + o\n __sub__ = lambda x, o: get_client() - o\n __mul__ = lambda x, o: get_client() * o\n __floordiv__ = lambda x, o: get_client() // o\n __mod__ = lambda x, o: get_client() % o\n __divmod__ = lambda x, o: get_client().__divmod__(o)\n __pow__ = lambda x, o: get_client() ** o\n __lshift__ = lambda x, o: get_client() << o\n __rshift__ = lambda x, o: get_client() >> o\n __and__ = lambda x, o: get_client() & o\n __xor__ = lambda x, o: get_client() ^ o\n __or__ = lambda x, o: get_client() | o\n __div__ = lambda x, o: get_client().__div__(o)\n __truediv__ = lambda x, o: get_client().__truediv__(o)\n __neg__ = lambda x: -(get_client())\n __pos__ = lambda x: +(get_client())\n __abs__ = lambda x: abs(get_client())\n __invert__ = lambda x: ~(get_client())\n __complex__ = lambda x: complex(get_client())\n __int__ = lambda x: int(get_client())\n __float__ = lambda x: float(get_client())\n __str__ = lambda x: str(get_client())\n __unicode__ = lambda x: str(get_client())\n __oct__ = lambda x: oct(get_client())\n __hex__ = lambda x: hex(get_client())\n __index__ = lambda x: get_client().__index__()\n __coerce__ = lambda x, o: x.__coerce__(x, o)\n __enter__ = lambda x: x.__enter__()\n __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)\n\n\nclient = ProxyClient()\n\n\ndef _get_installed_apps_paths():\n \"\"\"\n Generate a list of modules in settings.INSTALLED_APPS.\n \"\"\"\n out = set()\n for app in django_settings.INSTALLED_APPS:\n out.add(app)\n return out\n", "path": "elasticapm/contrib/django/client.py"}]} |
gh_patches_debug_1334 | rasdani/github-patches | git_diff | jazzband__django-oauth-toolkit-812 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: get_full_path when running any rest_framework.test.APITestCase
**Describe the bug**
After upgrading from django-oauth-toolkit==1.2.0 to django-oauth-toolkit==1.3.0, every test that uses `from rest_framework.test import APITestCase` and then calls `self.client.post` (or a similar client method) fails with this stack trace:
```
Traceback (most recent call last):
File "/src/app/.venv/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/src/app/.venv/lib/python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/src/app/.venv/lib/python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/django/views/generic/base.py", line 71, in view
return self.dispatch(request, *args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/django/utils/decorators.py", line 45, in _wrapper
return bound_method(*args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/django/views/generic/base.py", line 97, in dispatch
return handler(request, *args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/django/utils/decorators.py", line 45, in _wrapper
return bound_method(*args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/django/views/decorators/debug.py", line 76, in sensitive_post_parameters_wrapper
return view(request, *args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/views/base.py", line 260, in post
url, headers, body, status = self.create_token_response(request)
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/views/mixins.py", line 124, in create_token_response
return core.create_token_response(request)
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py", line 145, in create_token_response
headers, extra_credentials)
File "/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py", line 116, in wrapper
return f(endpoint, uri, *args, **kwargs)
File "/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/token.py", line 119, in create_token_response
request, self.default_token_type)
File "/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py", line 101, in create_token_response
self.validate_token_request(request)
File "/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py", line 184, in validate_token_request
request.password, request.client, request):
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_validators.py", line 611, in validate_user
u = authenticate(request, username=username, password=password)
File "/src/app/.venv/lib/python3.6/site-packages/django/contrib/auth/__init__.py", line 73, in authenticate
user = backend.authenticate(request, **credentials)
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/backends.py", line 17, in authenticate
valid, r = OAuthLibCore.verify_request(request, scopes=[])
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py", line 172, in verify_request
uri, http_method, body, headers = self._extract_params(request)
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py", line 58, in _extract_params
uri = self._get_escaped_full_path(request)
File "/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py", line 34, in _get_escaped_full_path
parsed = list(urlparse(request.get_full_path()))
File "/src/app/.venv/lib/python3.6/site-packages/oauthlib/common.py", line 436, in __getattr__
raise AttributeError(name)
AttributeError: get_full_path
```
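
The frames above pin down the failure path: in 1.3.0, `OAuth2Validator.validate_user` passes the `oauthlib.common.Request` object into `django.contrib.auth.authenticate()`, Django then offers that object to every configured backend, and `oauth2_provider.backends.OAuth2Backend` treats it as a Django `HttpRequest`, so `OAuthLibCore._get_escaped_full_path` ends up calling `request.get_full_path()` on it. `oauthlib.common.Request.__getattr__` raises `AttributeError` for any attribute it does not carry. A minimal standalone sketch of that last step (not part of the original report; it only assumes oauthlib's public `Request` class):

```python
# Sketch: oauthlib's Request rejects unknown attributes, so code that expects
# a Django HttpRequest breaks as soon as it touches get_full_path.
from oauthlib.common import Request

oauthlib_request = Request("https://testserver/api/oauth/token/", http_method="POST")

try:
    oauthlib_request.get_full_path  # the attribute OAuthLibCore needs
except AttributeError as exc:
    print("AttributeError:", exc)  # -> AttributeError: get_full_path
```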
**To Reproduce**
Test case:
```python
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APITestCase


class OauthAuthenticationTest(APITestCase):
    def test_should_should_return_ok_when_valid_credentials_passed(self) -> None:
        username = "user"
        password = "pw"
        User.objects.create_user(username=username, email="[email protected]", password=password)
        response = self.client.post(
            "/api/oauth/token/",
            {
                "grant_type": "password",
                "username": username,
                "password": password,
                "client_id": settings.REACT_APP_OAUTH_CLIENT_ID,
            }
        )
        self.assertEqual(status.HTTP_200_OK, response.status_code)
```
**Expected behavior**
This test should pass, as it did with version 1.2.0.
**Version**
1.3.0
- [x] I have tested with the latest published release and it's still a problem.
- [ ] I have tested with the master branch and it's still a problem.
**Additional context**
Using django==2.2.11 and djangorestframework==3.11.0.
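
The traceback also shows `oauth2_provider.backends.OAuth2Backend` being invoked from `django.contrib.auth.authenticate`, which means it is listed in `AUTHENTICATION_BACKENDS`; the bug only surfaces on that code path. A hedged sketch of the relevant setting (not copied from the affected project):

```python
# settings.py sketch: authenticate() only reaches OAuth2Backend -- and thus the
# failing get_full_path() call -- when the backend is enabled along these lines.
AUTHENTICATION_BACKENDS = [
    "oauth2_provider.backends.OAuth2Backend",
    "django.contrib.auth.backends.ModelBackend",
]
```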
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `oauth2_provider/oauth2_validators.py`
Content:
```
1 import base64
2 import binascii
3 import logging
4 from collections import OrderedDict
5 from datetime import datetime, timedelta
6 from urllib.parse import unquote_plus
7
8 import requests
9 from django.conf import settings
10 from django.contrib.auth import authenticate, get_user_model
11 from django.core.exceptions import ObjectDoesNotExist
12 from django.db import transaction
13 from django.db.models import Q
14 from django.utils import timezone
15 from django.utils.timezone import make_aware
16 from django.utils.translation import gettext_lazy as _
17 from oauthlib.oauth2 import RequestValidator
18
19 from .exceptions import FatalClientError
20 from .models import (
21 AbstractApplication, get_access_token_model,
22 get_application_model, get_grant_model, get_refresh_token_model
23 )
24 from .scopes import get_scopes_backend
25 from .settings import oauth2_settings
26
27
28 log = logging.getLogger("oauth2_provider")
29
30 GRANT_TYPE_MAPPING = {
31 "authorization_code": (AbstractApplication.GRANT_AUTHORIZATION_CODE, ),
32 "password": (AbstractApplication.GRANT_PASSWORD, ),
33 "client_credentials": (AbstractApplication.GRANT_CLIENT_CREDENTIALS, ),
34 "refresh_token": (
35 AbstractApplication.GRANT_AUTHORIZATION_CODE,
36 AbstractApplication.GRANT_PASSWORD,
37 AbstractApplication.GRANT_CLIENT_CREDENTIALS,
38 )
39 }
40
41 Application = get_application_model()
42 AccessToken = get_access_token_model()
43 Grant = get_grant_model()
44 RefreshToken = get_refresh_token_model()
45 UserModel = get_user_model()
46
47
48 class OAuth2Validator(RequestValidator):
49 def _extract_basic_auth(self, request):
50 """
51 Return authentication string if request contains basic auth credentials,
52 otherwise return None
53 """
54 auth = request.headers.get("HTTP_AUTHORIZATION", None)
55 if not auth:
56 return None
57
58 splitted = auth.split(" ", 1)
59 if len(splitted) != 2:
60 return None
61 auth_type, auth_string = splitted
62
63 if auth_type != "Basic":
64 return None
65
66 return auth_string
67
68 def _authenticate_basic_auth(self, request):
69 """
70 Authenticates with HTTP Basic Auth.
71
72 Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with
73 "application/x-www-form-urlencoded" encoding algorithm.
74 """
75 auth_string = self._extract_basic_auth(request)
76 if not auth_string:
77 return False
78
79 try:
80 encoding = request.encoding or settings.DEFAULT_CHARSET or "utf-8"
81 except AttributeError:
82 encoding = "utf-8"
83
84 try:
85 b64_decoded = base64.b64decode(auth_string)
86 except (TypeError, binascii.Error):
87 log.debug("Failed basic auth: %r can't be decoded as base64", auth_string)
88 return False
89
90 try:
91 auth_string_decoded = b64_decoded.decode(encoding)
92 except UnicodeDecodeError:
93 log.debug(
94 "Failed basic auth: %r can't be decoded as unicode by %r",
95 auth_string, encoding
96 )
97 return False
98
99 try:
100 client_id, client_secret = map(unquote_plus, auth_string_decoded.split(":", 1))
101 except ValueError:
102 log.debug("Failed basic auth, Invalid base64 encoding.")
103 return False
104
105 if self._load_application(client_id, request) is None:
106 log.debug("Failed basic auth: Application %s does not exist" % client_id)
107 return False
108 elif request.client.client_id != client_id:
109 log.debug("Failed basic auth: wrong client id %s" % client_id)
110 return False
111 elif request.client.client_secret != client_secret:
112 log.debug("Failed basic auth: wrong client secret %s" % client_secret)
113 return False
114 else:
115 return True
116
117 def _authenticate_request_body(self, request):
118 """
119 Try to authenticate the client using client_id and client_secret
120 parameters included in body.
121
122 Remember that this method is NOT RECOMMENDED and SHOULD be limited to
123 clients unable to directly utilize the HTTP Basic authentication scheme.
124 See rfc:`2.3.1` for more details.
125 """
126 # TODO: check if oauthlib has already unquoted client_id and client_secret
127 try:
128 client_id = request.client_id
129 client_secret = request.client_secret
130 except AttributeError:
131 return False
132
133 if self._load_application(client_id, request) is None:
134 log.debug("Failed body auth: Application %s does not exists" % client_id)
135 return False
136 elif request.client.client_secret != client_secret:
137 log.debug("Failed body auth: wrong client secret %s" % client_secret)
138 return False
139 else:
140 return True
141
142 def _load_application(self, client_id, request):
143 """
144 If request.client was not set, load application instance for given
145 client_id and store it in request.client
146 """
147
148 # we want to be sure that request has the client attribute!
149 assert hasattr(request, "client"), '"request" instance has no "client" attribute'
150
151 try:
152 request.client = request.client or Application.objects.get(client_id=client_id)
153 # Check that the application can be used (defaults to always True)
154 if not request.client.is_usable(request):
155 log.debug("Failed body authentication: Application %r is disabled" % (client_id))
156 return None
157 return request.client
158 except Application.DoesNotExist:
159 log.debug("Failed body authentication: Application %r does not exist" % (client_id))
160 return None
161
162 def _set_oauth2_error_on_request(self, request, access_token, scopes):
163 if access_token is None:
164 error = OrderedDict([
165 ("error", "invalid_token", ),
166 ("error_description", _("The access token is invalid."), ),
167 ])
168 elif access_token.is_expired():
169 error = OrderedDict([
170 ("error", "invalid_token", ),
171 ("error_description", _("The access token has expired."), ),
172 ])
173 elif not access_token.allow_scopes(scopes):
174 error = OrderedDict([
175 ("error", "insufficient_scope", ),
176 ("error_description", _("The access token is valid but does not have enough scope."), ),
177 ])
178 else:
179 log.warning("OAuth2 access token is invalid for an unknown reason.")
180 error = OrderedDict([
181 ("error", "invalid_token", ),
182 ])
183 request.oauth2_error = error
184 return request
185
186 def client_authentication_required(self, request, *args, **kwargs):
187 """
188 Determine if the client has to be authenticated
189
190 This method is called only for grant types that supports client authentication:
191 * Authorization code grant
192 * Resource owner password grant
193 * Refresh token grant
194
195 If the request contains authorization headers, always authenticate the client
196 no matter the grant type.
197
198 If the request does not contain authorization headers, proceed with authentication
199 only if the client is of type `Confidential`.
200
201 If something goes wrong, call oauthlib implementation of the method.
202 """
203 if self._extract_basic_auth(request):
204 return True
205
206 try:
207 if request.client_id and request.client_secret:
208 return True
209 except AttributeError:
210 log.debug("Client ID or client secret not provided...")
211 pass
212
213 self._load_application(request.client_id, request)
214 if request.client:
215 return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
216
217 return super().client_authentication_required(request, *args, **kwargs)
218
219 def authenticate_client(self, request, *args, **kwargs):
220 """
221 Check if client exists and is authenticating itself as in rfc:`3.2.1`
222
223 First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED
224 authentication method.
225 Whether this fails we support including the client credentials in the request-body,
226 but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to
227 directly utilize the HTTP Basic authentication scheme.
228 See rfc:`2.3.1` for more details
229 """
230 authenticated = self._authenticate_basic_auth(request)
231
232 if not authenticated:
233 authenticated = self._authenticate_request_body(request)
234
235 return authenticated
236
237 def authenticate_client_id(self, client_id, request, *args, **kwargs):
238 """
239 If we are here, the client did not authenticate itself as in rfc:`3.2.1` and we can
240 proceed only if the client exists and is not of type "Confidential".
241 """
242 if self._load_application(client_id, request) is not None:
243 log.debug("Application %r has type %r" % (client_id, request.client.client_type))
244 return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
245 return False
246
247 def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
248 """
249 Ensure the redirect_uri is listed in the Application instance redirect_uris field
250 """
251 grant = Grant.objects.get(code=code, application=client)
252 return grant.redirect_uri_allowed(redirect_uri)
253
254 def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
255 """
256 Remove the temporary grant used to swap the authorization token
257 """
258 grant = Grant.objects.get(code=code, application=request.client)
259 grant.delete()
260
261 def validate_client_id(self, client_id, request, *args, **kwargs):
262 """
263 Ensure an Application exists with given client_id.
264 If it exists, it's assigned to request.client.
265 """
266 return self._load_application(client_id, request) is not None
267
268 def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
269 return request.client.default_redirect_uri
270
271 def _get_token_from_authentication_server(
272 self, token, introspection_url, introspection_token, introspection_credentials
273 ):
274 """Use external introspection endpoint to "crack open" the token.
275 :param introspection_url: introspection endpoint URL
276 :param introspection_token: Bearer token
277 :param introspection_credentials: Basic Auth credentials (id,secret)
278 :return: :class:`models.AccessToken`
279
280 Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic
281 Auth. Depending on the external AS's implementation, provide either the introspection_token
282 or the introspection_credentials.
283
284 If the resulting access_token identifies a username (e.g. Authorization Code grant), add
285 that user to the UserModel. Also cache the access_token up until its expiry time or a
286 configured maximum time.
287
288 """
289 headers = None
290 if introspection_token:
291 headers = {"Authorization": "Bearer {}".format(introspection_token)}
292 elif introspection_credentials:
293 client_id = introspection_credentials[0].encode("utf-8")
294 client_secret = introspection_credentials[1].encode("utf-8")
295 basic_auth = base64.b64encode(client_id + b":" + client_secret)
296 headers = {"Authorization": "Basic {}".format(basic_auth.decode("utf-8"))}
297
298 try:
299 response = requests.post(
300 introspection_url,
301 data={"token": token}, headers=headers
302 )
303 except requests.exceptions.RequestException:
304 log.exception("Introspection: Failed POST to %r in token lookup", introspection_url)
305 return None
306
307 try:
308 content = response.json()
309 except ValueError:
310 log.exception("Introspection: Failed to parse response as json")
311 return None
312
313 if "active" in content and content["active"] is True:
314 if "username" in content:
315 user, _created = UserModel.objects.get_or_create(
316 **{UserModel.USERNAME_FIELD: content["username"]}
317 )
318 else:
319 user = None
320
321 max_caching_time = datetime.now() + timedelta(
322 seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS
323 )
324
325 if "exp" in content:
326 expires = datetime.utcfromtimestamp(content["exp"])
327 if expires > max_caching_time:
328 expires = max_caching_time
329 else:
330 expires = max_caching_time
331
332 scope = content.get("scope", "")
333 expires = make_aware(expires)
334
335 access_token, _created = AccessToken.objects.update_or_create(
336 token=token,
337 defaults={
338 "user": user,
339 "application": None,
340 "scope": scope,
341 "expires": expires,
342 })
343
344 return access_token
345
346 def validate_bearer_token(self, token, scopes, request):
347 """
348 When users try to access resources, check that provided token is valid
349 """
350 if not token:
351 return False
352
353 introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL
354 introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN
355 introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS
356
357 try:
358 access_token = AccessToken.objects.select_related("application", "user").get(token=token)
359 except AccessToken.DoesNotExist:
360 access_token = None
361
362 # if there is no token or it's invalid then introspect the token if there's an external OAuth server
363 if not access_token or not access_token.is_valid(scopes):
364 if introspection_url and (introspection_token or introspection_credentials):
365 access_token = self._get_token_from_authentication_server(
366 token,
367 introspection_url,
368 introspection_token,
369 introspection_credentials
370 )
371
372 if access_token and access_token.is_valid(scopes):
373 request.client = access_token.application
374 request.user = access_token.user
375 request.scopes = scopes
376
377 # this is needed by django rest framework
378 request.access_token = access_token
379 return True
380 else:
381 self._set_oauth2_error_on_request(request, access_token, scopes)
382 return False
383
384 def validate_code(self, client_id, code, client, request, *args, **kwargs):
385 try:
386 grant = Grant.objects.get(code=code, application=client)
387 if not grant.is_expired():
388 request.scopes = grant.scope.split(" ")
389 request.user = grant.user
390 return True
391 return False
392
393 except Grant.DoesNotExist:
394 return False
395
396 def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
397 """
398 Validate both grant_type is a valid string and grant_type is allowed for current workflow
399 """
400 assert(grant_type in GRANT_TYPE_MAPPING) # mapping misconfiguration
401 return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])
402
403 def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
404 """
405 We currently do not support the Authorization Endpoint Response Types registry as in
406 rfc:`8.4`, so validate the response_type only if it matches "code" or "token"
407 """
408 if response_type == "code":
409 return client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)
410 elif response_type == "token":
411 return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
412 else:
413 return False
414
415 def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
416 """
417 Ensure required scopes are permitted (as specified in the settings file)
418 """
419 available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)
420 return set(scopes).issubset(set(available_scopes))
421
422 def get_default_scopes(self, client_id, request, *args, **kwargs):
423 default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)
424 return default_scopes
425
426 def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
427 return request.client.redirect_uri_allowed(redirect_uri)
428
429 def is_pkce_required(self, client_id, request):
430 """
431 Enables or disables PKCE verification.
432
433 Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that
434 receives the client id and returns a bool.
435 """
436 if callable(oauth2_settings.PKCE_REQUIRED):
437 return oauth2_settings.PKCE_REQUIRED(client_id)
438 return oauth2_settings.PKCE_REQUIRED
439
440 def get_code_challenge(self, code, request):
441 grant = Grant.objects.get(code=code, application=request.client)
442 return grant.code_challenge or None
443
444 def get_code_challenge_method(self, code, request):
445 grant = Grant.objects.get(code=code, application=request.client)
446 return grant.code_challenge_method or None
447
448 def save_authorization_code(self, client_id, code, request, *args, **kwargs):
449 expires = timezone.now() + timedelta(
450 seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)
451 Grant.objects.create(
452 application=request.client,
453 user=request.user,
454 code=code["code"],
455 expires=expires,
456 redirect_uri=request.redirect_uri,
457 scope=" ".join(request.scopes),
458 code_challenge=request.code_challenge or "",
459 code_challenge_method=request.code_challenge_method or ""
460 )
461
462 def rotate_refresh_token(self, request):
463 """
464 Checks if rotate refresh token is enabled
465 """
466 return oauth2_settings.ROTATE_REFRESH_TOKEN
467
468 @transaction.atomic
469 def save_bearer_token(self, token, request, *args, **kwargs):
470 """
471 Save access and refresh token, If refresh token is issued, remove or
472 reuse old refresh token as in rfc:`6`
473
474 @see: https://tools.ietf.org/html/draft-ietf-oauth-v2-31#page-43
475 """
476
477 if "scope" not in token:
478 raise FatalClientError("Failed to renew access token: missing scope")
479
480 # expires_in is passed to Server on initialization
481 # custom server class can have logic to override this
482 expires = timezone.now() + timedelta(seconds=token.get(
483 "expires_in", oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
484 ))
485
486 if request.grant_type == "client_credentials":
487 request.user = None
488
489 # This comes from OAuthLib:
490 # https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267
491 # Its value is either a new random code; or if we are reusing
492 # refresh tokens, then it is the same value that the request passed in
493 # (stored in `request.refresh_token`)
494 refresh_token_code = token.get("refresh_token", None)
495
496 if refresh_token_code:
497 # an instance of `RefreshToken` that matches the old refresh code.
498 # Set on the request in `validate_refresh_token`
499 refresh_token_instance = getattr(request, "refresh_token_instance", None)
500
501 # If we are to reuse tokens, and we can: do so
502 if not self.rotate_refresh_token(request) and \
503 isinstance(refresh_token_instance, RefreshToken) and \
504 refresh_token_instance.access_token:
505
506 access_token = AccessToken.objects.select_for_update().get(
507 pk=refresh_token_instance.access_token.pk
508 )
509 access_token.user = request.user
510 access_token.scope = token["scope"]
511 access_token.expires = expires
512 access_token.token = token["access_token"]
513 access_token.application = request.client
514 access_token.save()
515
516 # else create fresh with access & refresh tokens
517 else:
518 # revoke existing tokens if possible to allow reuse of grant
519 if isinstance(refresh_token_instance, RefreshToken):
520 # First, to ensure we don't have concurrency issues, we refresh the refresh token
521 # from the db while acquiring a lock on it
522 # We also put it in the "request cache"
523 refresh_token_instance = RefreshToken.objects.select_for_update().get(
524 id=refresh_token_instance.id
525 )
526 request.refresh_token_instance = refresh_token_instance
527
528 previous_access_token = AccessToken.objects.filter(
529 source_refresh_token=refresh_token_instance
530 ).first()
531 try:
532 refresh_token_instance.revoke()
533 except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):
534 pass
535 else:
536 setattr(request, "refresh_token_instance", None)
537 else:
538 previous_access_token = None
539
540 # If the refresh token has already been used to create an
541 # access token (ie it's within the grace period), return that
542 # access token
543 if not previous_access_token:
544 access_token = self._create_access_token(
545 expires,
546 request,
547 token,
548 source_refresh_token=refresh_token_instance,
549 )
550
551 self._create_refresh_token(request, refresh_token_code, access_token)
552 else:
553 # make sure that the token data we're returning matches
554 # the existing token
555 token["access_token"] = previous_access_token.token
556 token["refresh_token"] = RefreshToken.objects.filter(
557 access_token=previous_access_token
558 ).first().token
559 token["scope"] = previous_access_token.scope
560
561 # No refresh token should be created, just access token
562 else:
563 self._create_access_token(expires, request, token)
564
565 def _create_access_token(self, expires, request, token, source_refresh_token=None):
566 return AccessToken.objects.create(
567 user=request.user,
568 scope=token["scope"],
569 expires=expires,
570 token=token["access_token"],
571 application=request.client,
572 source_refresh_token=source_refresh_token,
573 )
574
575 def _create_refresh_token(self, request, refresh_token_code, access_token):
576 return RefreshToken.objects.create(
577 user=request.user,
578 token=refresh_token_code,
579 application=request.client,
580 access_token=access_token
581 )
582
583 def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
584 """
585 Revoke an access or refresh token.
586
587 :param token: The token string.
588 :param token_type_hint: access_token or refresh_token.
589 :param request: The HTTP Request (oauthlib.common.Request)
590 """
591 if token_type_hint not in ["access_token", "refresh_token"]:
592 token_type_hint = None
593
594 token_types = {
595 "access_token": AccessToken,
596 "refresh_token": RefreshToken,
597 }
598
599 token_type = token_types.get(token_type_hint, AccessToken)
600 try:
601 token_type.objects.get(token=token).revoke()
602 except ObjectDoesNotExist:
603 for other_type in [_t for _t in token_types.values() if _t != token_type]:
604 # slightly inefficient on Python2, but the queryset contains only one instance
605 list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))
606
607 def validate_user(self, username, password, client, request, *args, **kwargs):
608 """
609 Check username and password correspond to a valid and active User
610 """
611 u = authenticate(request, username=username, password=password)
612 if u is not None and u.is_active:
613 request.user = u
614 return True
615 return False
616
617 def get_original_scopes(self, refresh_token, request, *args, **kwargs):
618 # Avoid second query for RefreshToken since this method is invoked *after*
619 # validate_refresh_token.
620 rt = request.refresh_token_instance
621 if not rt.access_token_id:
622 return AccessToken.objects.get(source_refresh_token_id=rt.id).scope
623
624 return rt.access_token.scope
625
626 def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
627 """
628 Check refresh_token exists and refers to the right client.
629 Also attach User instance to the request object
630 """
631
632 null_or_recent = Q(revoked__isnull=True) | Q(
633 revoked__gt=timezone.now() - timedelta(
634 seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS
635 )
636 )
637 rt = RefreshToken.objects.filter(null_or_recent, token=refresh_token).first()
638
639 if not rt:
640 return False
641
642 request.user = rt.user
643 request.refresh_token = rt.token
644 # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.
645 request.refresh_token_instance = rt
646 return rt.application == client
647
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/oauth2_provider/oauth2_validators.py b/oauth2_provider/oauth2_validators.py
--- a/oauth2_provider/oauth2_validators.py
+++ b/oauth2_provider/oauth2_validators.py
@@ -608,7 +608,7 @@
"""
Check username and password correspond to a valid and active User
"""
- u = authenticate(request, username=username, password=password)
+ u = authenticate(username=username, password=password)
if u is not None and u.is_active:
request.user = u
return True
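
For reference, this is how the patched method reads once the hunk above is applied; it is a sketch assembled from the diff and the file shown earlier, not an excerpt from a released version:

```python
def validate_user(self, username, password, client, request, *args, **kwargs):
    """
    Check username and password correspond to a valid and active User
    """
    # Keeping the oauthlib Request out of the authenticate() call means it is
    # never handed to OAuth2Backend, which would otherwise call get_full_path()
    # on it and trigger the AttributeError from the issue.
    u = authenticate(username=username, password=password)
    if u is not None and u.is_active:
        request.user = u
        return True
    return False
```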
| {"golden_diff": "diff --git a/oauth2_provider/oauth2_validators.py b/oauth2_provider/oauth2_validators.py\n--- a/oauth2_provider/oauth2_validators.py\n+++ b/oauth2_provider/oauth2_validators.py\n@@ -608,7 +608,7 @@\n \"\"\"\n Check username and password correspond to a valid and active User\n \"\"\"\n- u = authenticate(request, username=username, password=password)\n+ u = authenticate(username=username, password=password)\n if u is not None and u.is_active:\n request.user = u\n return True\n", "issue": "AttributeError: get_full_path when running any rest_framework.test.APITestCase\n**Describe the bug**\r\nAfter upgrading from django-oauth-toolkit==1.2.0 to django-oauth-toolkit==1.3.0 every single test using `from rest_framework.test import APITestCase` and then calling `self.client.post` and the like fails with this stack trace:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/core/handlers/exception.py\", line 34, in inner\r\n response = get_response(request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 115, in _get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 113, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/views/generic/base.py\", line 71, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/utils/decorators.py\", line 45, in _wrapper\r\n return bound_method(*args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/views/decorators/csrf.py\", line 54, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/views/generic/base.py\", line 97, in dispatch\r\n return handler(request, *args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/utils/decorators.py\", line 45, in _wrapper\r\n return bound_method(*args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/views/decorators/debug.py\", line 76, in sensitive_post_parameters_wrapper\r\n return view(request, *args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/views/base.py\", line 260, in post\r\n url, headers, body, status = self.create_token_response(request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/views/mixins.py\", line 124, in create_token_response\r\n return core.create_token_response(request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py\", line 145, in create_token_response\r\n headers, extra_credentials)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py\", line 116, in wrapper\r\n return f(endpoint, uri, *args, **kwargs)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/token.py\", line 119, in create_token_response\r\n request, self.default_token_type)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py\", line 101, in create_token_response\r\n self.validate_token_request(request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py\", line 184, in validate_token_request\r\n 
request.password, request.client, request):\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_validators.py\", line 611, in validate_user\r\n u = authenticate(request, username=username, password=password)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/django/contrib/auth/__init__.py\", line 73, in authenticate\r\n user = backend.authenticate(request, **credentials)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/backends.py\", line 17, in authenticate\r\n valid, r = OAuthLibCore.verify_request(request, scopes=[])\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py\", line 172, in verify_request\r\n uri, http_method, body, headers = self._extract_params(request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py\", line 58, in _extract_params\r\n uri = self._get_escaped_full_path(request)\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py\", line 34, in _get_escaped_full_path\r\n parsed = list(urlparse(request.get_full_path()))\r\n File \"/src/app/.venv/lib/python3.6/site-packages/oauthlib/common.py\", line 436, in __getattr__\r\n raise AttributeError(name)\r\nAttributeError: get_full_path\r\n```\r\n\r\n**To Reproduce**\r\n\r\nTest case:\r\n\r\n```\r\nfrom django.conf import settings\r\nfrom django.contrib.auth.models import User\r\nfrom rest_framework import status\r\nfrom rest_framework.test import APITestCase\r\n\r\n\r\nclass OauthAuthenticationTest(APITestCase):\r\n def test_should_should_return_ok_when_valid_credentials_passed(self) -> None:\r\n username = \"user\"\r\n password = \"pw\"\r\n User.objects.create_user(username=username, email=\"[email protected]\", password=password)\r\n response = self.client.post(\r\n \"/api/oauth/token/\",\r\n {\r\n \"grant_type\": \"password\",\r\n \"username\": username,\r\n \"password\": password,\r\n \"client_id\": settings.REACT_APP_OAUTH_CLIENT_ID,\r\n }\r\n )\r\n self.assertEqual(status.HTTP_200_OK, response.status_code)\r\n```\r\n\r\n**Expected behavior**\r\nThis test should pass, as it did with version 1.2.0.\r\n\r\n**Version**\r\n1.3.0\r\n\r\n- [x] I have tested with the latest published release and it's still a problem.\r\n- [ ] I have tested with the master branch and it's still a problem.\r\n\r\n**Additional context**\r\n\r\nUsing django==2.2.11 and djangorestframework==3.11.0.\n", "before_files": [{"content": "import base64\nimport binascii\nimport logging\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nfrom urllib.parse import unquote_plus\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext_lazy as _\nfrom oauthlib.oauth2 import RequestValidator\n\nfrom .exceptions import FatalClientError\nfrom .models import (\n AbstractApplication, get_access_token_model,\n get_application_model, get_grant_model, get_refresh_token_model\n)\nfrom .scopes import get_scopes_backend\nfrom .settings import oauth2_settings\n\n\nlog = logging.getLogger(\"oauth2_provider\")\n\nGRANT_TYPE_MAPPING = {\n \"authorization_code\": (AbstractApplication.GRANT_AUTHORIZATION_CODE, ),\n \"password\": (AbstractApplication.GRANT_PASSWORD, ),\n \"client_credentials\": 
(AbstractApplication.GRANT_CLIENT_CREDENTIALS, ),\n \"refresh_token\": (\n AbstractApplication.GRANT_AUTHORIZATION_CODE,\n AbstractApplication.GRANT_PASSWORD,\n AbstractApplication.GRANT_CLIENT_CREDENTIALS,\n )\n}\n\nApplication = get_application_model()\nAccessToken = get_access_token_model()\nGrant = get_grant_model()\nRefreshToken = get_refresh_token_model()\nUserModel = get_user_model()\n\n\nclass OAuth2Validator(RequestValidator):\n def _extract_basic_auth(self, request):\n \"\"\"\n Return authentication string if request contains basic auth credentials,\n otherwise return None\n \"\"\"\n auth = request.headers.get(\"HTTP_AUTHORIZATION\", None)\n if not auth:\n return None\n\n splitted = auth.split(\" \", 1)\n if len(splitted) != 2:\n return None\n auth_type, auth_string = splitted\n\n if auth_type != \"Basic\":\n return None\n\n return auth_string\n\n def _authenticate_basic_auth(self, request):\n \"\"\"\n Authenticates with HTTP Basic Auth.\n\n Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with\n \"application/x-www-form-urlencoded\" encoding algorithm.\n \"\"\"\n auth_string = self._extract_basic_auth(request)\n if not auth_string:\n return False\n\n try:\n encoding = request.encoding or settings.DEFAULT_CHARSET or \"utf-8\"\n except AttributeError:\n encoding = \"utf-8\"\n\n try:\n b64_decoded = base64.b64decode(auth_string)\n except (TypeError, binascii.Error):\n log.debug(\"Failed basic auth: %r can't be decoded as base64\", auth_string)\n return False\n\n try:\n auth_string_decoded = b64_decoded.decode(encoding)\n except UnicodeDecodeError:\n log.debug(\n \"Failed basic auth: %r can't be decoded as unicode by %r\",\n auth_string, encoding\n )\n return False\n\n try:\n client_id, client_secret = map(unquote_plus, auth_string_decoded.split(\":\", 1))\n except ValueError:\n log.debug(\"Failed basic auth, Invalid base64 encoding.\")\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed basic auth: Application %s does not exist\" % client_id)\n return False\n elif request.client.client_id != client_id:\n log.debug(\"Failed basic auth: wrong client id %s\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed basic auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def _authenticate_request_body(self, request):\n \"\"\"\n Try to authenticate the client using client_id and client_secret\n parameters included in body.\n\n Remember that this method is NOT RECOMMENDED and SHOULD be limited to\n clients unable to directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details.\n \"\"\"\n # TODO: check if oauthlib has already unquoted client_id and client_secret\n try:\n client_id = request.client_id\n client_secret = request.client_secret\n except AttributeError:\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed body auth: Application %s does not exists\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed body auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def _load_application(self, client_id, request):\n \"\"\"\n If request.client was not set, load application instance for given\n client_id and store it in request.client\n \"\"\"\n\n # we want to be sure that request has the client attribute!\n assert hasattr(request, \"client\"), '\"request\" instance has no \"client\" 
attribute'\n\n try:\n request.client = request.client or Application.objects.get(client_id=client_id)\n # Check that the application can be used (defaults to always True)\n if not request.client.is_usable(request):\n log.debug(\"Failed body authentication: Application %r is disabled\" % (client_id))\n return None\n return request.client\n except Application.DoesNotExist:\n log.debug(\"Failed body authentication: Application %r does not exist\" % (client_id))\n return None\n\n def _set_oauth2_error_on_request(self, request, access_token, scopes):\n if access_token is None:\n error = OrderedDict([\n (\"error\", \"invalid_token\", ),\n (\"error_description\", _(\"The access token is invalid.\"), ),\n ])\n elif access_token.is_expired():\n error = OrderedDict([\n (\"error\", \"invalid_token\", ),\n (\"error_description\", _(\"The access token has expired.\"), ),\n ])\n elif not access_token.allow_scopes(scopes):\n error = OrderedDict([\n (\"error\", \"insufficient_scope\", ),\n (\"error_description\", _(\"The access token is valid but does not have enough scope.\"), ),\n ])\n else:\n log.warning(\"OAuth2 access token is invalid for an unknown reason.\")\n error = OrderedDict([\n (\"error\", \"invalid_token\", ),\n ])\n request.oauth2_error = error\n return request\n\n def client_authentication_required(self, request, *args, **kwargs):\n \"\"\"\n Determine if the client has to be authenticated\n\n This method is called only for grant types that supports client authentication:\n * Authorization code grant\n * Resource owner password grant\n * Refresh token grant\n\n If the request contains authorization headers, always authenticate the client\n no matter the grant type.\n\n If the request does not contain authorization headers, proceed with authentication\n only if the client is of type `Confidential`.\n\n If something goes wrong, call oauthlib implementation of the method.\n \"\"\"\n if self._extract_basic_auth(request):\n return True\n\n try:\n if request.client_id and request.client_secret:\n return True\n except AttributeError:\n log.debug(\"Client ID or client secret not provided...\")\n pass\n\n self._load_application(request.client_id, request)\n if request.client:\n return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL\n\n return super().client_authentication_required(request, *args, **kwargs)\n\n def authenticate_client(self, request, *args, **kwargs):\n \"\"\"\n Check if client exists and is authenticating itself as in rfc:`3.2.1`\n\n First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED\n authentication method.\n Whether this fails we support including the client credentials in the request-body,\n but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to\n directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details\n \"\"\"\n authenticated = self._authenticate_basic_auth(request)\n\n if not authenticated:\n authenticated = self._authenticate_request_body(request)\n\n return authenticated\n\n def authenticate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n If we are here, the client did not authenticate itself as in rfc:`3.2.1` and we can\n proceed only if the client exists and is not of type \"Confidential\".\n \"\"\"\n if self._load_application(client_id, request) is not None:\n log.debug(\"Application %r has type %r\" % (client_id, request.client.client_type))\n return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL\n return False\n\n def 
confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):\n \"\"\"\n Ensure the redirect_uri is listed in the Application instance redirect_uris field\n \"\"\"\n grant = Grant.objects.get(code=code, application=client)\n return grant.redirect_uri_allowed(redirect_uri)\n\n def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):\n \"\"\"\n Remove the temporary grant used to swap the authorization token\n \"\"\"\n grant = Grant.objects.get(code=code, application=request.client)\n grant.delete()\n\n def validate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n Ensure an Application exists with given client_id.\n If it exists, it's assigned to request.client.\n \"\"\"\n return self._load_application(client_id, request) is not None\n\n def get_default_redirect_uri(self, client_id, request, *args, **kwargs):\n return request.client.default_redirect_uri\n\n def _get_token_from_authentication_server(\n self, token, introspection_url, introspection_token, introspection_credentials\n ):\n \"\"\"Use external introspection endpoint to \"crack open\" the token.\n :param introspection_url: introspection endpoint URL\n :param introspection_token: Bearer token\n :param introspection_credentials: Basic Auth credentials (id,secret)\n :return: :class:`models.AccessToken`\n\n Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic\n Auth. Depending on the external AS's implementation, provide either the introspection_token\n or the introspection_credentials.\n\n If the resulting access_token identifies a username (e.g. Authorization Code grant), add\n that user to the UserModel. Also cache the access_token up until its expiry time or a\n configured maximum time.\n\n \"\"\"\n headers = None\n if introspection_token:\n headers = {\"Authorization\": \"Bearer {}\".format(introspection_token)}\n elif introspection_credentials:\n client_id = introspection_credentials[0].encode(\"utf-8\")\n client_secret = introspection_credentials[1].encode(\"utf-8\")\n basic_auth = base64.b64encode(client_id + b\":\" + client_secret)\n headers = {\"Authorization\": \"Basic {}\".format(basic_auth.decode(\"utf-8\"))}\n\n try:\n response = requests.post(\n introspection_url,\n data={\"token\": token}, headers=headers\n )\n except requests.exceptions.RequestException:\n log.exception(\"Introspection: Failed POST to %r in token lookup\", introspection_url)\n return None\n\n try:\n content = response.json()\n except ValueError:\n log.exception(\"Introspection: Failed to parse response as json\")\n return None\n\n if \"active\" in content and content[\"active\"] is True:\n if \"username\" in content:\n user, _created = UserModel.objects.get_or_create(\n **{UserModel.USERNAME_FIELD: content[\"username\"]}\n )\n else:\n user = None\n\n max_caching_time = datetime.now() + timedelta(\n seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS\n )\n\n if \"exp\" in content:\n expires = datetime.utcfromtimestamp(content[\"exp\"])\n if expires > max_caching_time:\n expires = max_caching_time\n else:\n expires = max_caching_time\n\n scope = content.get(\"scope\", \"\")\n expires = make_aware(expires)\n\n access_token, _created = AccessToken.objects.update_or_create(\n token=token,\n defaults={\n \"user\": user,\n \"application\": None,\n \"scope\": scope,\n \"expires\": expires,\n })\n\n return access_token\n\n def validate_bearer_token(self, token, scopes, request):\n \"\"\"\n When users try to access resources, check that provided 
token is valid\n \"\"\"\n if not token:\n return False\n\n introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL\n introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN\n introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS\n\n try:\n access_token = AccessToken.objects.select_related(\"application\", \"user\").get(token=token)\n except AccessToken.DoesNotExist:\n access_token = None\n\n # if there is no token or it's invalid then introspect the token if there's an external OAuth server\n if not access_token or not access_token.is_valid(scopes):\n if introspection_url and (introspection_token or introspection_credentials):\n access_token = self._get_token_from_authentication_server(\n token,\n introspection_url,\n introspection_token,\n introspection_credentials\n )\n\n if access_token and access_token.is_valid(scopes):\n request.client = access_token.application\n request.user = access_token.user\n request.scopes = scopes\n\n # this is needed by django rest framework\n request.access_token = access_token\n return True\n else:\n self._set_oauth2_error_on_request(request, access_token, scopes)\n return False\n\n def validate_code(self, client_id, code, client, request, *args, **kwargs):\n try:\n grant = Grant.objects.get(code=code, application=client)\n if not grant.is_expired():\n request.scopes = grant.scope.split(\" \")\n request.user = grant.user\n return True\n return False\n\n except Grant.DoesNotExist:\n return False\n\n def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):\n \"\"\"\n Validate both grant_type is a valid string and grant_type is allowed for current workflow\n \"\"\"\n assert(grant_type in GRANT_TYPE_MAPPING) # mapping misconfiguration\n return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])\n\n def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):\n \"\"\"\n We currently do not support the Authorization Endpoint Response Types registry as in\n rfc:`8.4`, so validate the response_type only if it matches \"code\" or \"token\"\n \"\"\"\n if response_type == \"code\":\n return client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)\n elif response_type == \"token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n else:\n return False\n\n def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):\n \"\"\"\n Ensure required scopes are permitted (as specified in the settings file)\n \"\"\"\n available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)\n return set(scopes).issubset(set(available_scopes))\n\n def get_default_scopes(self, client_id, request, *args, **kwargs):\n default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)\n return default_scopes\n\n def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):\n return request.client.redirect_uri_allowed(redirect_uri)\n\n def is_pkce_required(self, client_id, request):\n \"\"\"\n Enables or disables PKCE verification.\n\n Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that\n receives the client id and returns a bool.\n \"\"\"\n if callable(oauth2_settings.PKCE_REQUIRED):\n return oauth2_settings.PKCE_REQUIRED(client_id)\n return oauth2_settings.PKCE_REQUIRED\n\n def get_code_challenge(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return 
grant.code_challenge or None\n\n def get_code_challenge_method(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge_method or None\n\n def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n expires = timezone.now() + timedelta(\n seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)\n Grant.objects.create(\n application=request.client,\n user=request.user,\n code=code[\"code\"],\n expires=expires,\n redirect_uri=request.redirect_uri,\n scope=\" \".join(request.scopes),\n code_challenge=request.code_challenge or \"\",\n code_challenge_method=request.code_challenge_method or \"\"\n )\n\n def rotate_refresh_token(self, request):\n \"\"\"\n Checks if rotate refresh token is enabled\n \"\"\"\n return oauth2_settings.ROTATE_REFRESH_TOKEN\n\n @transaction.atomic\n def save_bearer_token(self, token, request, *args, **kwargs):\n \"\"\"\n Save access and refresh token, If refresh token is issued, remove or\n reuse old refresh token as in rfc:`6`\n\n @see: https://tools.ietf.org/html/draft-ietf-oauth-v2-31#page-43\n \"\"\"\n\n if \"scope\" not in token:\n raise FatalClientError(\"Failed to renew access token: missing scope\")\n\n # expires_in is passed to Server on initialization\n # custom server class can have logic to override this\n expires = timezone.now() + timedelta(seconds=token.get(\n \"expires_in\", oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,\n ))\n\n if request.grant_type == \"client_credentials\":\n request.user = None\n\n # This comes from OAuthLib:\n # https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267\n # Its value is either a new random code; or if we are reusing\n # refresh tokens, then it is the same value that the request passed in\n # (stored in `request.refresh_token`)\n refresh_token_code = token.get(\"refresh_token\", None)\n\n if refresh_token_code:\n # an instance of `RefreshToken` that matches the old refresh code.\n # Set on the request in `validate_refresh_token`\n refresh_token_instance = getattr(request, \"refresh_token_instance\", None)\n\n # If we are to reuse tokens, and we can: do so\n if not self.rotate_refresh_token(request) and \\\n isinstance(refresh_token_instance, RefreshToken) and \\\n refresh_token_instance.access_token:\n\n access_token = AccessToken.objects.select_for_update().get(\n pk=refresh_token_instance.access_token.pk\n )\n access_token.user = request.user\n access_token.scope = token[\"scope\"]\n access_token.expires = expires\n access_token.token = token[\"access_token\"]\n access_token.application = request.client\n access_token.save()\n\n # else create fresh with access & refresh tokens\n else:\n # revoke existing tokens if possible to allow reuse of grant\n if isinstance(refresh_token_instance, RefreshToken):\n # First, to ensure we don't have concurrency issues, we refresh the refresh token\n # from the db while acquiring a lock on it\n # We also put it in the \"request cache\"\n refresh_token_instance = RefreshToken.objects.select_for_update().get(\n id=refresh_token_instance.id\n )\n request.refresh_token_instance = refresh_token_instance\n\n previous_access_token = AccessToken.objects.filter(\n source_refresh_token=refresh_token_instance\n ).first()\n try:\n refresh_token_instance.revoke()\n except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):\n pass\n else:\n setattr(request, \"refresh_token_instance\", None)\n else:\n previous_access_token = None\n\n # If the refresh token has already been used to create 
an\n # access token (ie it's within the grace period), return that\n # access token\n if not previous_access_token:\n access_token = self._create_access_token(\n expires,\n request,\n token,\n source_refresh_token=refresh_token_instance,\n )\n\n self._create_refresh_token(request, refresh_token_code, access_token)\n else:\n # make sure that the token data we're returning matches\n # the existing token\n token[\"access_token\"] = previous_access_token.token\n token[\"refresh_token\"] = RefreshToken.objects.filter(\n access_token=previous_access_token\n ).first().token\n token[\"scope\"] = previous_access_token.scope\n\n # No refresh token should be created, just access token\n else:\n self._create_access_token(expires, request, token)\n\n def _create_access_token(self, expires, request, token, source_refresh_token=None):\n return AccessToken.objects.create(\n user=request.user,\n scope=token[\"scope\"],\n expires=expires,\n token=token[\"access_token\"],\n application=request.client,\n source_refresh_token=source_refresh_token,\n )\n\n def _create_refresh_token(self, request, refresh_token_code, access_token):\n return RefreshToken.objects.create(\n user=request.user,\n token=refresh_token_code,\n application=request.client,\n access_token=access_token\n )\n\n def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n \"\"\"\n Revoke an access or refresh token.\n\n :param token: The token string.\n :param token_type_hint: access_token or refresh_token.\n :param request: The HTTP Request (oauthlib.common.Request)\n \"\"\"\n if token_type_hint not in [\"access_token\", \"refresh_token\"]:\n token_type_hint = None\n\n token_types = {\n \"access_token\": AccessToken,\n \"refresh_token\": RefreshToken,\n }\n\n token_type = token_types.get(token_type_hint, AccessToken)\n try:\n token_type.objects.get(token=token).revoke()\n except ObjectDoesNotExist:\n for other_type in [_t for _t in token_types.values() if _t != token_type]:\n # slightly inefficient on Python2, but the queryset contains only one instance\n list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))\n\n def validate_user(self, username, password, client, request, *args, **kwargs):\n \"\"\"\n Check username and password correspond to a valid and active User\n \"\"\"\n u = authenticate(request, username=username, password=password)\n if u is not None and u.is_active:\n request.user = u\n return True\n return False\n\n def get_original_scopes(self, refresh_token, request, *args, **kwargs):\n # Avoid second query for RefreshToken since this method is invoked *after*\n # validate_refresh_token.\n rt = request.refresh_token_instance\n if not rt.access_token_id:\n return AccessToken.objects.get(source_refresh_token_id=rt.id).scope\n\n return rt.access_token.scope\n\n def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):\n \"\"\"\n Check refresh_token exists and refers to the right client.\n Also attach User instance to the request object\n \"\"\"\n\n null_or_recent = Q(revoked__isnull=True) | Q(\n revoked__gt=timezone.now() - timedelta(\n seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS\n )\n )\n rt = RefreshToken.objects.filter(null_or_recent, token=refresh_token).first()\n\n if not rt:\n return False\n\n request.user = rt.user\n request.refresh_token = rt.token\n # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.\n request.refresh_token_instance = rt\n return rt.application == client\n", "path": 
"oauth2_provider/oauth2_validators.py"}], "after_files": [{"content": "import base64\nimport binascii\nimport logging\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nfrom urllib.parse import unquote_plus\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext_lazy as _\nfrom oauthlib.oauth2 import RequestValidator\n\nfrom .exceptions import FatalClientError\nfrom .models import (\n AbstractApplication, get_access_token_model,\n get_application_model, get_grant_model, get_refresh_token_model\n)\nfrom .scopes import get_scopes_backend\nfrom .settings import oauth2_settings\n\n\nlog = logging.getLogger(\"oauth2_provider\")\n\nGRANT_TYPE_MAPPING = {\n \"authorization_code\": (AbstractApplication.GRANT_AUTHORIZATION_CODE, ),\n \"password\": (AbstractApplication.GRANT_PASSWORD, ),\n \"client_credentials\": (AbstractApplication.GRANT_CLIENT_CREDENTIALS, ),\n \"refresh_token\": (\n AbstractApplication.GRANT_AUTHORIZATION_CODE,\n AbstractApplication.GRANT_PASSWORD,\n AbstractApplication.GRANT_CLIENT_CREDENTIALS,\n )\n}\n\nApplication = get_application_model()\nAccessToken = get_access_token_model()\nGrant = get_grant_model()\nRefreshToken = get_refresh_token_model()\nUserModel = get_user_model()\n\n\nclass OAuth2Validator(RequestValidator):\n def _extract_basic_auth(self, request):\n \"\"\"\n Return authentication string if request contains basic auth credentials,\n otherwise return None\n \"\"\"\n auth = request.headers.get(\"HTTP_AUTHORIZATION\", None)\n if not auth:\n return None\n\n splitted = auth.split(\" \", 1)\n if len(splitted) != 2:\n return None\n auth_type, auth_string = splitted\n\n if auth_type != \"Basic\":\n return None\n\n return auth_string\n\n def _authenticate_basic_auth(self, request):\n \"\"\"\n Authenticates with HTTP Basic Auth.\n\n Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with\n \"application/x-www-form-urlencoded\" encoding algorithm.\n \"\"\"\n auth_string = self._extract_basic_auth(request)\n if not auth_string:\n return False\n\n try:\n encoding = request.encoding or settings.DEFAULT_CHARSET or \"utf-8\"\n except AttributeError:\n encoding = \"utf-8\"\n\n try:\n b64_decoded = base64.b64decode(auth_string)\n except (TypeError, binascii.Error):\n log.debug(\"Failed basic auth: %r can't be decoded as base64\", auth_string)\n return False\n\n try:\n auth_string_decoded = b64_decoded.decode(encoding)\n except UnicodeDecodeError:\n log.debug(\n \"Failed basic auth: %r can't be decoded as unicode by %r\",\n auth_string, encoding\n )\n return False\n\n try:\n client_id, client_secret = map(unquote_plus, auth_string_decoded.split(\":\", 1))\n except ValueError:\n log.debug(\"Failed basic auth, Invalid base64 encoding.\")\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed basic auth: Application %s does not exist\" % client_id)\n return False\n elif request.client.client_id != client_id:\n log.debug(\"Failed basic auth: wrong client id %s\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed basic auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def 
_authenticate_request_body(self, request):\n \"\"\"\n Try to authenticate the client using client_id and client_secret\n parameters included in body.\n\n Remember that this method is NOT RECOMMENDED and SHOULD be limited to\n clients unable to directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details.\n \"\"\"\n # TODO: check if oauthlib has already unquoted client_id and client_secret\n try:\n client_id = request.client_id\n client_secret = request.client_secret\n except AttributeError:\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed body auth: Application %s does not exists\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed body auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def _load_application(self, client_id, request):\n \"\"\"\n If request.client was not set, load application instance for given\n client_id and store it in request.client\n \"\"\"\n\n # we want to be sure that request has the client attribute!\n assert hasattr(request, \"client\"), '\"request\" instance has no \"client\" attribute'\n\n try:\n request.client = request.client or Application.objects.get(client_id=client_id)\n # Check that the application can be used (defaults to always True)\n if not request.client.is_usable(request):\n log.debug(\"Failed body authentication: Application %r is disabled\" % (client_id))\n return None\n return request.client\n except Application.DoesNotExist:\n log.debug(\"Failed body authentication: Application %r does not exist\" % (client_id))\n return None\n\n def _set_oauth2_error_on_request(self, request, access_token, scopes):\n if access_token is None:\n error = OrderedDict([\n (\"error\", \"invalid_token\", ),\n (\"error_description\", _(\"The access token is invalid.\"), ),\n ])\n elif access_token.is_expired():\n error = OrderedDict([\n (\"error\", \"invalid_token\", ),\n (\"error_description\", _(\"The access token has expired.\"), ),\n ])\n elif not access_token.allow_scopes(scopes):\n error = OrderedDict([\n (\"error\", \"insufficient_scope\", ),\n (\"error_description\", _(\"The access token is valid but does not have enough scope.\"), ),\n ])\n else:\n log.warning(\"OAuth2 access token is invalid for an unknown reason.\")\n error = OrderedDict([\n (\"error\", \"invalid_token\", ),\n ])\n request.oauth2_error = error\n return request\n\n def client_authentication_required(self, request, *args, **kwargs):\n \"\"\"\n Determine if the client has to be authenticated\n\n This method is called only for grant types that supports client authentication:\n * Authorization code grant\n * Resource owner password grant\n * Refresh token grant\n\n If the request contains authorization headers, always authenticate the client\n no matter the grant type.\n\n If the request does not contain authorization headers, proceed with authentication\n only if the client is of type `Confidential`.\n\n If something goes wrong, call oauthlib implementation of the method.\n \"\"\"\n if self._extract_basic_auth(request):\n return True\n\n try:\n if request.client_id and request.client_secret:\n return True\n except AttributeError:\n log.debug(\"Client ID or client secret not provided...\")\n pass\n\n self._load_application(request.client_id, request)\n if request.client:\n return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL\n\n return super().client_authentication_required(request, *args, **kwargs)\n\n def 
authenticate_client(self, request, *args, **kwargs):\n \"\"\"\n Check if client exists and is authenticating itself as in rfc:`3.2.1`\n\n First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED\n authentication method.\n Whether this fails we support including the client credentials in the request-body,\n but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to\n directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details\n \"\"\"\n authenticated = self._authenticate_basic_auth(request)\n\n if not authenticated:\n authenticated = self._authenticate_request_body(request)\n\n return authenticated\n\n def authenticate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n If we are here, the client did not authenticate itself as in rfc:`3.2.1` and we can\n proceed only if the client exists and is not of type \"Confidential\".\n \"\"\"\n if self._load_application(client_id, request) is not None:\n log.debug(\"Application %r has type %r\" % (client_id, request.client.client_type))\n return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL\n return False\n\n def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):\n \"\"\"\n Ensure the redirect_uri is listed in the Application instance redirect_uris field\n \"\"\"\n grant = Grant.objects.get(code=code, application=client)\n return grant.redirect_uri_allowed(redirect_uri)\n\n def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):\n \"\"\"\n Remove the temporary grant used to swap the authorization token\n \"\"\"\n grant = Grant.objects.get(code=code, application=request.client)\n grant.delete()\n\n def validate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n Ensure an Application exists with given client_id.\n If it exists, it's assigned to request.client.\n \"\"\"\n return self._load_application(client_id, request) is not None\n\n def get_default_redirect_uri(self, client_id, request, *args, **kwargs):\n return request.client.default_redirect_uri\n\n def _get_token_from_authentication_server(\n self, token, introspection_url, introspection_token, introspection_credentials\n ):\n \"\"\"Use external introspection endpoint to \"crack open\" the token.\n :param introspection_url: introspection endpoint URL\n :param introspection_token: Bearer token\n :param introspection_credentials: Basic Auth credentials (id,secret)\n :return: :class:`models.AccessToken`\n\n Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic\n Auth. Depending on the external AS's implementation, provide either the introspection_token\n or the introspection_credentials.\n\n If the resulting access_token identifies a username (e.g. Authorization Code grant), add\n that user to the UserModel. 
Also cache the access_token up until its expiry time or a\n configured maximum time.\n\n \"\"\"\n headers = None\n if introspection_token:\n headers = {\"Authorization\": \"Bearer {}\".format(introspection_token)}\n elif introspection_credentials:\n client_id = introspection_credentials[0].encode(\"utf-8\")\n client_secret = introspection_credentials[1].encode(\"utf-8\")\n basic_auth = base64.b64encode(client_id + b\":\" + client_secret)\n headers = {\"Authorization\": \"Basic {}\".format(basic_auth.decode(\"utf-8\"))}\n\n try:\n response = requests.post(\n introspection_url,\n data={\"token\": token}, headers=headers\n )\n except requests.exceptions.RequestException:\n log.exception(\"Introspection: Failed POST to %r in token lookup\", introspection_url)\n return None\n\n try:\n content = response.json()\n except ValueError:\n log.exception(\"Introspection: Failed to parse response as json\")\n return None\n\n if \"active\" in content and content[\"active\"] is True:\n if \"username\" in content:\n user, _created = UserModel.objects.get_or_create(\n **{UserModel.USERNAME_FIELD: content[\"username\"]}\n )\n else:\n user = None\n\n max_caching_time = datetime.now() + timedelta(\n seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS\n )\n\n if \"exp\" in content:\n expires = datetime.utcfromtimestamp(content[\"exp\"])\n if expires > max_caching_time:\n expires = max_caching_time\n else:\n expires = max_caching_time\n\n scope = content.get(\"scope\", \"\")\n expires = make_aware(expires)\n\n access_token, _created = AccessToken.objects.update_or_create(\n token=token,\n defaults={\n \"user\": user,\n \"application\": None,\n \"scope\": scope,\n \"expires\": expires,\n })\n\n return access_token\n\n def validate_bearer_token(self, token, scopes, request):\n \"\"\"\n When users try to access resources, check that provided token is valid\n \"\"\"\n if not token:\n return False\n\n introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL\n introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN\n introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS\n\n try:\n access_token = AccessToken.objects.select_related(\"application\", \"user\").get(token=token)\n except AccessToken.DoesNotExist:\n access_token = None\n\n # if there is no token or it's invalid then introspect the token if there's an external OAuth server\n if not access_token or not access_token.is_valid(scopes):\n if introspection_url and (introspection_token or introspection_credentials):\n access_token = self._get_token_from_authentication_server(\n token,\n introspection_url,\n introspection_token,\n introspection_credentials\n )\n\n if access_token and access_token.is_valid(scopes):\n request.client = access_token.application\n request.user = access_token.user\n request.scopes = scopes\n\n # this is needed by django rest framework\n request.access_token = access_token\n return True\n else:\n self._set_oauth2_error_on_request(request, access_token, scopes)\n return False\n\n def validate_code(self, client_id, code, client, request, *args, **kwargs):\n try:\n grant = Grant.objects.get(code=code, application=client)\n if not grant.is_expired():\n request.scopes = grant.scope.split(\" \")\n request.user = grant.user\n return True\n return False\n\n except Grant.DoesNotExist:\n return False\n\n def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):\n \"\"\"\n Validate both grant_type is a valid string and grant_type is allowed for current 
workflow\n \"\"\"\n assert(grant_type in GRANT_TYPE_MAPPING) # mapping misconfiguration\n return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])\n\n def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):\n \"\"\"\n We currently do not support the Authorization Endpoint Response Types registry as in\n rfc:`8.4`, so validate the response_type only if it matches \"code\" or \"token\"\n \"\"\"\n if response_type == \"code\":\n return client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)\n elif response_type == \"token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n else:\n return False\n\n def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):\n \"\"\"\n Ensure required scopes are permitted (as specified in the settings file)\n \"\"\"\n available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)\n return set(scopes).issubset(set(available_scopes))\n\n def get_default_scopes(self, client_id, request, *args, **kwargs):\n default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)\n return default_scopes\n\n def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):\n return request.client.redirect_uri_allowed(redirect_uri)\n\n def is_pkce_required(self, client_id, request):\n \"\"\"\n Enables or disables PKCE verification.\n\n Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that\n receives the client id and returns a bool.\n \"\"\"\n if callable(oauth2_settings.PKCE_REQUIRED):\n return oauth2_settings.PKCE_REQUIRED(client_id)\n return oauth2_settings.PKCE_REQUIRED\n\n def get_code_challenge(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge or None\n\n def get_code_challenge_method(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge_method or None\n\n def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n expires = timezone.now() + timedelta(\n seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)\n Grant.objects.create(\n application=request.client,\n user=request.user,\n code=code[\"code\"],\n expires=expires,\n redirect_uri=request.redirect_uri,\n scope=\" \".join(request.scopes),\n code_challenge=request.code_challenge or \"\",\n code_challenge_method=request.code_challenge_method or \"\"\n )\n\n def rotate_refresh_token(self, request):\n \"\"\"\n Checks if rotate refresh token is enabled\n \"\"\"\n return oauth2_settings.ROTATE_REFRESH_TOKEN\n\n @transaction.atomic\n def save_bearer_token(self, token, request, *args, **kwargs):\n \"\"\"\n Save access and refresh token, If refresh token is issued, remove or\n reuse old refresh token as in rfc:`6`\n\n @see: https://tools.ietf.org/html/draft-ietf-oauth-v2-31#page-43\n \"\"\"\n\n if \"scope\" not in token:\n raise FatalClientError(\"Failed to renew access token: missing scope\")\n\n # expires_in is passed to Server on initialization\n # custom server class can have logic to override this\n expires = timezone.now() + timedelta(seconds=token.get(\n \"expires_in\", oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,\n ))\n\n if request.grant_type == \"client_credentials\":\n request.user = None\n\n # This comes from OAuthLib:\n # https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267\n # Its value is either a new 
random code; or if we are reusing\n # refresh tokens, then it is the same value that the request passed in\n # (stored in `request.refresh_token`)\n refresh_token_code = token.get(\"refresh_token\", None)\n\n if refresh_token_code:\n # an instance of `RefreshToken` that matches the old refresh code.\n # Set on the request in `validate_refresh_token`\n refresh_token_instance = getattr(request, \"refresh_token_instance\", None)\n\n # If we are to reuse tokens, and we can: do so\n if not self.rotate_refresh_token(request) and \\\n isinstance(refresh_token_instance, RefreshToken) and \\\n refresh_token_instance.access_token:\n\n access_token = AccessToken.objects.select_for_update().get(\n pk=refresh_token_instance.access_token.pk\n )\n access_token.user = request.user\n access_token.scope = token[\"scope\"]\n access_token.expires = expires\n access_token.token = token[\"access_token\"]\n access_token.application = request.client\n access_token.save()\n\n # else create fresh with access & refresh tokens\n else:\n # revoke existing tokens if possible to allow reuse of grant\n if isinstance(refresh_token_instance, RefreshToken):\n # First, to ensure we don't have concurrency issues, we refresh the refresh token\n # from the db while acquiring a lock on it\n # We also put it in the \"request cache\"\n refresh_token_instance = RefreshToken.objects.select_for_update().get(\n id=refresh_token_instance.id\n )\n request.refresh_token_instance = refresh_token_instance\n\n previous_access_token = AccessToken.objects.filter(\n source_refresh_token=refresh_token_instance\n ).first()\n try:\n refresh_token_instance.revoke()\n except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):\n pass\n else:\n setattr(request, \"refresh_token_instance\", None)\n else:\n previous_access_token = None\n\n # If the refresh token has already been used to create an\n # access token (ie it's within the grace period), return that\n # access token\n if not previous_access_token:\n access_token = self._create_access_token(\n expires,\n request,\n token,\n source_refresh_token=refresh_token_instance,\n )\n\n self._create_refresh_token(request, refresh_token_code, access_token)\n else:\n # make sure that the token data we're returning matches\n # the existing token\n token[\"access_token\"] = previous_access_token.token\n token[\"refresh_token\"] = RefreshToken.objects.filter(\n access_token=previous_access_token\n ).first().token\n token[\"scope\"] = previous_access_token.scope\n\n # No refresh token should be created, just access token\n else:\n self._create_access_token(expires, request, token)\n\n def _create_access_token(self, expires, request, token, source_refresh_token=None):\n return AccessToken.objects.create(\n user=request.user,\n scope=token[\"scope\"],\n expires=expires,\n token=token[\"access_token\"],\n application=request.client,\n source_refresh_token=source_refresh_token,\n )\n\n def _create_refresh_token(self, request, refresh_token_code, access_token):\n return RefreshToken.objects.create(\n user=request.user,\n token=refresh_token_code,\n application=request.client,\n access_token=access_token\n )\n\n def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n \"\"\"\n Revoke an access or refresh token.\n\n :param token: The token string.\n :param token_type_hint: access_token or refresh_token.\n :param request: The HTTP Request (oauthlib.common.Request)\n \"\"\"\n if token_type_hint not in [\"access_token\", \"refresh_token\"]:\n token_type_hint = None\n\n token_types = {\n 
\"access_token\": AccessToken,\n \"refresh_token\": RefreshToken,\n }\n\n token_type = token_types.get(token_type_hint, AccessToken)\n try:\n token_type.objects.get(token=token).revoke()\n except ObjectDoesNotExist:\n for other_type in [_t for _t in token_types.values() if _t != token_type]:\n # slightly inefficient on Python2, but the queryset contains only one instance\n list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))\n\n def validate_user(self, username, password, client, request, *args, **kwargs):\n \"\"\"\n Check username and password correspond to a valid and active User\n \"\"\"\n u = authenticate(username=username, password=password)\n if u is not None and u.is_active:\n request.user = u\n return True\n return False\n\n def get_original_scopes(self, refresh_token, request, *args, **kwargs):\n # Avoid second query for RefreshToken since this method is invoked *after*\n # validate_refresh_token.\n rt = request.refresh_token_instance\n if not rt.access_token_id:\n return AccessToken.objects.get(source_refresh_token_id=rt.id).scope\n\n return rt.access_token.scope\n\n def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):\n \"\"\"\n Check refresh_token exists and refers to the right client.\n Also attach User instance to the request object\n \"\"\"\n\n null_or_recent = Q(revoked__isnull=True) | Q(\n revoked__gt=timezone.now() - timedelta(\n seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS\n )\n )\n rt = RefreshToken.objects.filter(null_or_recent, token=refresh_token).first()\n\n if not rt:\n return False\n\n request.user = rt.user\n request.refresh_token = rt.token\n # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.\n request.refresh_token_instance = rt\n return rt.application == client\n", "path": "oauth2_provider/oauth2_validators.py"}]} |
gh_patches_debug_1335 | rasdani/github-patches | git_diff | uclapi__uclapi-3514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Requests of Undefined when Creating a New App
**Describe the bug**
When creating a new app on the staging.ninja dashboard the redirect page fails to render.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a new app on https://staging.ninja/dashboard/
**Expected behavior**
Return to app list page.
**Screenshots**
```js
vendors-66dfc54de2ab880932e8.js:2 TypeError: Cannot read property 'requests' of undefined
at dashboard-edcd2f4fb681f0126f93.js:1
at Array.map (<anonymous>)
at A.value (dashboard-edcd2f4fb681f0126f93.js:1)
at jo (vendors-66dfc54de2ab880932e8.js:2)
at Yo (vendors-66dfc54de2ab880932e8.js:2)
at ys (vendors-66dfc54de2ab880932e8.js:2)
at El (vendors-66dfc54de2ab880932e8.js:2)
at fl (vendors-66dfc54de2ab880932e8.js:2)
at sl (vendors-66dfc54de2ab880932e8.js:2)
at vendors-66dfc54de2ab880932e8.js:2
dashboard-edcd2f4fb681f0126f93.js:1 Uncaught (in promise) TypeError: Cannot read property 'requests' of undefined
at dashboard-edcd2f4fb681f0126f93.js:1
at Array.map (<anonymous>)
at A.value (dashboard-edcd2f4fb681f0126f93.js:1)
at jo (vendors-66dfc54de2ab880932e8.js:2)
at Yo (vendors-66dfc54de2ab880932e8.js:2)
at ys (vendors-66dfc54de2ab880932e8.js:2)
at El (vendors-66dfc54de2ab880932e8.js:2)
at fl (vendors-66dfc54de2ab880932e8.js:2)
at sl (vendors-66dfc54de2ab880932e8.js:2)
at vendors-66dfc54de2ab880932e8.js:2
```
**Additional context**
Does not occur in production.
--- END ISSUE ---
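Judging from the stack trace, the failure happens inside an `Array.map` over the dashboard's app list while reading a `requests` property, which suggests the front end expects an `analytics` object on every app payload it receives. In the backend code below, `get_apps` attaches such an object to each app, but `create_app`'s JSON response omits it, so a freshly created app would reach the client with `analytics` undefined. A minimal Python sketch of that asymmetry (the payload values are illustrative assumptions, not taken from the repository):

```python
# Hypothetical payloads mirroring the shapes built by get_apps() and create_app()
# in backend/uclapi/dashboard/api_applications.py; the values are made up.
existing_app = {
    "name": "Existing app",
    "id": 1,
    "analytics": {"requests": 42, "remaining_quota": 9958, "users": 3, "users_per_dept": []},
}
newly_created_app = {
    "name": "New app",
    "id": 2,
    # create_app's response carries no "analytics" key at all
}

for app in (existing_app, newly_created_app):
    # The dashboard effectively evaluates app["analytics"]["requests"] for every app;
    # a missing "analytics" key is the Python analogue of the JS error
    # "Cannot read property 'requests' of undefined".
    analytics = app.get("analytics")
    print(app["name"], analytics["requests"] if analytics else "analytics missing")
```

Under that reading, returning the same `analytics` shape (with zeroed defaults) from `create_app` would let the redirect page render.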
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/uclapi/dashboard/api_applications.py`
Content:
```
1 import json
2 import redis
3 from django.db.models import Count
4
5 from django.http import JsonResponse
6 from django.utils.datastructures import MultiValueDictKeyError
7 from django.utils.datetime_safe import datetime
8
9 from oauth.models import OAuthToken
10 from oauth.scoping import Scopes
11 from common.helpers import PrettyJsonResponse
12 from uclapi.settings import REDIS_UCLAPI_HOST
13
14 from .app_helpers import (is_url_unsafe, NOT_HTTPS,
15 NOT_VALID, URL_BLACKLISTED, NOT_PUBLIC)
16 from .models import App, User, APICall
17
18
19 def get_user_by_id(user_id):
20 user = User.objects.get(id=user_id)
21 return user
22
23
24 def create_app(request):
25 if request.method != "POST":
26 response = PrettyJsonResponse({
27 "success": False,
28 "error": "Request is not of method POST"
29 })
30 response.status_code = 400
31 return response
32
33 try:
34 name = request.POST["name"]
35 user_id = request.session["user_id"]
36 except (KeyError, AttributeError):
37 response = PrettyJsonResponse({
38 "success": False,
39 "message": "Request does not have name or user."
40 })
41 response.status_code = 400
42 return response
43
44 user = get_user_by_id(user_id)
45
46 new_app = App(name=name, user=user)
47 new_app.save()
48
49 s = Scopes()
50
51 return PrettyJsonResponse({
52 "success": True,
53 "message": "App sucessfully created",
54 "app": {
55 "name": new_app.name,
56 "id": new_app.id,
57 "token": new_app.api_token,
58 "created": new_app.created,
59 "updated": new_app.last_updated,
60 "oauth": {
61 "client_id": new_app.client_id,
62 "client_secret": new_app.client_secret,
63 "callback_url": new_app.callback_url,
64 "scopes": s.get_all_scopes()
65 },
66 "webhook": {
67 "verification_secret": new_app.webhook.verification_secret,
68 }
69 }
70 })
71
72
73 def rename_app(request):
74 if request.method != "POST":
75 response = PrettyJsonResponse({
76 "success": False,
77 "error": "Request is not of method POST"
78 })
79 response.status_code = 400
80 return response
81
82 try:
83 app_id = request.POST["app_id"]
84 new_name = request.POST["new_name"]
85 user_id = request.session["user_id"]
86 except (KeyError, AttributeError):
87 response = PrettyJsonResponse({
88 "success": False,
89 "message": "Request does not have app_id/new_name"
90 })
91 response.status_code = 400
92 return response
93
94 user = get_user_by_id(user_id)
95
96 apps = App.objects.filter(id=app_id, user=user, deleted=False)
97 if len(apps) == 0:
98 response = PrettyJsonResponse({
99 "success": False,
100 "message": "App does not exist."
101 })
102 response.status_code = 400
103 return response
104 else:
105 app = apps[0]
106 app.name = new_name
107 app.save()
108
109 return PrettyJsonResponse({
110 "success": True,
111 "message": "App sucessfully renamed.",
112 "date": app.last_updated
113 })
114
115
116 def regenerate_app_token(request):
117 if request.method != "POST":
118 response = PrettyJsonResponse({
119 "success": False,
120 "error": "Request is not of method POST"
121 })
122 response.status_code = 400
123 return response
124
125 try:
126 app_id = request.POST["app_id"]
127 user_id = request.session["user_id"]
128 except (KeyError, AttributeError):
129 response = PrettyJsonResponse({
130 "success": False,
131 "message": "Request does not have an app_id."
132 })
133 response.status_code = 400
134 return response
135
136 user = get_user_by_id(user_id)
137
138 apps = App.objects.filter(id=app_id, user=user)
139 if len(apps) == 0:
140 response = PrettyJsonResponse({
141 "success": False,
142 "message": "App does not exist."
143 })
144 response.status_code = 400
145 return response
146 else:
147 app = apps[0]
148 app.regenerate_token()
149 new_api_token = app.api_token
150
151 return PrettyJsonResponse({
152 "success": True,
153 "message": "App token sucessfully regenerated.",
154 "app": {
155 "id": app.id,
156 "token": new_api_token,
157 "date": app.last_updated
158 }
159 })
160
161
162 def delete_app(request):
163 if request.method != "POST":
164 response = PrettyJsonResponse({
165 "success": False,
166 "error": "Request is not of method POST"
167 })
168 response.status_code = 400
169 return response
170
171 try:
172 app_id = request.POST["app_id"]
173 user_id = request.session["user_id"]
174 except (KeyError, AttributeError):
175 response = PrettyJsonResponse({
176 "success": False,
177 "message": "Request does not have an app_id."
178 })
179 response.status_code = 400
180 return response
181
182 user = get_user_by_id(user_id)
183
184 apps = App.objects.filter(id=app_id, user=user)
185 if len(apps) == 0:
186 response = PrettyJsonResponse({
187 "success": False,
188 "message": "App does not exist."
189 })
190 response.status_code = 400
191 return response
192 else:
193 app = apps[0]
194 app.deleted = True
195 webhook = app.webhook
196 webhook.url = ""
197 webhook.siteid = ""
198 webhook.roomid = ""
199 webhook.contact = ""
200 webhook.enabled = False
201 webhook.save()
202 app.save()
203
204 return PrettyJsonResponse({
205 "success": True,
206 "message": "App sucessfully deleted.",
207 })
208
209
210 def set_callback_url(request):
211 if request.method != "POST":
212 response = PrettyJsonResponse({
213 "success": False,
214 "error": "Request is not of method POST"
215 })
216 response.status_code = 400
217 return response
218 try:
219 app_id = request.POST["app_id"]
220 except KeyError:
221 response = PrettyJsonResponse({
222 "success": False,
223 "message": "Request does not have an app_id."
224 })
225 response.status_code = 400
226 return response
227
228 try:
229 user_id = request.session["user_id"]
230 except (KeyError, AttributeError):
231 response = PrettyJsonResponse({
232 "success": False,
233 "message": "User ID not set in session. Please log in again."
234 })
235 response.status_code = 400
236 return response
237
238 try:
239 new_callback_url = request.POST["callback_url"]
240 except KeyError:
241 response = PrettyJsonResponse({
242 "success": False,
243 "message": "Request does not have a Callback URL."
244 })
245 response.status_code = 400
246 return response
247 url_not_safe_saved = is_url_unsafe(new_callback_url)
248 if url_not_safe_saved:
249 if url_not_safe_saved == NOT_HTTPS:
250 message = "The requested callback URL does not " \
251 "start with 'https://'."
252 elif url_not_safe_saved == NOT_VALID:
253 message = "The requested callback URL is not valid."
254 elif url_not_safe_saved == URL_BLACKLISTED:
255 message = "The requested callback URL is forbidden."
256 elif url_not_safe_saved == NOT_PUBLIC:
257 message = "The requested callback URL is not publicly available."
258 response = PrettyJsonResponse({
259 "success": False,
260 "message": message
261 })
262 response.status_code = 400
263 return response
264
265 user = get_user_by_id(user_id)
266
267 apps = App.objects.filter(id=app_id, user=user)
268 if len(apps) == 0:
269 response = PrettyJsonResponse({
270 "success": False,
271 "message": "App does not exist."
272 })
273 response.status_code = 400
274 return response
275
276 app = apps[0]
277 app.callback_url = new_callback_url
278 app.save()
279
280 return PrettyJsonResponse({
281 "success": True,
282 "message": "Callback URL successfully changed.",
283 })
284
285
286 def update_scopes(request):
287 if request.method != "POST":
288 response = PrettyJsonResponse({
289 "success": False,
290 "error": "Request is not of method POST"
291 })
292 response.status_code = 400
293 return response
294
295 try:
296 app_id = request.POST["app_id"]
297 except KeyError:
298 response = PrettyJsonResponse({
299 "success": False,
300 "message": "Request does not have an app_id."
301 })
302 response.status_code = 400
303 return response
304
305 try:
306 user_id = request.session["user_id"]
307 except (KeyError, AttributeError):
308 response = PrettyJsonResponse({
309 "success": False,
310 "message": "User ID not set in session. Please log in again."
311 })
312 response.status_code = 400
313 return response
314
315 try:
316 scopes_json = request.POST["scopes"]
317 except KeyError:
318 response = PrettyJsonResponse({
319 "success": False,
320 "message": "No scopes data attached."
321 })
322 response.status_code = 400
323 return response
324
325 try:
326 scopes = json.loads(scopes_json)
327 except ValueError:
328 response = PrettyJsonResponse({
329 "success": False,
330 "message": "Invalid scope data that could not be parsed."
331 })
332 response.status_code = 400
333 return response
334
335 user = get_user_by_id(user_id)
336
337 apps = App.objects.filter(id=app_id, user=user)
338 if len(apps) == 0:
339 response = PrettyJsonResponse({
340 "success": False,
341 "message": "App does not exist."
342 })
343 response.status_code = 400
344 return response
345 else:
346 app = apps[0]
347 current = app.scope.scope_number
348 s = Scopes()
349 try:
350 for scope in scopes:
351 if "checked" in scope and scope["checked"]:
352 current = s.add_scope(current, scope["name"])
353 else:
354 current = s.remove_scope(current, scope["name"])
355
356 app.scope.scope_number = current
357 app.scope.save()
358 app.save()
359 except (KeyError, ValueError, TypeError):
360 response = PrettyJsonResponse({
361 "success": False,
362 "message": "Invalid scope data that could not be iterated."
363 })
364 response.status_code = 400
365 return response
366
367 return PrettyJsonResponse({
368 "success": True,
369 "message": "Scope successfully changed.",
370 })
371
372
373 def get_number_of_requests(token):
374 if token.startswith('uclapi-user-'):
375 calls = APICall.objects.filter(token__token__exact=token)
376 elif token.startswith('uclapi-'):
377 calls = APICall.objects.filter(app__api_token__exact=token)
378 else:
379 return None
380
381 return len(calls)
382
383
384 def number_of_requests(request):
385 try:
386 token = request.GET["token"]
387 except MultiValueDictKeyError:
388 response = JsonResponse({
389 "ok": False,
390 "message": "No token provided"
391 })
392 response.status_code = 400
393 return response
394
395 calls = get_number_of_requests(token)
396 if calls is None:
397 response = JsonResponse({
398 "ok": False,
399 "message": "Token is invalid"
400 })
401 response.status_code = 400
402 return response
403
404 return PrettyJsonResponse({
405 "ok": True,
406 "num": calls,
407 })
408
409
410 def get_apps(request):
411 if request.method != "GET":
412 response = PrettyJsonResponse({
413 "success": False,
414 "error": "Request is not of method GET"
415 })
416 response.status_code = 400
417 return response
418 try:
419 user_id = request.session["user_id"]
420 except (KeyError, AttributeError):
421 response = PrettyJsonResponse({
422 "success": False,
423 "message": "User ID not set in session. Please log in again."
424 })
425 response.status_code = 400
426 return response
427
428 user = get_user_by_id(user_id)
429
430 user_meta = {
431 "name": user.full_name,
432 "cn": user.cn,
433 "department": user.department,
434 "intranet_groups": user.raw_intranet_groups,
435 "apps": []
436 }
437
438 user_apps = App.objects.filter(user=user, deleted=False)
439
440 s = Scopes()
441
442 for app in user_apps:
443 user_meta["apps"].append({
444 "name": app.name,
445 "id": app.id,
446 "token": app.api_token,
447 "created": app.created,
448 "updated": app.last_updated,
449 "oauth": {
450 "client_id": app.client_id,
451 "client_secret": app.client_secret,
452 "callback_url": app.callback_url,
453 "scopes": s.scope_dict_all(app.scope.scope_number)
454 },
455 "webhook": {
456 "verification_secret": app.webhook.verification_secret,
457 "url": app.webhook.url,
458 "siteid": app.webhook.siteid,
459 "roomid": app.webhook.roomid,
460 "contact": app.webhook.contact
461 },
462 "analytics": {
463 "requests": get_number_of_requests(app.api_token),
464 "remaining_quota": get_quota_remaining(app.api_token),
465 "users": get_users_per_app(app.api_token),
466 "users_per_dept": get_users_per_app_per_dept(app.api_token)
467 }
468 })
469
470 return PrettyJsonResponse(user_meta)
471
472
473 def get_quota_remaining(token):
474 r = redis.Redis(host=REDIS_UCLAPI_HOST)
475
476 if token.startswith('uclapi-user-'):
477 Otoken = OAuthToken.objects.filter(token__exact=token).first()
478
479 cache_key = "oauth:" + Otoken.user.email
480 limit = Otoken.user.oauth_quota
481
482 elif token.startswith('uclapi-'):
483 app = App.objects.filter(api_token__exact=token).first()
484 cache_key = app.user.email
485 limit = app.user.dev_quota
486
487 else:
488 return None
489
490 count_data = r.get(cache_key)
491
492 if count_data:
493 count_data = int(r.get(cache_key))
494 else:
495 count_data = 0
496
497 return limit - count_data
498
499
500 def quota_remaining(request):
501 try:
502 token = request.GET["token"]
503 except MultiValueDictKeyError:
504 response = JsonResponse({
505 "ok": False,
506 "message": "No token provided"
507 })
508 response.status_code = 400
509 return response
510
511 quota = get_quota_remaining(token)
512 if quota is None:
513 response = JsonResponse({
514 "ok": False,
515 "message": "Token is invalid"
516 })
517 response.status_code = 400
518 return response
519
520 return PrettyJsonResponse({
521 "ok": True,
522 "remaining": quota,
523 })
524
525
526 def most_popular_service(request):
527 most_common = APICall.objects.values("service").annotate(
528 count=Count('service')).order_by("-count")
529 most_common = list(most_common)
530
531 return PrettyJsonResponse({
532 "ok": True,
533 "data": most_common
534 })
535
536
537 def most_popular_method(request):
538 service = request.GET.get("service", False)
539 split_by_service = request.GET.get("split_services", "false")
540 split_by_service = False if split_by_service.lower() in [
541 "false", "0"] else True
542
543 if service:
544 most_common = APICall.objects.filter(service__exact=service)\
545 .values("service", "method").annotate(count=Count('method')).order_by("-count")
546 else:
547 most_common = APICall.objects\
548 .values("service", "method").annotate(count=Count('method')).order_by("-count")
549
550 if not split_by_service:
551 t_most_common_counter = {}
552 for m in most_common:
553 if m["method"].split("/")[0] in t_most_common_counter:
554 t_most_common_counter[m["method"].split("/")[0]] += m["count"]
555 else:
556 t_most_common_counter[m["method"].split("/")[0]] = m["count"]
557 print(t_most_common_counter)
558
559 most_common = [{"method": method, "count": count}
560 for method, count in t_most_common_counter.items()]
561 else:
562 temp_most_common_aggregate = {}
563 for method in most_common:
564 if method["service"] in temp_most_common_aggregate:
565 temp_most_common_aggregate[method["service"]].append({
566 "method": method["method"],
567 "count": method["count"]
568 })
569 else:
570 temp_most_common_aggregate[method["service"]] = [{
571 "method": method["method"],
572 "count": method["count"]
573 }]
574 most_common = temp_most_common_aggregate
575
576 return PrettyJsonResponse({
577 "ok": True,
578 "data": most_common
579 })
580
581
582 def get_users_per_app(token, start=None, end=None):
583 if start and end:
584 start_date = datetime.strptime(start, "%Y-%m-%d")
585 end_date = datetime.strptime(end, "%Y-%m-%d")
586
587 users = OAuthToken.objects.filter(creation_date__gte=start_date,
588 creation_date__lte=end_date,
589 app__api_token__exact=token)
590 else:
591 users = OAuthToken.objects.filter(app__api_token__exact=token)
592
593 return len(users)
594
595
596 def users_per_app(request):
597 try:
598 token = request.GET["token"]
599 except MultiValueDictKeyError:
600 response = JsonResponse({
601 "ok": False,
602 "message": "No token provided"
603 })
604 response.status_code = 400
605 return response
606
607 try:
608 start = request.GET["start_date"]
609 end = request.GET["end_date"]
610 users_count = get_users_per_app(token, start, end)
611 except MultiValueDictKeyError:
612 users_count = get_users_per_app(token)
613
614 return PrettyJsonResponse({
615 "ok": True,
616 "users": users_count
617 })
618
619
620 def get_users_per_app_per_dept(token):
621 users = User.objects.filter(oauthtoken__app__api_token__exact=token)\
622 .values("department").annotate(count=Count('department'))\
623 .order_by("-count")
624 return list(users)
625
626
627 def users_per_app_by_dept(request):
628 try:
629 token = request.GET["token"]
630 except MultiValueDictKeyError:
631 response = JsonResponse({
632 "ok": False,
633 "message": "No token provided"
634 })
635 response.status_code = 400
636 return response
637
638 users = get_users_per_app_per_dept(token)
639
640 return PrettyJsonResponse({
641 "ok": True,
642 "data": users
643 })
644
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py
--- a/backend/uclapi/dashboard/api_applications.py
+++ b/backend/uclapi/dashboard/api_applications.py
@@ -65,6 +65,12 @@
             },
             "webhook": {
                 "verification_secret": new_app.webhook.verification_secret,
+            },
+            "analytics": {
+                "requests": 0,
+                "remaining_quota": User._meta.get_field('oauth_quota').get_default(),
+                "users": 0,
+                "users_per_dept": []
             }
         }
     })
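The hunk above gives a brand-new app the same `analytics` stanza that `get_apps` already produces, with zeroed counters and the user's default OAuth quota, so the dashboard's app list can render immediately after creation. A rough sketch of the response body the patched `create_app` would return (concrete values such as the token and the quota default are placeholders, not taken from the project):

```python
# Assumed shape of the patched create_app JSON response; the field names follow the
# diff above, while the example values are placeholders.
patched_response = {
    "success": True,
    "app": {
        "name": "My new app",
        "id": 42,
        "token": "uclapi-placeholder-token",  # placeholder
        "analytics": {
            "requests": 0,                 # a new app has served no requests yet
            "remaining_quota": 10000,      # placeholder for User.oauth_quota's default
            "users": 0,
            "users_per_dept": [],
        },
    },
}

# The dashboard's per-app mapping can now read these fields safely:
assert patched_response["app"]["analytics"]["requests"] == 0
```

Keeping the stanza's shape identical to the one built in `get_apps` is what matters here; the zeroed values are simply sensible defaults for an app with no traffic yet.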
| {"golden_diff": "diff --git a/backend/uclapi/dashboard/api_applications.py b/backend/uclapi/dashboard/api_applications.py\n--- a/backend/uclapi/dashboard/api_applications.py\n+++ b/backend/uclapi/dashboard/api_applications.py\n@@ -65,6 +65,12 @@\n },\n \"webhook\": {\n \"verification_secret\": new_app.webhook.verification_secret,\n+ },\n+ \"analytics\": {\n+ \"requests\": 0,\n+ \"remaining_quota\": User._meta.get_field('oauth_quota').get_default(),\n+ \"users\": 0,\n+ \"users_per_dept\": []\n }\n }\n })\n", "issue": "[BUG] Requests of Undefined when Creating a New App\n**Describe the bug**\r\nWhen creating a new app on the staging.ninja dashboard the redirect page fails to render.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a new app on https://staging.ninja/dashboard/\r\n\r\n**Expected behavior**\r\nReturn to app list page.\r\n\r\n**Screenshots**\r\n```js\r\nvendors-66dfc54de2ab880932e8.js:2 TypeError: Cannot read property 'requests' of undefined\r\n at dashboard-edcd2f4fb681f0126f93.js:1\r\n at Array.map (<anonymous>)\r\n at A.value (dashboard-edcd2f4fb681f0126f93.js:1)\r\n at jo (vendors-66dfc54de2ab880932e8.js:2)\r\n at Yo (vendors-66dfc54de2ab880932e8.js:2)\r\n at ys (vendors-66dfc54de2ab880932e8.js:2)\r\n at El (vendors-66dfc54de2ab880932e8.js:2)\r\n at fl (vendors-66dfc54de2ab880932e8.js:2)\r\n at sl (vendors-66dfc54de2ab880932e8.js:2)\r\n at vendors-66dfc54de2ab880932e8.js:2\r\n\r\ndashboard-edcd2f4fb681f0126f93.js:1 Uncaught (in promise) TypeError: Cannot read property 'requests' of undefined\r\n at dashboard-edcd2f4fb681f0126f93.js:1\r\n at Array.map (<anonymous>)\r\n at A.value (dashboard-edcd2f4fb681f0126f93.js:1)\r\n at jo (vendors-66dfc54de2ab880932e8.js:2)\r\n at Yo (vendors-66dfc54de2ab880932e8.js:2)\r\n at ys (vendors-66dfc54de2ab880932e8.js:2)\r\n at El (vendors-66dfc54de2ab880932e8.js:2)\r\n at fl (vendors-66dfc54de2ab880932e8.js:2)\r\n at sl (vendors-66dfc54de2ab880932e8.js:2)\r\n at vendors-66dfc54de2ab880932e8.js:2\r\n```\r\n\r\n\r\n**Additional context**\r\nDoes not occur in production.\r\n\n", "before_files": [{"content": "import json\nimport redis\nfrom django.db.models import Count\n\nfrom django.http import JsonResponse\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.utils.datetime_safe import datetime\n\nfrom oauth.models import OAuthToken\nfrom oauth.scoping import Scopes\nfrom common.helpers import PrettyJsonResponse\nfrom uclapi.settings import REDIS_UCLAPI_HOST\n\nfrom .app_helpers import (is_url_unsafe, NOT_HTTPS,\n NOT_VALID, URL_BLACKLISTED, NOT_PUBLIC)\nfrom .models import App, User, APICall\n\n\ndef get_user_by_id(user_id):\n user = User.objects.get(id=user_id)\n return user\n\n\ndef create_app(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n name = request.POST[\"name\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have name or user.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n new_app = App(name=name, user=user)\n new_app.save()\n\n s = Scopes()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App sucessfully created\",\n \"app\": {\n \"name\": new_app.name,\n \"id\": new_app.id,\n \"token\": new_app.api_token,\n \"created\": new_app.created,\n 
\"updated\": new_app.last_updated,\n \"oauth\": {\n \"client_id\": new_app.client_id,\n \"client_secret\": new_app.client_secret,\n \"callback_url\": new_app.callback_url,\n \"scopes\": s.get_all_scopes()\n },\n \"webhook\": {\n \"verification_secret\": new_app.webhook.verification_secret,\n }\n }\n })\n\n\ndef rename_app(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n new_name = request.POST[\"new_name\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have app_id/new_name\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user, deleted=False)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n app.name = new_name\n app.save()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App sucessfully renamed.\",\n \"date\": app.last_updated\n })\n\n\ndef regenerate_app_token(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n app.regenerate_token()\n new_api_token = app.api_token\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App token sucessfully regenerated.\",\n \"app\": {\n \"id\": app.id,\n \"token\": new_api_token,\n \"date\": app.last_updated\n }\n })\n\n\ndef delete_app(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n app.deleted = True\n webhook = app.webhook\n webhook.url = \"\"\n webhook.siteid = \"\"\n webhook.roomid = \"\"\n webhook.contact = \"\"\n webhook.enabled = False\n webhook.save()\n app.save()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App sucessfully deleted.\",\n })\n\n\ndef set_callback_url(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n 
\"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n try:\n app_id = request.POST[\"app_id\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n try:\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"User ID not set in session. Please log in again.\"\n })\n response.status_code = 400\n return response\n\n try:\n new_callback_url = request.POST[\"callback_url\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have a Callback URL.\"\n })\n response.status_code = 400\n return response\n url_not_safe_saved = is_url_unsafe(new_callback_url)\n if url_not_safe_saved:\n if url_not_safe_saved == NOT_HTTPS:\n message = \"The requested callback URL does not \" \\\n \"start with 'https://'.\"\n elif url_not_safe_saved == NOT_VALID:\n message = \"The requested callback URL is not valid.\"\n elif url_not_safe_saved == URL_BLACKLISTED:\n message = \"The requested callback URL is forbidden.\"\n elif url_not_safe_saved == NOT_PUBLIC:\n message = \"The requested callback URL is not publicly available.\"\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": message\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n\n app = apps[0]\n app.callback_url = new_callback_url\n app.save()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"Callback URL successfully changed.\",\n })\n\n\ndef update_scopes(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n try:\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"User ID not set in session. 
Please log in again.\"\n })\n response.status_code = 400\n return response\n\n try:\n scopes_json = request.POST[\"scopes\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"No scopes data attached.\"\n })\n response.status_code = 400\n return response\n\n try:\n scopes = json.loads(scopes_json)\n except ValueError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Invalid scope data that could not be parsed.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n current = app.scope.scope_number\n s = Scopes()\n try:\n for scope in scopes:\n if \"checked\" in scope and scope[\"checked\"]:\n current = s.add_scope(current, scope[\"name\"])\n else:\n current = s.remove_scope(current, scope[\"name\"])\n\n app.scope.scope_number = current\n app.scope.save()\n app.save()\n except (KeyError, ValueError, TypeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Invalid scope data that could not be iterated.\"\n })\n response.status_code = 400\n return response\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"Scope successfully changed.\",\n })\n\n\ndef get_number_of_requests(token):\n if token.startswith('uclapi-user-'):\n calls = APICall.objects.filter(token__token__exact=token)\n elif token.startswith('uclapi-'):\n calls = APICall.objects.filter(app__api_token__exact=token)\n else:\n return None\n\n return len(calls)\n\n\ndef number_of_requests(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n calls = get_number_of_requests(token)\n if calls is None:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"Token is invalid\"\n })\n response.status_code = 400\n return response\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"num\": calls,\n })\n\n\ndef get_apps(request):\n if request.method != \"GET\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method GET\"\n })\n response.status_code = 400\n return response\n try:\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"User ID not set in session. 
Please log in again.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n user_meta = {\n \"name\": user.full_name,\n \"cn\": user.cn,\n \"department\": user.department,\n \"intranet_groups\": user.raw_intranet_groups,\n \"apps\": []\n }\n\n user_apps = App.objects.filter(user=user, deleted=False)\n\n s = Scopes()\n\n for app in user_apps:\n user_meta[\"apps\"].append({\n \"name\": app.name,\n \"id\": app.id,\n \"token\": app.api_token,\n \"created\": app.created,\n \"updated\": app.last_updated,\n \"oauth\": {\n \"client_id\": app.client_id,\n \"client_secret\": app.client_secret,\n \"callback_url\": app.callback_url,\n \"scopes\": s.scope_dict_all(app.scope.scope_number)\n },\n \"webhook\": {\n \"verification_secret\": app.webhook.verification_secret,\n \"url\": app.webhook.url,\n \"siteid\": app.webhook.siteid,\n \"roomid\": app.webhook.roomid,\n \"contact\": app.webhook.contact\n },\n \"analytics\": {\n \"requests\": get_number_of_requests(app.api_token),\n \"remaining_quota\": get_quota_remaining(app.api_token),\n \"users\": get_users_per_app(app.api_token),\n \"users_per_dept\": get_users_per_app_per_dept(app.api_token)\n }\n })\n\n return PrettyJsonResponse(user_meta)\n\n\ndef get_quota_remaining(token):\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n\n if token.startswith('uclapi-user-'):\n Otoken = OAuthToken.objects.filter(token__exact=token).first()\n\n cache_key = \"oauth:\" + Otoken.user.email\n limit = Otoken.user.oauth_quota\n\n elif token.startswith('uclapi-'):\n app = App.objects.filter(api_token__exact=token).first()\n cache_key = app.user.email\n limit = app.user.dev_quota\n\n else:\n return None\n\n count_data = r.get(cache_key)\n\n if count_data:\n count_data = int(r.get(cache_key))\n else:\n count_data = 0\n\n return limit - count_data\n\n\ndef quota_remaining(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n quota = get_quota_remaining(token)\n if quota is None:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"Token is invalid\"\n })\n response.status_code = 400\n return response\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"remaining\": quota,\n })\n\n\ndef most_popular_service(request):\n most_common = APICall.objects.values(\"service\").annotate(\n count=Count('service')).order_by(\"-count\")\n most_common = list(most_common)\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"data\": most_common\n })\n\n\ndef most_popular_method(request):\n service = request.GET.get(\"service\", False)\n split_by_service = request.GET.get(\"split_services\", \"false\")\n split_by_service = False if split_by_service.lower() in [\n \"false\", \"0\"] else True\n\n if service:\n most_common = APICall.objects.filter(service__exact=service)\\\n .values(\"service\", \"method\").annotate(count=Count('method')).order_by(\"-count\")\n else:\n most_common = APICall.objects\\\n .values(\"service\", \"method\").annotate(count=Count('method')).order_by(\"-count\")\n\n if not split_by_service:\n t_most_common_counter = {}\n for m in most_common:\n if m[\"method\"].split(\"/\")[0] in t_most_common_counter:\n t_most_common_counter[m[\"method\"].split(\"/\")[0]] += m[\"count\"]\n else:\n t_most_common_counter[m[\"method\"].split(\"/\")[0]] = m[\"count\"]\n print(t_most_common_counter)\n\n most_common = [{\"method\": method, \"count\": count}\n for method, count in 
t_most_common_counter.items()]\n else:\n temp_most_common_aggregate = {}\n for method in most_common:\n if method[\"service\"] in temp_most_common_aggregate:\n temp_most_common_aggregate[method[\"service\"]].append({\n \"method\": method[\"method\"],\n \"count\": method[\"count\"]\n })\n else:\n temp_most_common_aggregate[method[\"service\"]] = [{\n \"method\": method[\"method\"],\n \"count\": method[\"count\"]\n }]\n most_common = temp_most_common_aggregate\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"data\": most_common\n })\n\n\ndef get_users_per_app(token, start=None, end=None):\n if start and end:\n start_date = datetime.strptime(start, \"%Y-%m-%d\")\n end_date = datetime.strptime(end, \"%Y-%m-%d\")\n\n users = OAuthToken.objects.filter(creation_date__gte=start_date,\n creation_date__lte=end_date,\n app__api_token__exact=token)\n else:\n users = OAuthToken.objects.filter(app__api_token__exact=token)\n\n return len(users)\n\n\ndef users_per_app(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n try:\n start = request.GET[\"start_date\"]\n end = request.GET[\"end_date\"]\n users_count = get_users_per_app(token, start, end)\n except MultiValueDictKeyError:\n users_count = get_users_per_app(token)\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"users\": users_count\n })\n\n\ndef get_users_per_app_per_dept(token):\n users = User.objects.filter(oauthtoken__app__api_token__exact=token)\\\n .values(\"department\").annotate(count=Count('department'))\\\n .order_by(\"-count\")\n return list(users)\n\n\ndef users_per_app_by_dept(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n users = get_users_per_app_per_dept(token)\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"data\": users\n })\n", "path": "backend/uclapi/dashboard/api_applications.py"}], "after_files": [{"content": "import json\nimport redis\nfrom django.db.models import Count\n\nfrom django.http import JsonResponse\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.utils.datetime_safe import datetime\n\nfrom oauth.models import OAuthToken\nfrom oauth.scoping import Scopes\nfrom common.helpers import PrettyJsonResponse\nfrom uclapi.settings import REDIS_UCLAPI_HOST\n\nfrom .app_helpers import (is_url_unsafe, NOT_HTTPS,\n NOT_VALID, URL_BLACKLISTED, NOT_PUBLIC)\nfrom .models import App, User, APICall\n\n\ndef get_user_by_id(user_id):\n user = User.objects.get(id=user_id)\n return user\n\n\ndef create_app(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n name = request.POST[\"name\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have name or user.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n new_app = App(name=name, user=user)\n new_app.save()\n\n s = Scopes()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App sucessfully created\",\n \"app\": {\n \"name\": new_app.name,\n \"id\": new_app.id,\n \"token\": 
new_app.api_token,\n \"created\": new_app.created,\n \"updated\": new_app.last_updated,\n \"oauth\": {\n \"client_id\": new_app.client_id,\n \"client_secret\": new_app.client_secret,\n \"callback_url\": new_app.callback_url,\n \"scopes\": s.get_all_scopes()\n },\n \"webhook\": {\n \"verification_secret\": new_app.webhook.verification_secret,\n },\n \"analytics\": {\n \"requests\": 0,\n \"remaining_quota\": User._meta.get_field('oauth_quota').get_default(),\n \"users\": 0,\n \"users_per_dept\": []\n }\n }\n })\n\n\ndef rename_app(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n new_name = request.POST[\"new_name\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have app_id/new_name\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user, deleted=False)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n app.name = new_name\n app.save()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App sucessfully renamed.\",\n \"date\": app.last_updated\n })\n\n\ndef regenerate_app_token(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n app.regenerate_token()\n new_api_token = app.api_token\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App token sucessfully regenerated.\",\n \"app\": {\n \"id\": app.id,\n \"token\": new_api_token,\n \"date\": app.last_updated\n }\n })\n\n\ndef delete_app(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n app.deleted = True\n webhook = app.webhook\n webhook.url = \"\"\n webhook.siteid = \"\"\n webhook.roomid = \"\"\n webhook.contact = \"\"\n webhook.enabled = False\n webhook.save()\n app.save()\n\n 
return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"App sucessfully deleted.\",\n })\n\n\ndef set_callback_url(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n try:\n app_id = request.POST[\"app_id\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n try:\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"User ID not set in session. Please log in again.\"\n })\n response.status_code = 400\n return response\n\n try:\n new_callback_url = request.POST[\"callback_url\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have a Callback URL.\"\n })\n response.status_code = 400\n return response\n url_not_safe_saved = is_url_unsafe(new_callback_url)\n if url_not_safe_saved:\n if url_not_safe_saved == NOT_HTTPS:\n message = \"The requested callback URL does not \" \\\n \"start with 'https://'.\"\n elif url_not_safe_saved == NOT_VALID:\n message = \"The requested callback URL is not valid.\"\n elif url_not_safe_saved == URL_BLACKLISTED:\n message = \"The requested callback URL is forbidden.\"\n elif url_not_safe_saved == NOT_PUBLIC:\n message = \"The requested callback URL is not publicly available.\"\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": message\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n\n app = apps[0]\n app.callback_url = new_callback_url\n app.save()\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"Callback URL successfully changed.\",\n })\n\n\ndef update_scopes(request):\n if request.method != \"POST\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method POST\"\n })\n response.status_code = 400\n return response\n\n try:\n app_id = request.POST[\"app_id\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Request does not have an app_id.\"\n })\n response.status_code = 400\n return response\n\n try:\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"User ID not set in session. 
Please log in again.\"\n })\n response.status_code = 400\n return response\n\n try:\n scopes_json = request.POST[\"scopes\"]\n except KeyError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"No scopes data attached.\"\n })\n response.status_code = 400\n return response\n\n try:\n scopes = json.loads(scopes_json)\n except ValueError:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Invalid scope data that could not be parsed.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n apps = App.objects.filter(id=app_id, user=user)\n if len(apps) == 0:\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"App does not exist.\"\n })\n response.status_code = 400\n return response\n else:\n app = apps[0]\n current = app.scope.scope_number\n s = Scopes()\n try:\n for scope in scopes:\n if \"checked\" in scope and scope[\"checked\"]:\n current = s.add_scope(current, scope[\"name\"])\n else:\n current = s.remove_scope(current, scope[\"name\"])\n\n app.scope.scope_number = current\n app.scope.save()\n app.save()\n except (KeyError, ValueError, TypeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"Invalid scope data that could not be iterated.\"\n })\n response.status_code = 400\n return response\n\n return PrettyJsonResponse({\n \"success\": True,\n \"message\": \"Scope successfully changed.\",\n })\n\n\ndef get_number_of_requests(token):\n if token.startswith('uclapi-user-'):\n calls = APICall.objects.filter(token__token__exact=token)\n elif token.startswith('uclapi-'):\n calls = APICall.objects.filter(app__api_token__exact=token)\n else:\n return None\n\n return len(calls)\n\n\ndef number_of_requests(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n calls = get_number_of_requests(token)\n if calls is None:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"Token is invalid\"\n })\n response.status_code = 400\n return response\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"num\": calls,\n })\n\n\ndef get_apps(request):\n if request.method != \"GET\":\n response = PrettyJsonResponse({\n \"success\": False,\n \"error\": \"Request is not of method GET\"\n })\n response.status_code = 400\n return response\n try:\n user_id = request.session[\"user_id\"]\n except (KeyError, AttributeError):\n response = PrettyJsonResponse({\n \"success\": False,\n \"message\": \"User ID not set in session. 
Please log in again.\"\n })\n response.status_code = 400\n return response\n\n user = get_user_by_id(user_id)\n\n user_meta = {\n \"name\": user.full_name,\n \"cn\": user.cn,\n \"department\": user.department,\n \"intranet_groups\": user.raw_intranet_groups,\n \"apps\": []\n }\n\n user_apps = App.objects.filter(user=user, deleted=False)\n\n s = Scopes()\n\n for app in user_apps:\n user_meta[\"apps\"].append({\n \"name\": app.name,\n \"id\": app.id,\n \"token\": app.api_token,\n \"created\": app.created,\n \"updated\": app.last_updated,\n \"oauth\": {\n \"client_id\": app.client_id,\n \"client_secret\": app.client_secret,\n \"callback_url\": app.callback_url,\n \"scopes\": s.scope_dict_all(app.scope.scope_number)\n },\n \"webhook\": {\n \"verification_secret\": app.webhook.verification_secret,\n \"url\": app.webhook.url,\n \"siteid\": app.webhook.siteid,\n \"roomid\": app.webhook.roomid,\n \"contact\": app.webhook.contact\n },\n \"analytics\": {\n \"requests\": get_number_of_requests(app.api_token),\n \"remaining_quota\": get_quota_remaining(app.api_token),\n \"users\": get_users_per_app(app.api_token),\n \"users_per_dept\": get_users_per_app_per_dept(app.api_token)\n }\n })\n\n return PrettyJsonResponse(user_meta)\n\n\ndef get_quota_remaining(token):\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n\n if token.startswith('uclapi-user-'):\n Otoken = OAuthToken.objects.filter(token__exact=token).first()\n\n cache_key = \"oauth:\" + Otoken.user.email\n limit = Otoken.user.oauth_quota\n\n elif token.startswith('uclapi-'):\n app = App.objects.filter(api_token__exact=token).first()\n cache_key = app.user.email\n limit = app.user.dev_quota\n\n else:\n return None\n\n count_data = r.get(cache_key)\n\n if count_data:\n count_data = int(r.get(cache_key))\n else:\n count_data = 0\n\n return limit - count_data\n\n\ndef quota_remaining(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n quota = get_quota_remaining(token)\n if quota is None:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"Token is invalid\"\n })\n response.status_code = 400\n return response\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"remaining\": quota,\n })\n\n\ndef most_popular_service(request):\n most_common = APICall.objects.values(\"service\").annotate(\n count=Count('service')).order_by(\"-count\")\n most_common = list(most_common)\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"data\": most_common\n })\n\n\ndef most_popular_method(request):\n service = request.GET.get(\"service\", False)\n split_by_service = request.GET.get(\"split_services\", \"false\")\n split_by_service = False if split_by_service.lower() in [\n \"false\", \"0\"] else True\n\n if service:\n most_common = APICall.objects.filter(service__exact=service)\\\n .values(\"service\", \"method\").annotate(count=Count('method')).order_by(\"-count\")\n else:\n most_common = APICall.objects\\\n .values(\"service\", \"method\").annotate(count=Count('method')).order_by(\"-count\")\n\n if not split_by_service:\n t_most_common_counter = {}\n for m in most_common:\n if m[\"method\"].split(\"/\")[0] in t_most_common_counter:\n t_most_common_counter[m[\"method\"].split(\"/\")[0]] += m[\"count\"]\n else:\n t_most_common_counter[m[\"method\"].split(\"/\")[0]] = m[\"count\"]\n print(t_most_common_counter)\n\n most_common = [{\"method\": method, \"count\": count}\n for method, count in 
t_most_common_counter.items()]\n else:\n temp_most_common_aggregate = {}\n for method in most_common:\n if method[\"service\"] in temp_most_common_aggregate:\n temp_most_common_aggregate[method[\"service\"]].append({\n \"method\": method[\"method\"],\n \"count\": method[\"count\"]\n })\n else:\n temp_most_common_aggregate[method[\"service\"]] = [{\n \"method\": method[\"method\"],\n \"count\": method[\"count\"]\n }]\n most_common = temp_most_common_aggregate\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"data\": most_common\n })\n\n\ndef get_users_per_app(token, start=None, end=None):\n if start and end:\n start_date = datetime.strptime(start, \"%Y-%m-%d\")\n end_date = datetime.strptime(end, \"%Y-%m-%d\")\n\n users = OAuthToken.objects.filter(creation_date__gte=start_date,\n creation_date__lte=end_date,\n app__api_token__exact=token)\n else:\n users = OAuthToken.objects.filter(app__api_token__exact=token)\n\n return len(users)\n\n\ndef users_per_app(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n try:\n start = request.GET[\"start_date\"]\n end = request.GET[\"end_date\"]\n users_count = get_users_per_app(token, start, end)\n except MultiValueDictKeyError:\n users_count = get_users_per_app(token)\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"users\": users_count\n })\n\n\ndef get_users_per_app_per_dept(token):\n users = User.objects.filter(oauthtoken__app__api_token__exact=token)\\\n .values(\"department\").annotate(count=Count('department'))\\\n .order_by(\"-count\")\n return list(users)\n\n\ndef users_per_app_by_dept(request):\n try:\n token = request.GET[\"token\"]\n except MultiValueDictKeyError:\n response = JsonResponse({\n \"ok\": False,\n \"message\": \"No token provided\"\n })\n response.status_code = 400\n return response\n\n users = get_users_per_app_per_dept(token)\n\n return PrettyJsonResponse({\n \"ok\": True,\n \"data\": users\n })\n", "path": "backend/uclapi/dashboard/api_applications.py"}]} |
gh_patches_debug_1336 | rasdani/github-patches | git_diff | getredash__redash-716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleting and re-adding widgets to a dashboard breaks it
There's a specific set of steps that has messed up some of our users' dashboards:
1. Create a new dashboard
2. Add multiple widgets to it.
3. Remove all those widgets from the dashboard
4. Re-add some widgets
5. Click the "Edit Dashboard (Name/Layout)" button
6. Click Save without changing anything.
7. Refresh the page
This makes none of the widgets appear and causes the "Layout" array in the admin panel to contain one or more "null" values (depending on how many widgets you added/deleted):

The only way to recover from this state is to manually delete the "null" values through the admin interface.
This is on re:dash version 0.8.2
--- END ISSUE ---
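For context on the data involved: the dashboard stores its layout as a JSON array of rows of widget ids, and after the delete/re-add cycle above some entries evidently end up referencing widgets that no longer exist, which the layout editor then saves back as null. The sketch below is purely illustrative of that data shape and of one way to scrub it; it is not re:dash's actual fix (the accepted patch further down instead makes the DELETE endpoint return the updated layout so the client can stay in sync).

```python
import json

def sanitize_layout(layout_json, existing_widget_ids):
    """Drop null entries and ids of deleted widgets from a dashboard layout.

    `layout_json` is the JSON string stored on the dashboard, e.g. "[[1, 2], [null]]";
    `existing_widget_ids` is the set of widget ids that still exist.
    Illustrative helper only, not part of re:dash.
    """
    layout = json.loads(layout_json)
    cleaned = []
    for row in layout:
        kept = [wid for wid in (row or []) if wid is not None and wid in existing_widget_ids]
        if kept:
            cleaned.append(kept)
    return json.dumps(cleaned)

# A layout that went through a few delete/re-add cycles:
print(sanitize_layout("[[null], [5, 6], null]", {5, 6}))  # -> [[5, 6]]
```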
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/widgets.py`
Content:
```
1 import json
2
3 from flask import request
4
5 from redash import models
6 from redash.wsgi import api
7 from redash.permissions import require_permission
8 from redash.handlers.base import BaseResource
9
10
11 class WidgetListAPI(BaseResource):
12 @require_permission('edit_dashboard')
13 def post(self):
14 widget_properties = request.get_json(force=True)
15 widget_properties['options'] = json.dumps(widget_properties['options'])
16 widget_properties.pop('id', None)
17 widget_properties['dashboard'] = widget_properties.pop('dashboard_id')
18 widget_properties['visualization'] = widget_properties.pop('visualization_id')
19 widget = models.Widget(**widget_properties)
20 widget.save()
21
22 layout = json.loads(widget.dashboard.layout)
23 new_row = True
24
25 if len(layout) == 0 or widget.width == 2:
26 layout.append([widget.id])
27 elif len(layout[-1]) == 1:
28 neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])
29 if neighbour_widget.width == 1:
30 layout[-1].append(widget.id)
31 new_row = False
32 else:
33 layout.append([widget.id])
34 else:
35 layout.append([widget.id])
36
37 widget.dashboard.layout = json.dumps(layout)
38 widget.dashboard.save()
39
40 return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}
41
42
43 class WidgetAPI(BaseResource):
44 @require_permission('edit_dashboard')
45 def delete(self, widget_id):
46 widget = models.Widget.get(models.Widget.id == widget_id)
47 widget.delete_instance()
48
49 api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
50 api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -46,5 +46,7 @@
widget = models.Widget.get(models.Widget.id == widget_id)
widget.delete_instance()
+ return {'layout': widget.dashboard.layout }
+
api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
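As a follow-up on the accepted patch above: returning `{'layout': widget.dashboard.layout}` from the DELETE handler gives the frontend an authoritative copy of the layout after every removal, so it no longer has to reconstruct which slots became empty, which appears to be the state that was previously being serialized back as null.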
| {"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -46,5 +46,7 @@\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n \n+ return {'layout': widget.dashboard.layout }\n+\n api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\n api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "issue": "Deleting and re-adding widgets to a dashboard breaks it\nThere's a specific set of steps that has messed up some of our users' dashboards:\n1. Create a new dashboard\n2. Add multiple widgets to it.\n3. Remove all those widgets from the dashboard\n4. Re-add some widgets\n5. Click the \"Edit Dashboard (Name/Layout)\" button\n6. Click Save without changing anything.\n7. Refresh the page\n\nThis makes none of the widgets appear and causes the \"Layout\" array in the admin panel to contain one or more \"null\" values (depending on how many widgets you added/deleted):\n\n\nThe only way to recover from this state is to manually delete the \"null\" values through the admin interface.\n\nThis is on re:dash version 0.8.2\n\n", "before_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.wsgi import api\nfrom redash.permissions import require_permission\nfrom redash.handlers.base import BaseResource\n\n\nclass WidgetListAPI(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n widget_properties = request.get_json(force=True)\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = widget_properties.pop('dashboard_id')\n widget_properties['visualization'] = widget_properties.pop('visualization_id')\n widget = models.Widget(**widget_properties)\n widget.save()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n\n\nclass WidgetAPI(BaseResource):\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n\napi.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\napi.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "path": "redash/handlers/widgets.py"}], "after_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.wsgi import api\nfrom redash.permissions import require_permission\nfrom redash.handlers.base import BaseResource\n\n\nclass WidgetListAPI(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n widget_properties = request.get_json(force=True)\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = widget_properties.pop('dashboard_id')\n widget_properties['visualization'] = widget_properties.pop('visualization_id')\n widget = models.Widget(**widget_properties)\n 
widget.save()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n\n\nclass WidgetAPI(BaseResource):\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n\n return {'layout': widget.dashboard.layout }\n\napi.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\napi.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "path": "redash/handlers/widgets.py"}]} |
gh_patches_debug_1337 | rasdani/github-patches | git_diff | streamlink__streamlink-5622 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.dlive: Failed to fetch segment | 403 Client Error
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
[cli][info] Your Streamlink version (6.2.1) is up to date!
### Description
I navigate to the folder where streamlink.exe is located and enter the command "streamlink.exe https://dlive.tv/cryptokaprika best". It doesn't matter which channel is specified; lately the same error comes up for all of them.
Here is the complete output that is shown to me in the command line:
C:\Program Files\Streamlink\bin>streamlink.exe https://dlive.tv/cryptokaprika best
[cli][info] Found matching plugin dlive for URL https://dlive.tv/cryptokaprika
[cli][info] Available streams: src (worst, best)
[cli][info] Opening stream: src (hls)
[cli][info] Starting player: C:\Program Files\VideoLAN\VLC\vlc.exe
[stream.hls][error] Failed to fetch segment 79790: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079790.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079790.ts)
[stream.hls][error] Failed to fetch segment 79791: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079791.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079791.ts)
[cli][info] Stream ended
[cli][info] Closing currently open stream...
The VLC Media Player also starts, but it only shows the following picture and refers me to the homepage: [https://imgur.com/a/NpuAHQ3](https://imgur.com/a/NpuAHQ3)
### Debug log
```text
C:\Program Files\Streamlink\bin>streamlink.exe --loglevel=debug https://dlive.tv/cryptokaprika best
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.5
[cli][debug] OpenSSL: OpenSSL 3.0.9 30 May 2023
[cli][debug] Streamlink: 6.2.1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.7.22
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.3
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.19.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.8.0
[cli][debug] urllib3: 2.0.6
[cli][debug] websocket-client: 1.6.3
[cli][debug] Arguments:
[cli][debug] url=https://dlive.tv/cryptokaprika
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin dlive for URL https://dlive.tv/cryptokaprika
[plugins.dlive][debug] Getting live HLS streams for cryptokaprika
[utils.l10n][debug] Language code: en_US
[cli][info] Available streams: src (worst, best)
[cli][info] Opening stream: src (hls)
[cli][info] Starting player: C:\Program Files\VideoLAN\VLC\vlc.exe
[stream.hls][debug] Reloading playlist
[cli][debug] Pre-buffering 8192 bytes
[stream.hls][debug] First Sequence: 79786; Last Sequence: 79791
[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 79786; End Sequence: 79791
[stream.hls][debug] Adding segment 79786 to queue
[stream.hls][debug] Adding segment 79787 to queue
[stream.hls][debug] Adding segment 79788 to queue
[stream.hls][debug] Adding segment 79789 to queue
[stream.hls][debug] Adding segment 79790 to queue
[stream.hls][debug] Adding segment 79791 to queue
[stream.segmented][debug] Closing worker thread
[stream.hls][debug] Writing segment 79786 to output
[stream.hls][debug] Segment 79786 complete
[cli.output][debug] Opening subprocess: ['C:\\Program Files\\VideoLAN\\VLC\\vlc.exe', '--input-title-format', 'https://dlive.tv/cryptokaprika', '-']
[stream.hls][debug] Writing segment 79787 to output
[stream.hls][debug] Segment 79787 complete
[stream.hls][debug] Writing segment 79788 to output
[stream.hls][debug] Segment 79788 complete
[stream.hls][debug] Writing segment 79789 to output
[stream.hls][debug] Segment 79789 complete
[cli][debug] Writing stream to output
[stream.hls][error] Failed to fetch segment 79790: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079790.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079790.ts)
[stream.hls][error] Failed to fetch segment 79791: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079791.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079791.ts)
[stream.segmented][debug] Closing writer thread
[cli][info] Stream ended
[cli][info] Closing currently open stream...
```
--- END ISSUE ---
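A note on diagnosing this class of failure: the first few segments in the log download fine before the CDN starts answering 403, and the patch that eventually landed (shown later in this record) simply attaches a Referer header, which suggests the CDN gates segment requests on it. A throwaway check along these lines can confirm that; the segment URL is copied from the debug log above and will long since have expired, so treat it strictly as a sketch:

```python
# Illustrative check only: does the CDN's answer change when a Referer is sent?
import requests

SEGMENT = "https://videos.prd.dlivecdn.com/dlive/0000079790.ts"  # from the log above

for headers in ({}, {"Referer": "https://dlive.tv/"}):
    resp = requests.get(SEGMENT, headers=headers, timeout=10)
    print(headers or "no extra headers", "->", resp.status_code)
```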
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/dlive.py`
Content:
```
1 """
2 $description Global live-streaming platform owned by BitTorrent, Inc.
3 $url dlive.tv
4 $type live, vod
5 $metadata author
6 $metadata title
7 """
8
9 import logging
10 import re
11 from urllib.parse import unquote_plus
12
13 from streamlink.plugin import Plugin, pluginmatcher
14 from streamlink.plugin.api import validate
15 from streamlink.stream.hls import HLSStream
16
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(re.compile(r"""
22 https?://(?:www\.)?dlive\.tv/
23 (?:
24 p/(?P<video>[^/]+)
25 |
26 (?P<channel>[^/]+)
27 )
28 """, re.VERBOSE))
29 class DLive(Plugin):
30 URL_LIVE = "https://live.prd.dlive.tv/hls/live/{username}.m3u8"
31
32 QUALITY_WEIGHTS = {
33 "src": 1080,
34 }
35
36 @classmethod
37 def stream_weight(cls, key):
38 weight = cls.QUALITY_WEIGHTS.get(key)
39 if weight:
40 return weight, "dlive"
41
42 return super().stream_weight(key)
43
44 def _get_streams_video(self, video):
45 log.debug(f"Getting video HLS streams for {video}")
46 hls_url = self.session.http.get(self.url, schema=validate.Schema(
47 validate.regex(re.compile(r'"playbackUrl"\s*:\s*"([^"]+\.m3u8)"')),
48 validate.get(1),
49 validate.transform(unquote_plus),
50 validate.transform(lambda url: bytes(url, "utf-8").decode("unicode_escape")),
51 validate.url(),
52 ))
53
54 return HLSStream.parse_variant_playlist(self.session, hls_url)
55
56 def _get_streams_live(self, channel):
57 log.debug(f"Getting live HLS streams for {channel}")
58 query = f"""query {{
59 userByDisplayName(displayname:"{channel}") {{
60 livestream {{
61 title
62 }}
63 username
64 }}
65 }}"""
66 livestream, username = self.session.http.post(
67 "https://graphigo.prd.dlive.tv/",
68 json={"query": query},
69 schema=validate.Schema(
70 validate.parse_json(),
71 {
72 "data": {
73 "userByDisplayName": {
74 "livestream": {
75 "title": str,
76 },
77 "username": str,
78 },
79 },
80 },
81 validate.get(("data", "userByDisplayName")),
82 validate.union_get("livestream", "username"),
83 ),
84 )
85
86 self.author = channel
87 self.title = livestream["title"]
88
89 return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username))
90
91 def _get_streams(self):
92 video = self.match.group("video")
93 channel = self.match.group("channel")
94
95 if video:
96 return self._get_streams_video(video)
97 elif channel:
98 return self._get_streams_live(channel)
99
100
101 __plugin__ = DLive
102
```
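For readers unfamiliar with the plugin's flow: for live URLs it first resolves the channel's display name to the account username via dlive's GraphQL endpoint, then fills that username into `URL_LIVE`. Below is a standalone sketch of that lookup, mirroring the endpoint and query used in the code above (the channel name is the one from the report):

```python
# Standalone illustration of the plugin's display-name -> username resolution.
import requests

channel = "cryptokaprika"
query = f'query {{ userByDisplayName(displayname:"{channel}") {{ username livestream {{ title }} }} }}'
resp = requests.post("https://graphigo.prd.dlive.tv/", json={"query": query}, timeout=10)
user = resp.json()["data"]["userByDisplayName"]
print(user["username"], "->", f"https://live.prd.dlive.tv/hls/live/{user['username']}.m3u8")
```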
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/dlive.py b/src/streamlink/plugins/dlive.py
--- a/src/streamlink/plugins/dlive.py
+++ b/src/streamlink/plugins/dlive.py
@@ -86,7 +86,7 @@
self.author = channel
self.title = livestream["title"]
- return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username))
+ return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username), headers={"Referer": "https://dlive.tv/"})
def _get_streams(self):
video = self.match.group("video")
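A note on why this one-line change is enough: the keyword arguments passed to `HLSStream.parse_variant_playlist` are reused for the follow-up playlist and segment requests, so a single `headers={"Referer": ...}` also covers the `.ts` fetches that were returning 403. Until a patched build is available, the same effect should be achievable from the outside; the hedged sketch below uses streamlink's Python API (the equivalent CLI switch would be `--http-header "Referer=https://dlive.tv/"`):

```python
# Workaround sketch for unpatched installs: put the Referer on the whole session.
from streamlink import Streamlink

session = Streamlink()
session.set_option("http-headers", {"Referer": "https://dlive.tv/"})
streams = session.streams("https://dlive.tv/cryptokaprika")
print(sorted(streams))  # e.g. ['best', 'src', 'worst']
```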
| {"golden_diff": "diff --git a/src/streamlink/plugins/dlive.py b/src/streamlink/plugins/dlive.py\n--- a/src/streamlink/plugins/dlive.py\n+++ b/src/streamlink/plugins/dlive.py\n@@ -86,7 +86,7 @@\n self.author = channel\n self.title = livestream[\"title\"]\n \n- return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username))\n+ return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username), headers={\"Referer\": \"https://dlive.tv/\"})\n \n def _get_streams(self):\n video = self.match.group(\"video\")\n", "issue": "plugins.dlive: Failed to fetch segment | 403 Client Error\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\n[cli][info] Your Streamlink version (6.2.1) is up to date!\n\n### Description\n\nI navigate to the folder where streamlink.exe is located and enter the command \"streamlink.exe https://dlive.tv/cryptokaprika best\". It doesn't matter which channel is specified, the same error comes up for all of them as of late.\r\n\r\n\r\nHere is the complete output that is shown to me in the command line:\r\n\r\n\r\nC:\\Program Files\\Streamlink\\bin>streamlink.exe https://dlive.tv/cryptokaprika best\r\n\r\n[cli][info] Found matching plugin dlive for URL https://dlive.tv/cryptokaprika\r\n\r\n[cli][info] Available streams: src (worst, best)\r\n\r\n[cli][info] Opening stream: src (hls)\r\n\r\n[cli][info] Starting player: C:\\Program Files\\VideoLAN\\VLC\\vlc.exe\r\n\r\n[stream.hls][error] Failed to fetch segment 79790: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079790.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079790.ts)\r\n\r\n[stream.hls][error] Failed to fetch segment 79791: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079791.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079791.ts)\r\n\r\n[cli][info] Stream ended\r\n\r\n[cli][info] Closing currently open stream...\r\n\r\n\r\nThe VLC Media Player also starts, but I only get the following picture and am referred to the homepage: [https://imgur.com/a/NpuAHQ3](https://imgur.com/a/NpuAHQ3)\n\n### Debug log\n\n```text\nC:\\Program Files\\Streamlink\\bin>streamlink.exe --loglevel=debug https://dlive.tv/cryptokaprika best\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.5\r\n[cli][debug] OpenSSL: OpenSSL 3.0.9 30 May 2023\r\n[cli][debug] Streamlink: 6.2.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.7.22\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.19.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.8.0\r\n[cli][debug] urllib3: 2.0.6\r\n[cli][debug] websocket-client: 1.6.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://dlive.tv/cryptokaprika\r\n[cli][debug] 
stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program Files\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin dlive for URL https://dlive.tv/cryptokaprika\r\n[plugins.dlive][debug] Getting live HLS streams for cryptokaprika\r\n[utils.l10n][debug] Language code: en_US\r\n[cli][info] Available streams: src (worst, best)\r\n[cli][info] Opening stream: src (hls)\r\n[cli][info] Starting player: C:\\Program Files\\VideoLAN\\VLC\\vlc.exe\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][debug] First Sequence: 79786; Last Sequence: 79791\r\n[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 79786; End Sequence: 79791\r\n[stream.hls][debug] Adding segment 79786 to queue\r\n[stream.hls][debug] Adding segment 79787 to queue\r\n[stream.hls][debug] Adding segment 79788 to queue\r\n[stream.hls][debug] Adding segment 79789 to queue\r\n[stream.hls][debug] Adding segment 79790 to queue\r\n[stream.hls][debug] Adding segment 79791 to queue\r\n[stream.segmented][debug] Closing worker thread\r\n[stream.hls][debug] Writing segment 79786 to output\r\n[stream.hls][debug] Segment 79786 complete\r\n[cli.output][debug] Opening subprocess: ['C:\\\\Program Files\\\\VideoLAN\\\\VLC\\\\vlc.exe', '--input-title-format', 'https://dlive.tv/cryptokaprika', '-']\r\n[stream.hls][debug] Writing segment 79787 to output\r\n[stream.hls][debug] Segment 79787 complete\r\n[stream.hls][debug] Writing segment 79788 to output\r\n[stream.hls][debug] Segment 79788 complete\r\n[stream.hls][debug] Writing segment 79789 to output\r\n[stream.hls][debug] Segment 79789 complete\r\n[cli][debug] Writing stream to output\r\n[stream.hls][error] Failed to fetch segment 79790: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079790.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079790.ts)\r\n[stream.hls][error] Failed to fetch segment 79791: Unable to open URL: https://videos.prd.dlivecdn.com/dlive/0000079791.ts (403 Client Error: Forbidden for url: https://videos.prd.dlivecdn.com/dlive/0000079791.ts)\r\n[stream.segmented][debug] Closing writer thread\r\n[cli][info] Stream ended\r\n[cli][info] Closing currently open stream...\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Global live-streaming platform owned by BitTorrent, Inc.\n$url dlive.tv\n$type live, vod\n$metadata author\n$metadata title\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import unquote_plus\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?dlive\\.tv/\n (?:\n p/(?P<video>[^/]+)\n |\n (?P<channel>[^/]+)\n )\n\"\"\", re.VERBOSE))\nclass DLive(Plugin):\n URL_LIVE = \"https://live.prd.dlive.tv/hls/live/{username}.m3u8\"\n\n QUALITY_WEIGHTS = {\n \"src\": 1080,\n }\n\n @classmethod\n def stream_weight(cls, key):\n weight = cls.QUALITY_WEIGHTS.get(key)\n if weight:\n return weight, \"dlive\"\n\n return super().stream_weight(key)\n\n def _get_streams_video(self, video):\n log.debug(f\"Getting video HLS streams for {video}\")\n hls_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.regex(re.compile(r'\"playbackUrl\"\\s*:\\s*\"([^\"]+\\.m3u8)\"')),\n validate.get(1),\n validate.transform(unquote_plus),\n validate.transform(lambda url: bytes(url, 
\"utf-8\").decode(\"unicode_escape\")),\n validate.url(),\n ))\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n def _get_streams_live(self, channel):\n log.debug(f\"Getting live HLS streams for {channel}\")\n query = f\"\"\"query {{\n userByDisplayName(displayname:\"{channel}\") {{\n livestream {{\n title\n }}\n username\n }}\n }}\"\"\"\n livestream, username = self.session.http.post(\n \"https://graphigo.prd.dlive.tv/\",\n json={\"query\": query},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"data\": {\n \"userByDisplayName\": {\n \"livestream\": {\n \"title\": str,\n },\n \"username\": str,\n },\n },\n },\n validate.get((\"data\", \"userByDisplayName\")),\n validate.union_get(\"livestream\", \"username\"),\n ),\n )\n\n self.author = channel\n self.title = livestream[\"title\"]\n\n return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username))\n\n def _get_streams(self):\n video = self.match.group(\"video\")\n channel = self.match.group(\"channel\")\n\n if video:\n return self._get_streams_video(video)\n elif channel:\n return self._get_streams_live(channel)\n\n\n__plugin__ = DLive\n", "path": "src/streamlink/plugins/dlive.py"}], "after_files": [{"content": "\"\"\"\n$description Global live-streaming platform owned by BitTorrent, Inc.\n$url dlive.tv\n$type live, vod\n$metadata author\n$metadata title\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import unquote_plus\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?dlive\\.tv/\n (?:\n p/(?P<video>[^/]+)\n |\n (?P<channel>[^/]+)\n )\n\"\"\", re.VERBOSE))\nclass DLive(Plugin):\n URL_LIVE = \"https://live.prd.dlive.tv/hls/live/{username}.m3u8\"\n\n QUALITY_WEIGHTS = {\n \"src\": 1080,\n }\n\n @classmethod\n def stream_weight(cls, key):\n weight = cls.QUALITY_WEIGHTS.get(key)\n if weight:\n return weight, \"dlive\"\n\n return super().stream_weight(key)\n\n def _get_streams_video(self, video):\n log.debug(f\"Getting video HLS streams for {video}\")\n hls_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.regex(re.compile(r'\"playbackUrl\"\\s*:\\s*\"([^\"]+\\.m3u8)\"')),\n validate.get(1),\n validate.transform(unquote_plus),\n validate.transform(lambda url: bytes(url, \"utf-8\").decode(\"unicode_escape\")),\n validate.url(),\n ))\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n def _get_streams_live(self, channel):\n log.debug(f\"Getting live HLS streams for {channel}\")\n query = f\"\"\"query {{\n userByDisplayName(displayname:\"{channel}\") {{\n livestream {{\n title\n }}\n username\n }}\n }}\"\"\"\n livestream, username = self.session.http.post(\n \"https://graphigo.prd.dlive.tv/\",\n json={\"query\": query},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"data\": {\n \"userByDisplayName\": {\n \"livestream\": {\n \"title\": str,\n },\n \"username\": str,\n },\n },\n },\n validate.get((\"data\", \"userByDisplayName\")),\n validate.union_get(\"livestream\", \"username\"),\n ),\n )\n\n self.author = channel\n self.title = livestream[\"title\"]\n\n return HLSStream.parse_variant_playlist(self.session, self.URL_LIVE.format(username=username), headers={\"Referer\": \"https://dlive.tv/\"})\n\n def _get_streams(self):\n video = self.match.group(\"video\")\n channel = self.match.group(\"channel\")\n\n if video:\n return 
self._get_streams_video(video)\n elif channel:\n return self._get_streams_live(channel)\n\n\n__plugin__ = DLive\n", "path": "src/streamlink/plugins/dlive.py"}]} |
gh_patches_debug_1338 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
crash in command start after running sstart
### Description
Crash when running `start` after `sstart`.
### Steps to reproduce
Gdb session history:
```
sstart
start
set exception-verbose on
start
bugreport --run-broweser
```
### My setup
```
Platform: Linux-6.2.0-26-generic-x86_64-with-glibc2.35
OS: Ubuntu 22.04.3 LTS
OS ABI: #26~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Thu Jul 13 16:27:29 UTC 2
Architecture: x86_64
Endian: little
Charset: utf-8
Width: 156
Height: 78
Gdb: 12.1
Python: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]
Pwndbg: 2023.07.17 build: e959a47
Capstone: 5.0.1280
Unicorn: 2.0.1
This GDB was configured as follows:
configure --host=x86_64-linux-gnu --target=x86_64-linux-gnu
--with-auto-load-dir=$debugdir:$datadir/auto-load
--with-auto-load-safe-path=$debugdir:$datadir/auto-load
--with-expat
--with-gdb-datadir=/usr/share/gdb (relocatable)
--with-jit-reader-dir=/usr/lib/gdb (relocatable)
--without-libunwind-ia64
--with-lzma
--with-babeltrace
--with-intel-pt
--with-mpfr
--with-xxhash
--with-python=/usr (relocatable)
--with-python-libdir=/usr/lib (relocatable)
--with-debuginfod
--without-guile
--enable-source-highlight
--with-separate-debug-dir=/usr/lib/debug (relocatable)
--with-system-gdbinit=/etc/gdb/gdbinit
--with-system-gdbinit-dir=/etc/gdb/gdbinit.d
("Relocatable" means the directory can be moved with the GDB installation
tree, and GDB will still find it.)
```

--- END ISSUE ---
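For anyone trying to reproduce this without an interactive session: the reported sequence can be scripted in one invocation, e.g. `gdb -q ./some_binary -ex 'set exception-verbose on' -ex sstart -ex start` (the binary path is a placeholder and pwndbg is assumed to be loaded from the usual gdbinit). Keeping `set exception-verbose on` first makes the full Python traceback from the failing `start` visible, which helps narrow down where in the code segments below, e.g. `pwndbg/gdblib/symbol.py`, the failure originates.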
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/gdblib/symbol.py`
Content:
```
1 """
2 Looking up addresses for function names / symbols, and
3 vice-versa.
4
5 Uses IDA when available if there isn't sufficient symbol
6 information available.
7 """
8 from __future__ import annotations
9
10 import re
11
12 import gdb
13
14 import pwndbg.gdblib.android
15 import pwndbg.gdblib.arch
16 import pwndbg.gdblib.elf
17 import pwndbg.gdblib.events
18 import pwndbg.gdblib.file
19 import pwndbg.gdblib.memory
20 import pwndbg.gdblib.qemu
21 import pwndbg.gdblib.remote
22 import pwndbg.gdblib.stack
23 import pwndbg.gdblib.vmmap
24 import pwndbg.ida
25 import pwndbg.lib.cache
26
27
28 def _get_debug_file_directory():
29 """
30 Retrieve the debug file directory path.
31
32 The debug file directory path ('show debug-file-directory') is a comma-
33 separated list of directories which GDB will look in to find the binaries
34 currently loaded.
35 """
36 result = gdb.execute("show debug-file-directory", to_string=True, from_tty=False)
37 expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
38
39 match = re.search(expr, result)
40
41 if match:
42 return match.group(1)
43 return ""
44
45
46 def _set_debug_file_directory(d) -> None:
47 gdb.execute(f"set debug-file-directory {d}", to_string=True, from_tty=False)
48
49
50 def _add_debug_file_directory(d) -> None:
51 current = _get_debug_file_directory()
52 if current:
53 _set_debug_file_directory(f"{current}:{d}")
54 else:
55 _set_debug_file_directory(d)
56
57
58 if "/usr/lib/debug" not in _get_debug_file_directory():
59 _add_debug_file_directory("/usr/lib/debug")
60
61
62 @pwndbg.lib.cache.cache_until("objfile")
63 def get(address: int, gdb_only=False) -> str:
64 """
65 Retrieve the name for the symbol located at `address` - either from GDB or from IDA sync
66 Passing `gdb_only=True`
67 """
68 # Note: we do not return "" on `address < pwndbg.gdblib.memory.MMAP_MIN_ADDR`
69 # because this may be used to find out the symbol name on PIE binaries that weren't started yet
70 # and then their symbol addresses can be found by GDB on their (non-rebased) offsets
71
72 # Fast path: GDB's `info symbol` returns 'Numeric constant too large' here
73 if address >= ((1 << 64) - 1):
74 return ""
75
76 # This sucks, but there's not a GDB API for this.
77 result = gdb.execute("info symbol %#x" % int(address), to_string=True, from_tty=False)
78
79 if not gdb_only and result.startswith("No symbol"):
80 address = int(address)
81 exe = pwndbg.gdblib.elf.exe()
82 if exe:
83 exe_map = pwndbg.gdblib.vmmap.find(exe.address)
84 if exe_map and address in exe_map:
85 res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
86 return res or ""
87
88 # If there are newlines, which means that there are multiple symbols for the address
89 # then use the first one (see also #1610)
90 result = result[: result.index("\n")]
91
92 # See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624
93 # The most often encountered formats looks like this:
94 # "main in section .text of /bin/bash"
95 # "main + 3 in section .text of /bin/bash"
96 # "system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6"
97 # "No symbol matches system-1"
98 # But there are some others that we have to account for as well
99 if " in section " in result:
100 loc_string, _ = result.split(" in section ")
101 elif " in load address range of " in result:
102 loc_string, _ = result.split(" in load address range of ")
103 elif " overlay section " in result:
104 result, _ = result.split(" overlay section ")
105 loc_string, _ = result.split(" in ")
106 else:
107 loc_string = ""
108
109 # If there is 'main + 87' we want to replace it with 'main+87' etc.
110 return loc_string.replace(" + ", "+")
111
112
113 @pwndbg.lib.cache.cache_until("objfile")
114 def address(symbol: str) -> int:
115 """
116 Get the address for `symbol`
117 """
118 try:
119 symbol_obj = gdb.lookup_symbol(symbol)[0]
120 if symbol_obj:
121 return int(symbol_obj.value().address)
122 except gdb.error as e:
123 # Symbol lookup only throws exceptions on errors, not if it failed to
124 # lookup a symbol. We want to raise these errors so we can handle them
125 # properly, but there are some we haven't figured out how to fix yet, so
126 # we ignore those here
127 skipped_exceptions = []
128
129 # This is exception is being thrown by the Go typeinfo tests, we should
130 # investigate why this is happening and see if we can explicitly check
131 # for it with `gdb.selected_frame()`
132 skipped_exceptions.append("No frame selected")
133
134 # If we try to look up a TLS variable when there is no TLS, this
135 # exception occurs. Ideally we should come up with a way to check for
136 # this case before calling `gdb.lookup_symbol`
137 skipped_exceptions.append("Cannot find thread-local")
138
139 if all(x not in str(e) for x in skipped_exceptions):
140 raise e
141
142 try:
143 # Unfortunately, `gdb.lookup_symbol` does not seem to handle all
144 # symbols, so we need to fallback to using `gdb.parse_and_eval`. See
145 # https://sourceware.org/pipermail/gdb/2022-October/050362.html
146 # (We tried parsing the output of the `info address` before, but there were some issues. See #1628 and #1666)
147 if "\\" in symbol:
148 # Is it possible that happens? Probably not, but just in case
149 raise ValueError(f"Symbol {symbol!r} contains a backslash")
150 sanitized_symbol_name = symbol.replace("'", "\\'")
151 return int(gdb.parse_and_eval(f"&'{sanitized_symbol_name}'"))
152
153 except gdb.error:
154 return None
155
156
157 @pwndbg.lib.cache.cache_until("objfile", "thread")
158 def static_linkage_symbol_address(symbol: str) -> int:
159 """
160 Get the address for static linkage `symbol`
161 """
162
163 try:
164 symbol_obj = gdb.lookup_static_symbol(symbol)
165 return int(symbol_obj.value().address) if symbol_obj else None
166 except gdb.error:
167 return None
168
169
170 @pwndbg.lib.cache.cache_until("stop", "start")
171 def selected_frame_source_absolute_filename():
172 """
173 Retrieve the symbol table’s source absolute file name from the selected frame.
174
175 In case of missing symbol table or frame information, None is returned.
176 """
177 try:
178 frame = gdb.selected_frame()
179 except gdb.error:
180 return None
181
182 if not frame:
183 return None
184
185 sal = frame.find_sal()
186 if not sal:
187 return None
188
189 symtab = sal.symtab
190 if not symtab:
191 return None
192
193 return symtab.fullname()
194
195
196 def parse_and_eval(expression: str) -> gdb.Value | None:
197 """Error handling wrapper for GDBs parse_and_eval function"""
198 try:
199 return gdb.parse_and_eval(expression)
200 except gdb.error:
201 return None
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py
--- a/pwndbg/gdblib/symbol.py
+++ b/pwndbg/gdblib/symbol.py
@@ -136,6 +136,9 @@
# this case before calling `gdb.lookup_symbol`
skipped_exceptions.append("Cannot find thread-local")
+ # This reproduced on GDB 12.1 and caused #1878
+ skipped_exceptions.append("symbol requires a frame to compute its value")
+
if all(x not in str(e) for x in skipped_exceptions):
raise e
| {"golden_diff": "diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py\n--- a/pwndbg/gdblib/symbol.py\n+++ b/pwndbg/gdblib/symbol.py\n@@ -136,6 +136,9 @@\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local\")\n \n+ # This reproduced on GDB 12.1 and caused #1878\n+ skipped_exceptions.append(\"symbol requires a frame to compute its value\")\n+\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n", "issue": "crash in command start after running sstart\n### Description\r\n\r\ncrash when running start after sstart\r\n\r\n### Steps to reproduce\r\n\r\nGdb session history:\r\n```\r\nsstart\r\nstart\r\nset exception-verbose on\r\nstart\r\nbugreport --run-broweser\r\n```\r\n\r\n### My setup\r\n\r\n```\r\nPlatform: Linux-6.2.0-26-generic-x86_64-with-glibc2.35\r\nOS: Ubuntu 22.04.3 LTS\r\nOS ABI: #26~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Thu Jul 13 16:27:29 UTC 2\r\nArchitecture: x86_64\r\nEndian: little\r\nCharset: utf-8\r\nWidth: 156\r\nHeight: 78\r\nGdb: 12.1\r\nPython: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]\r\nPwndbg: 2023.07.17 build: e959a47\r\nCapstone: 5.0.1280\r\nUnicorn: 2.0.1\r\nThis GDB was configured as follows:\r\n configure --host=x86_64-linux-gnu --target=x86_64-linux-gnu\r\n\t --with-auto-load-dir=$debugdir:$datadir/auto-load\r\n\t --with-auto-load-safe-path=$debugdir:$datadir/auto-load\r\n\t --with-expat\r\n\t --with-gdb-datadir=/usr/share/gdb (relocatable)\r\n\t --with-jit-reader-dir=/usr/lib/gdb (relocatable)\r\n\t --without-libunwind-ia64\r\n\t --with-lzma\r\n\t --with-babeltrace\r\n\t --with-intel-pt\r\n\t --with-mpfr\r\n\t --with-xxhash\r\n\t --with-python=/usr (relocatable)\r\n\t --with-python-libdir=/usr/lib (relocatable)\r\n\t --with-debuginfod\r\n\t --without-guile\r\n\t --enable-source-highlight\r\n\t --with-separate-debug-dir=/usr/lib/debug (relocatable)\r\n\t --with-system-gdbinit=/etc/gdb/gdbinit\r\n\t --with-system-gdbinit-dir=/etc/gdb/gdbinit.d\r\n\r\n(\"Relocatable\" means the directory can be moved with the GDB installation\r\ntree, and GDB will still find it.)\r\n\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nfrom __future__ import annotations\n\nimport re\n\nimport gdb\n\nimport pwndbg.gdblib.android\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.qemu\nimport pwndbg.gdblib.remote\nimport pwndbg.gdblib.stack\nimport pwndbg.gdblib.vmmap\nimport pwndbg.ida\nimport pwndbg.lib.cache\n\n\ndef _get_debug_file_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute(\"show debug-file-directory\", to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return \"\"\n\n\ndef _set_debug_file_directory(d) -> None:\n gdb.execute(f\"set debug-file-directory {d}\", to_string=True, from_tty=False)\n\n\ndef _add_debug_file_directory(d) -> None:\n current = _get_debug_file_directory()\n if current:\n _set_debug_file_directory(f\"{current}:{d}\")\n else:\n 
_set_debug_file_directory(d)\n\n\nif \"/usr/lib/debug\" not in _get_debug_file_directory():\n _add_debug_file_directory(\"/usr/lib/debug\")\n\n\[email protected]_until(\"objfile\")\ndef get(address: int, gdb_only=False) -> str:\n \"\"\"\n Retrieve the name for the symbol located at `address` - either from GDB or from IDA sync\n Passing `gdb_only=True`\n \"\"\"\n # Note: we do not return \"\" on `address < pwndbg.gdblib.memory.MMAP_MIN_ADDR`\n # because this may be used to find out the symbol name on PIE binaries that weren't started yet\n # and then their symbol addresses can be found by GDB on their (non-rebased) offsets\n\n # Fast path: GDB's `info symbol` returns 'Numeric constant too large' here\n if address >= ((1 << 64) - 1):\n return \"\"\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute(\"info symbol %#x\" % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith(\"No symbol\"):\n address = int(address)\n exe = pwndbg.gdblib.elf.exe()\n if exe:\n exe_map = pwndbg.gdblib.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n\n # If there are newlines, which means that there are multiple symbols for the address\n # then use the first one (see also #1610)\n result = result[: result.index(\"\\n\")]\n\n # See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624\n # The most often encountered formats looks like this:\n # \"main in section .text of /bin/bash\"\n # \"main + 3 in section .text of /bin/bash\"\n # \"system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\"\n # \"No symbol matches system-1\"\n # But there are some others that we have to account for as well\n if \" in section \" in result:\n loc_string, _ = result.split(\" in section \")\n elif \" in load address range of \" in result:\n loc_string, _ = result.split(\" in load address range of \")\n elif \" overlay section \" in result:\n result, _ = result.split(\" overlay section \")\n loc_string, _ = result.split(\" in \")\n else:\n loc_string = \"\"\n\n # If there is 'main + 87' we want to replace it with 'main+87' etc.\n return loc_string.replace(\" + \", \"+\")\n\n\[email protected]_until(\"objfile\")\ndef address(symbol: str) -> int:\n \"\"\"\n Get the address for `symbol`\n \"\"\"\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except gdb.error as e:\n # Symbol lookup only throws exceptions on errors, not if it failed to\n # lookup a symbol. We want to raise these errors so we can handle them\n # properly, but there are some we haven't figured out how to fix yet, so\n # we ignore those here\n skipped_exceptions = []\n\n # This is exception is being thrown by the Go typeinfo tests, we should\n # investigate why this is happening and see if we can explicitly check\n # for it with `gdb.selected_frame()`\n skipped_exceptions.append(\"No frame selected\")\n\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local\")\n\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n\n try:\n # Unfortunately, `gdb.lookup_symbol` does not seem to handle all\n # symbols, so we need to fallback to using `gdb.parse_and_eval`. 
See\n # https://sourceware.org/pipermail/gdb/2022-October/050362.html\n # (We tried parsing the output of the `info address` before, but there were some issues. See #1628 and #1666)\n if \"\\\\\" in symbol:\n # Is it possible that happens? Probably not, but just in case\n raise ValueError(f\"Symbol {symbol!r} contains a backslash\")\n sanitized_symbol_name = symbol.replace(\"'\", \"\\\\'\")\n return int(gdb.parse_and_eval(f\"&'{sanitized_symbol_name}'\"))\n\n except gdb.error:\n return None\n\n\[email protected]_until(\"objfile\", \"thread\")\ndef static_linkage_symbol_address(symbol: str) -> int:\n \"\"\"\n Get the address for static linkage `symbol`\n \"\"\"\n\n try:\n symbol_obj = gdb.lookup_static_symbol(symbol)\n return int(symbol_obj.value().address) if symbol_obj else None\n except gdb.error:\n return None\n\n\[email protected]_until(\"stop\", \"start\")\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n\n\ndef parse_and_eval(expression: str) -> gdb.Value | None:\n \"\"\"Error handling wrapper for GDBs parse_and_eval function\"\"\"\n try:\n return gdb.parse_and_eval(expression)\n except gdb.error:\n return None\n", "path": "pwndbg/gdblib/symbol.py"}], "after_files": [{"content": "\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nfrom __future__ import annotations\n\nimport re\n\nimport gdb\n\nimport pwndbg.gdblib.android\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.qemu\nimport pwndbg.gdblib.remote\nimport pwndbg.gdblib.stack\nimport pwndbg.gdblib.vmmap\nimport pwndbg.ida\nimport pwndbg.lib.cache\n\n\ndef _get_debug_file_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute(\"show debug-file-directory\", to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return \"\"\n\n\ndef _set_debug_file_directory(d) -> None:\n gdb.execute(f\"set debug-file-directory {d}\", to_string=True, from_tty=False)\n\n\ndef _add_debug_file_directory(d) -> None:\n current = _get_debug_file_directory()\n if current:\n _set_debug_file_directory(f\"{current}:{d}\")\n else:\n _set_debug_file_directory(d)\n\n\nif \"/usr/lib/debug\" not in _get_debug_file_directory():\n _add_debug_file_directory(\"/usr/lib/debug\")\n\n\[email protected]_until(\"objfile\")\ndef get(address: int, gdb_only=False) -> str:\n \"\"\"\n Retrieve the name for the symbol located at `address` - either from GDB or from IDA sync\n Passing `gdb_only=True`\n \"\"\"\n # Note: we do not return \"\" on `address < pwndbg.gdblib.memory.MMAP_MIN_ADDR`\n # because this may be used to find out the symbol name on PIE binaries that weren't started yet\n # and 
then their symbol addresses can be found by GDB on their (non-rebased) offsets\n\n # Fast path: GDB's `info symbol` returns 'Numeric constant too large' here\n if address >= ((1 << 64) - 1):\n return \"\"\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute(\"info symbol %#x\" % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith(\"No symbol\"):\n address = int(address)\n exe = pwndbg.gdblib.elf.exe()\n if exe:\n exe_map = pwndbg.gdblib.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n\n # If there are newlines, which means that there are multiple symbols for the address\n # then use the first one (see also #1610)\n result = result[: result.index(\"\\n\")]\n\n # See https://github.com/bminor/binutils-gdb/blob/d1702fea87aa62dff7de465464097dba63cc8c0f/gdb/printcmd.c#L1594-L1624\n # The most often encountered formats looks like this:\n # \"main in section .text of /bin/bash\"\n # \"main + 3 in section .text of /bin/bash\"\n # \"system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\"\n # \"No symbol matches system-1\"\n # But there are some others that we have to account for as well\n if \" in section \" in result:\n loc_string, _ = result.split(\" in section \")\n elif \" in load address range of \" in result:\n loc_string, _ = result.split(\" in load address range of \")\n elif \" overlay section \" in result:\n result, _ = result.split(\" overlay section \")\n loc_string, _ = result.split(\" in \")\n else:\n loc_string = \"\"\n\n # If there is 'main + 87' we want to replace it with 'main+87' etc.\n return loc_string.replace(\" + \", \"+\")\n\n\[email protected]_until(\"objfile\")\ndef address(symbol: str) -> int:\n \"\"\"\n Get the address for `symbol`\n \"\"\"\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except gdb.error as e:\n # Symbol lookup only throws exceptions on errors, not if it failed to\n # lookup a symbol. We want to raise these errors so we can handle them\n # properly, but there are some we haven't figured out how to fix yet, so\n # we ignore those here\n skipped_exceptions = []\n\n # This is exception is being thrown by the Go typeinfo tests, we should\n # investigate why this is happening and see if we can explicitly check\n # for it with `gdb.selected_frame()`\n skipped_exceptions.append(\"No frame selected\")\n\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local\")\n\n # This reproduced on GDB 12.1 and caused #1878\n skipped_exceptions.append(\"symbol requires a frame to compute its value\")\n\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n\n try:\n # Unfortunately, `gdb.lookup_symbol` does not seem to handle all\n # symbols, so we need to fallback to using `gdb.parse_and_eval`. See\n # https://sourceware.org/pipermail/gdb/2022-October/050362.html\n # (We tried parsing the output of the `info address` before, but there were some issues. See #1628 and #1666)\n if \"\\\\\" in symbol:\n # Is it possible that happens? 
Probably not, but just in case\n raise ValueError(f\"Symbol {symbol!r} contains a backslash\")\n sanitized_symbol_name = symbol.replace(\"'\", \"\\\\'\")\n return int(gdb.parse_and_eval(f\"&'{sanitized_symbol_name}'\"))\n\n except gdb.error:\n return None\n\n\[email protected]_until(\"objfile\", \"thread\")\ndef static_linkage_symbol_address(symbol: str) -> int:\n \"\"\"\n Get the address for static linkage `symbol`\n \"\"\"\n\n try:\n symbol_obj = gdb.lookup_static_symbol(symbol)\n return int(symbol_obj.value().address) if symbol_obj else None\n except gdb.error:\n return None\n\n\[email protected]_until(\"stop\", \"start\")\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n\n\ndef parse_and_eval(expression: str) -> gdb.Value | None:\n \"\"\"Error handling wrapper for GDBs parse_and_eval function\"\"\"\n try:\n return gdb.parse_and_eval(expression)\n except gdb.error:\n return None\n", "path": "pwndbg/gdblib/symbol.py"}]} |
gh_patches_debug_1339 | rasdani/github-patches | git_diff | encode__uvicorn-978 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve user experience by complaining when reload_dirs is given a string instead of a list
Passing a string to reload_dirs as in the following, causes reload to silently not work :
```
uvicorn.run(
"dp.server:app",
host="127.0.0.1", port=5000,
log_level="info",
reload=True,
reload_dirs="/home/maxou/dev/proj",
reload_delay=2.0
)
```
Giving reload_dirs an array : ["/home/maxou/dev/proj"] fixes the problem
It's not really a bug, but complaining with an error message would improve the user experience.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/config.py`
Content:
```
1 import asyncio
2 import inspect
3 import json
4 import logging
5 import logging.config
6 import os
7 import socket
8 import ssl
9 import sys
10 from typing import List, Tuple
11
12 import click
13
14 try:
15 import yaml
16 except ImportError:
17 # If the code below that depends on yaml is exercised, it will raise a NameError.
18 # Install the PyYAML package or the uvicorn[standard] optional dependencies to
19 # enable this functionality.
20 pass
21
22 from uvicorn.importer import ImportFromStringError, import_from_string
23 from uvicorn.middleware.asgi2 import ASGI2Middleware
24 from uvicorn.middleware.debug import DebugMiddleware
25 from uvicorn.middleware.message_logger import MessageLoggerMiddleware
26 from uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware
27 from uvicorn.middleware.wsgi import WSGIMiddleware
28
29 TRACE_LOG_LEVEL = 5
30
31 LOG_LEVELS = {
32 "critical": logging.CRITICAL,
33 "error": logging.ERROR,
34 "warning": logging.WARNING,
35 "info": logging.INFO,
36 "debug": logging.DEBUG,
37 "trace": TRACE_LOG_LEVEL,
38 }
39 HTTP_PROTOCOLS = {
40 "auto": "uvicorn.protocols.http.auto:AutoHTTPProtocol",
41 "h11": "uvicorn.protocols.http.h11_impl:H11Protocol",
42 "httptools": "uvicorn.protocols.http.httptools_impl:HttpToolsProtocol",
43 }
44 WS_PROTOCOLS = {
45 "auto": "uvicorn.protocols.websockets.auto:AutoWebSocketsProtocol",
46 "none": None,
47 "websockets": "uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol",
48 "wsproto": "uvicorn.protocols.websockets.wsproto_impl:WSProtocol",
49 }
50 LIFESPAN = {
51 "auto": "uvicorn.lifespan.on:LifespanOn",
52 "on": "uvicorn.lifespan.on:LifespanOn",
53 "off": "uvicorn.lifespan.off:LifespanOff",
54 }
55 LOOP_SETUPS = {
56 "none": None,
57 "auto": "uvicorn.loops.auto:auto_loop_setup",
58 "asyncio": "uvicorn.loops.asyncio:asyncio_setup",
59 "uvloop": "uvicorn.loops.uvloop:uvloop_setup",
60 }
61 INTERFACES = ["auto", "asgi3", "asgi2", "wsgi"]
62
63
64 # Fallback to 'ssl.PROTOCOL_SSLv23' in order to support Python < 3.5.3.
65 SSL_PROTOCOL_VERSION = getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23)
66
67
68 LOGGING_CONFIG = {
69 "version": 1,
70 "disable_existing_loggers": False,
71 "formatters": {
72 "default": {
73 "()": "uvicorn.logging.DefaultFormatter",
74 "fmt": "%(levelprefix)s %(message)s",
75 "use_colors": None,
76 },
77 "access": {
78 "()": "uvicorn.logging.AccessFormatter",
79 "fmt": '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s', # noqa: E501
80 },
81 },
82 "handlers": {
83 "default": {
84 "formatter": "default",
85 "class": "logging.StreamHandler",
86 "stream": "ext://sys.stderr",
87 },
88 "access": {
89 "formatter": "access",
90 "class": "logging.StreamHandler",
91 "stream": "ext://sys.stdout",
92 },
93 },
94 "loggers": {
95 "uvicorn": {"handlers": ["default"], "level": "INFO"},
96 "uvicorn.error": {"level": "INFO"},
97 "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": False},
98 },
99 }
100
101 logger = logging.getLogger("uvicorn.error")
102
103
104 def create_ssl_context(
105 certfile, keyfile, password, ssl_version, cert_reqs, ca_certs, ciphers
106 ):
107 ctx = ssl.SSLContext(ssl_version)
108 get_password = (lambda: password) if password else None
109 ctx.load_cert_chain(certfile, keyfile, get_password)
110 ctx.verify_mode = cert_reqs
111 if ca_certs:
112 ctx.load_verify_locations(ca_certs)
113 if ciphers:
114 ctx.set_ciphers(ciphers)
115 return ctx
116
117
118 class Config:
119 def __init__(
120 self,
121 app,
122 host="127.0.0.1",
123 port=8000,
124 uds=None,
125 fd=None,
126 loop="auto",
127 http="auto",
128 ws="auto",
129 lifespan="auto",
130 env_file=None,
131 log_config=LOGGING_CONFIG,
132 log_level=None,
133 access_log=True,
134 use_colors=None,
135 interface="auto",
136 debug=False,
137 reload=False,
138 reload_dirs=None,
139 reload_delay=None,
140 workers=None,
141 proxy_headers=True,
142 forwarded_allow_ips=None,
143 root_path="",
144 limit_concurrency=None,
145 limit_max_requests=None,
146 backlog=2048,
147 timeout_keep_alive=5,
148 timeout_notify=30,
149 callback_notify=None,
150 ssl_keyfile=None,
151 ssl_certfile=None,
152 ssl_keyfile_password=None,
153 ssl_version=SSL_PROTOCOL_VERSION,
154 ssl_cert_reqs=ssl.CERT_NONE,
155 ssl_ca_certs=None,
156 ssl_ciphers="TLSv1",
157 headers=None,
158 factory=False,
159 ):
160 self.app = app
161 self.host = host
162 self.port = port
163 self.uds = uds
164 self.fd = fd
165 self.loop = loop
166 self.http = http
167 self.ws = ws
168 self.lifespan = lifespan
169 self.log_config = log_config
170 self.log_level = log_level
171 self.access_log = access_log
172 self.use_colors = use_colors
173 self.interface = interface
174 self.debug = debug
175 self.reload = reload
176 self.reload_delay = reload_delay or 0.25
177 self.workers = workers or 1
178 self.proxy_headers = proxy_headers
179 self.root_path = root_path
180 self.limit_concurrency = limit_concurrency
181 self.limit_max_requests = limit_max_requests
182 self.backlog = backlog
183 self.timeout_keep_alive = timeout_keep_alive
184 self.timeout_notify = timeout_notify
185 self.callback_notify = callback_notify
186 self.ssl_keyfile = ssl_keyfile
187 self.ssl_certfile = ssl_certfile
188 self.ssl_keyfile_password = ssl_keyfile_password
189 self.ssl_version = ssl_version
190 self.ssl_cert_reqs = ssl_cert_reqs
191 self.ssl_ca_certs = ssl_ca_certs
192 self.ssl_ciphers = ssl_ciphers
193 self.headers = headers if headers else [] # type: List[str]
194 self.encoded_headers = None # type: List[Tuple[bytes, bytes]]
195 self.factory = factory
196
197 self.loaded = False
198 self.configure_logging()
199
200 if reload_dirs is None:
201 self.reload_dirs = [os.getcwd()]
202 else:
203 self.reload_dirs = reload_dirs
204
205 if env_file is not None:
206 from dotenv import load_dotenv
207
208 logger.info("Loading environment from '%s'", env_file)
209 load_dotenv(dotenv_path=env_file)
210
211 if workers is None and "WEB_CONCURRENCY" in os.environ:
212 self.workers = int(os.environ["WEB_CONCURRENCY"])
213
214 if forwarded_allow_ips is None:
215 self.forwarded_allow_ips = os.environ.get(
216 "FORWARDED_ALLOW_IPS", "127.0.0.1"
217 )
218 else:
219 self.forwarded_allow_ips = forwarded_allow_ips
220
221 @property
222 def asgi_version(self) -> str:
223 return {"asgi2": "2.0", "asgi3": "3.0", "wsgi": "3.0"}[self.interface]
224
225 @property
226 def is_ssl(self) -> bool:
227 return bool(self.ssl_keyfile or self.ssl_certfile)
228
229 def configure_logging(self):
230 logging.addLevelName(TRACE_LOG_LEVEL, "TRACE")
231
232 if self.log_config is not None:
233 if isinstance(self.log_config, dict):
234 if self.use_colors in (True, False):
235 self.log_config["formatters"]["default"][
236 "use_colors"
237 ] = self.use_colors
238 self.log_config["formatters"]["access"][
239 "use_colors"
240 ] = self.use_colors
241 logging.config.dictConfig(self.log_config)
242 elif self.log_config.endswith(".json"):
243 with open(self.log_config) as file:
244 loaded_config = json.load(file)
245 logging.config.dictConfig(loaded_config)
246 elif self.log_config.endswith((".yaml", ".yml")):
247 with open(self.log_config) as file:
248 loaded_config = yaml.safe_load(file)
249 logging.config.dictConfig(loaded_config)
250 else:
251 # See the note about fileConfig() here:
252 # https://docs.python.org/3/library/logging.config.html#configuration-file-format
253 logging.config.fileConfig(
254 self.log_config, disable_existing_loggers=False
255 )
256
257 if self.log_level is not None:
258 if isinstance(self.log_level, str):
259 log_level = LOG_LEVELS[self.log_level]
260 else:
261 log_level = self.log_level
262 logging.getLogger("uvicorn.error").setLevel(log_level)
263 logging.getLogger("uvicorn.access").setLevel(log_level)
264 logging.getLogger("uvicorn.asgi").setLevel(log_level)
265 if self.access_log is False:
266 logging.getLogger("uvicorn.access").handlers = []
267 logging.getLogger("uvicorn.access").propagate = False
268
269 def load(self):
270 assert not self.loaded
271
272 if self.is_ssl:
273 self.ssl = create_ssl_context(
274 keyfile=self.ssl_keyfile,
275 certfile=self.ssl_certfile,
276 password=self.ssl_keyfile_password,
277 ssl_version=self.ssl_version,
278 cert_reqs=self.ssl_cert_reqs,
279 ca_certs=self.ssl_ca_certs,
280 ciphers=self.ssl_ciphers,
281 )
282 else:
283 self.ssl = None
284
285 encoded_headers = [
286 (key.lower().encode("latin1"), value.encode("latin1"))
287 for key, value in self.headers
288 ]
289 self.encoded_headers = (
290 encoded_headers
291 if b"server" in dict(encoded_headers)
292 else [(b"server", b"uvicorn")] + encoded_headers
293 ) # type: List[Tuple[bytes, bytes]]
294
295 if isinstance(self.http, str):
296 self.http_protocol_class = import_from_string(HTTP_PROTOCOLS[self.http])
297 else:
298 self.http_protocol_class = self.http
299
300 if isinstance(self.ws, str):
301 self.ws_protocol_class = import_from_string(WS_PROTOCOLS[self.ws])
302 else:
303 self.ws_protocol_class = self.ws
304
305 self.lifespan_class = import_from_string(LIFESPAN[self.lifespan])
306
307 try:
308 self.loaded_app = import_from_string(self.app)
309 except ImportFromStringError as exc:
310 logger.error("Error loading ASGI app. %s" % exc)
311 sys.exit(1)
312
313 try:
314 self.loaded_app = self.loaded_app()
315 except TypeError as exc:
316 if self.factory:
317 logger.error("Error loading ASGI app factory: %s", exc)
318 sys.exit(1)
319 else:
320 if not self.factory:
321 logger.warning(
322 "ASGI app factory detected. Using it, "
323 "but please consider setting the --factory flag explicitly."
324 )
325
326 if self.interface == "auto":
327 if inspect.isclass(self.loaded_app):
328 use_asgi_3 = hasattr(self.loaded_app, "__await__")
329 elif inspect.isfunction(self.loaded_app):
330 use_asgi_3 = asyncio.iscoroutinefunction(self.loaded_app)
331 else:
332 call = getattr(self.loaded_app, "__call__", None)
333 use_asgi_3 = asyncio.iscoroutinefunction(call)
334 self.interface = "asgi3" if use_asgi_3 else "asgi2"
335
336 if self.interface == "wsgi":
337 self.loaded_app = WSGIMiddleware(self.loaded_app)
338 self.ws_protocol_class = None
339 elif self.interface == "asgi2":
340 self.loaded_app = ASGI2Middleware(self.loaded_app)
341
342 if self.debug:
343 self.loaded_app = DebugMiddleware(self.loaded_app)
344 if logger.level <= TRACE_LOG_LEVEL:
345 self.loaded_app = MessageLoggerMiddleware(self.loaded_app)
346 if self.proxy_headers:
347 self.loaded_app = ProxyHeadersMiddleware(
348 self.loaded_app, trusted_hosts=self.forwarded_allow_ips
349 )
350
351 self.loaded = True
352
353 def setup_event_loop(self):
354 loop_setup = import_from_string(LOOP_SETUPS[self.loop])
355 if loop_setup is not None:
356 loop_setup()
357
358 def bind_socket(self):
359 family = socket.AF_INET
360 addr_format = "%s://%s:%d"
361
362 if self.host and ":" in self.host:
363 # It's an IPv6 address.
364 family = socket.AF_INET6
365 addr_format = "%s://[%s]:%d"
366
367 sock = socket.socket(family=family)
368 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
369 try:
370 sock.bind((self.host, self.port))
371 except OSError as exc:
372 logger.error(exc)
373 sys.exit(1)
374 sock.set_inheritable(True)
375
376 message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
377 color_message = (
378 "Uvicorn running on "
379 + click.style(addr_format, bold=True)
380 + " (Press CTRL+C to quit)"
381 )
382 protocol_name = "https" if self.is_ssl else "http"
383 logger.info(
384 message,
385 protocol_name,
386 self.host,
387 self.port,
388 extra={"color_message": color_message},
389 )
390 return sock
391
392 @property
393 def should_reload(self):
394 return isinstance(self.app, str) and (self.debug or self.reload)
395
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/config.py b/uvicorn/config.py
--- a/uvicorn/config.py
+++ b/uvicorn/config.py
@@ -200,7 +200,10 @@
if reload_dirs is None:
self.reload_dirs = [os.getcwd()]
else:
- self.reload_dirs = reload_dirs
+ if isinstance(reload_dirs, str):
+ self.reload_dirs = [reload_dirs]
+ else:
+ self.reload_dirs = reload_dirs
if env_file is not None:
from dotenv import load_dotenv
| {"golden_diff": "diff --git a/uvicorn/config.py b/uvicorn/config.py\n--- a/uvicorn/config.py\n+++ b/uvicorn/config.py\n@@ -200,7 +200,10 @@\n if reload_dirs is None:\n self.reload_dirs = [os.getcwd()]\n else:\n- self.reload_dirs = reload_dirs\n+ if isinstance(reload_dirs, str):\n+ self.reload_dirs = [reload_dirs]\n+ else:\n+ self.reload_dirs = reload_dirs\n \n if env_file is not None:\n from dotenv import load_dotenv\n", "issue": "improve user experience by complaining when reload_dirs is given a string instead of a list\n\r\nPassing a string to reload_dirs as in the following, causes reload to silently not work :\r\n\r\n```\r\n uvicorn.run(\r\n \"dp.server:app\",\r\n host=\"127.0.0.1\", port=5000,\r\n log_level=\"info\",\r\n reload=True,\r\n reload_dirs=\"/home/maxou/dev/proj\",\r\n reload_delay=2.0\r\n )\r\n```\r\n\r\nGiving reload_dirs an array : [\"/home/maxou/dev/proj\"] fixes the problem\r\n\r\n\r\nIt's not really a bug, but complaining with an error message would improve the user experience.\r\n\n", "before_files": [{"content": "import asyncio\nimport inspect\nimport json\nimport logging\nimport logging.config\nimport os\nimport socket\nimport ssl\nimport sys\nfrom typing import List, Tuple\n\nimport click\n\ntry:\n import yaml\nexcept ImportError:\n # If the code below that depends on yaml is exercised, it will raise a NameError.\n # Install the PyYAML package or the uvicorn[standard] optional dependencies to\n # enable this functionality.\n pass\n\nfrom uvicorn.importer import ImportFromStringError, import_from_string\nfrom uvicorn.middleware.asgi2 import ASGI2Middleware\nfrom uvicorn.middleware.debug import DebugMiddleware\nfrom uvicorn.middleware.message_logger import MessageLoggerMiddleware\nfrom uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware\nfrom uvicorn.middleware.wsgi import WSGIMiddleware\n\nTRACE_LOG_LEVEL = 5\n\nLOG_LEVELS = {\n \"critical\": logging.CRITICAL,\n \"error\": logging.ERROR,\n \"warning\": logging.WARNING,\n \"info\": logging.INFO,\n \"debug\": logging.DEBUG,\n \"trace\": TRACE_LOG_LEVEL,\n}\nHTTP_PROTOCOLS = {\n \"auto\": \"uvicorn.protocols.http.auto:AutoHTTPProtocol\",\n \"h11\": \"uvicorn.protocols.http.h11_impl:H11Protocol\",\n \"httptools\": \"uvicorn.protocols.http.httptools_impl:HttpToolsProtocol\",\n}\nWS_PROTOCOLS = {\n \"auto\": \"uvicorn.protocols.websockets.auto:AutoWebSocketsProtocol\",\n \"none\": None,\n \"websockets\": \"uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol\",\n \"wsproto\": \"uvicorn.protocols.websockets.wsproto_impl:WSProtocol\",\n}\nLIFESPAN = {\n \"auto\": \"uvicorn.lifespan.on:LifespanOn\",\n \"on\": \"uvicorn.lifespan.on:LifespanOn\",\n \"off\": \"uvicorn.lifespan.off:LifespanOff\",\n}\nLOOP_SETUPS = {\n \"none\": None,\n \"auto\": \"uvicorn.loops.auto:auto_loop_setup\",\n \"asyncio\": \"uvicorn.loops.asyncio:asyncio_setup\",\n \"uvloop\": \"uvicorn.loops.uvloop:uvloop_setup\",\n}\nINTERFACES = [\"auto\", \"asgi3\", \"asgi2\", \"wsgi\"]\n\n\n# Fallback to 'ssl.PROTOCOL_SSLv23' in order to support Python < 3.5.3.\nSSL_PROTOCOL_VERSION = getattr(ssl, \"PROTOCOL_TLS\", ssl.PROTOCOL_SSLv23)\n\n\nLOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"()\": \"uvicorn.logging.DefaultFormatter\",\n \"fmt\": \"%(levelprefix)s %(message)s\",\n \"use_colors\": None,\n },\n \"access\": {\n \"()\": \"uvicorn.logging.AccessFormatter\",\n \"fmt\": '%(levelprefix)s %(client_addr)s - \"%(request_line)s\" %(status_code)s', # noqa: E501\n },\n },\n 
\"handlers\": {\n \"default\": {\n \"formatter\": \"default\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stderr\",\n },\n \"access\": {\n \"formatter\": \"access\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\",\n },\n },\n \"loggers\": {\n \"uvicorn\": {\"handlers\": [\"default\"], \"level\": \"INFO\"},\n \"uvicorn.error\": {\"level\": \"INFO\"},\n \"uvicorn.access\": {\"handlers\": [\"access\"], \"level\": \"INFO\", \"propagate\": False},\n },\n}\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\ndef create_ssl_context(\n certfile, keyfile, password, ssl_version, cert_reqs, ca_certs, ciphers\n):\n ctx = ssl.SSLContext(ssl_version)\n get_password = (lambda: password) if password else None\n ctx.load_cert_chain(certfile, keyfile, get_password)\n ctx.verify_mode = cert_reqs\n if ca_certs:\n ctx.load_verify_locations(ca_certs)\n if ciphers:\n ctx.set_ciphers(ciphers)\n return ctx\n\n\nclass Config:\n def __init__(\n self,\n app,\n host=\"127.0.0.1\",\n port=8000,\n uds=None,\n fd=None,\n loop=\"auto\",\n http=\"auto\",\n ws=\"auto\",\n lifespan=\"auto\",\n env_file=None,\n log_config=LOGGING_CONFIG,\n log_level=None,\n access_log=True,\n use_colors=None,\n interface=\"auto\",\n debug=False,\n reload=False,\n reload_dirs=None,\n reload_delay=None,\n workers=None,\n proxy_headers=True,\n forwarded_allow_ips=None,\n root_path=\"\",\n limit_concurrency=None,\n limit_max_requests=None,\n backlog=2048,\n timeout_keep_alive=5,\n timeout_notify=30,\n callback_notify=None,\n ssl_keyfile=None,\n ssl_certfile=None,\n ssl_keyfile_password=None,\n ssl_version=SSL_PROTOCOL_VERSION,\n ssl_cert_reqs=ssl.CERT_NONE,\n ssl_ca_certs=None,\n ssl_ciphers=\"TLSv1\",\n headers=None,\n factory=False,\n ):\n self.app = app\n self.host = host\n self.port = port\n self.uds = uds\n self.fd = fd\n self.loop = loop\n self.http = http\n self.ws = ws\n self.lifespan = lifespan\n self.log_config = log_config\n self.log_level = log_level\n self.access_log = access_log\n self.use_colors = use_colors\n self.interface = interface\n self.debug = debug\n self.reload = reload\n self.reload_delay = reload_delay or 0.25\n self.workers = workers or 1\n self.proxy_headers = proxy_headers\n self.root_path = root_path\n self.limit_concurrency = limit_concurrency\n self.limit_max_requests = limit_max_requests\n self.backlog = backlog\n self.timeout_keep_alive = timeout_keep_alive\n self.timeout_notify = timeout_notify\n self.callback_notify = callback_notify\n self.ssl_keyfile = ssl_keyfile\n self.ssl_certfile = ssl_certfile\n self.ssl_keyfile_password = ssl_keyfile_password\n self.ssl_version = ssl_version\n self.ssl_cert_reqs = ssl_cert_reqs\n self.ssl_ca_certs = ssl_ca_certs\n self.ssl_ciphers = ssl_ciphers\n self.headers = headers if headers else [] # type: List[str]\n self.encoded_headers = None # type: List[Tuple[bytes, bytes]]\n self.factory = factory\n\n self.loaded = False\n self.configure_logging()\n\n if reload_dirs is None:\n self.reload_dirs = [os.getcwd()]\n else:\n self.reload_dirs = reload_dirs\n\n if env_file is not None:\n from dotenv import load_dotenv\n\n logger.info(\"Loading environment from '%s'\", env_file)\n load_dotenv(dotenv_path=env_file)\n\n if workers is None and \"WEB_CONCURRENCY\" in os.environ:\n self.workers = int(os.environ[\"WEB_CONCURRENCY\"])\n\n if forwarded_allow_ips is None:\n self.forwarded_allow_ips = os.environ.get(\n \"FORWARDED_ALLOW_IPS\", \"127.0.0.1\"\n )\n else:\n self.forwarded_allow_ips = forwarded_allow_ips\n\n @property\n def 
asgi_version(self) -> str:\n return {\"asgi2\": \"2.0\", \"asgi3\": \"3.0\", \"wsgi\": \"3.0\"}[self.interface]\n\n @property\n def is_ssl(self) -> bool:\n return bool(self.ssl_keyfile or self.ssl_certfile)\n\n def configure_logging(self):\n logging.addLevelName(TRACE_LOG_LEVEL, \"TRACE\")\n\n if self.log_config is not None:\n if isinstance(self.log_config, dict):\n if self.use_colors in (True, False):\n self.log_config[\"formatters\"][\"default\"][\n \"use_colors\"\n ] = self.use_colors\n self.log_config[\"formatters\"][\"access\"][\n \"use_colors\"\n ] = self.use_colors\n logging.config.dictConfig(self.log_config)\n elif self.log_config.endswith(\".json\"):\n with open(self.log_config) as file:\n loaded_config = json.load(file)\n logging.config.dictConfig(loaded_config)\n elif self.log_config.endswith((\".yaml\", \".yml\")):\n with open(self.log_config) as file:\n loaded_config = yaml.safe_load(file)\n logging.config.dictConfig(loaded_config)\n else:\n # See the note about fileConfig() here:\n # https://docs.python.org/3/library/logging.config.html#configuration-file-format\n logging.config.fileConfig(\n self.log_config, disable_existing_loggers=False\n )\n\n if self.log_level is not None:\n if isinstance(self.log_level, str):\n log_level = LOG_LEVELS[self.log_level]\n else:\n log_level = self.log_level\n logging.getLogger(\"uvicorn.error\").setLevel(log_level)\n logging.getLogger(\"uvicorn.access\").setLevel(log_level)\n logging.getLogger(\"uvicorn.asgi\").setLevel(log_level)\n if self.access_log is False:\n logging.getLogger(\"uvicorn.access\").handlers = []\n logging.getLogger(\"uvicorn.access\").propagate = False\n\n def load(self):\n assert not self.loaded\n\n if self.is_ssl:\n self.ssl = create_ssl_context(\n keyfile=self.ssl_keyfile,\n certfile=self.ssl_certfile,\n password=self.ssl_keyfile_password,\n ssl_version=self.ssl_version,\n cert_reqs=self.ssl_cert_reqs,\n ca_certs=self.ssl_ca_certs,\n ciphers=self.ssl_ciphers,\n )\n else:\n self.ssl = None\n\n encoded_headers = [\n (key.lower().encode(\"latin1\"), value.encode(\"latin1\"))\n for key, value in self.headers\n ]\n self.encoded_headers = (\n encoded_headers\n if b\"server\" in dict(encoded_headers)\n else [(b\"server\", b\"uvicorn\")] + encoded_headers\n ) # type: List[Tuple[bytes, bytes]]\n\n if isinstance(self.http, str):\n self.http_protocol_class = import_from_string(HTTP_PROTOCOLS[self.http])\n else:\n self.http_protocol_class = self.http\n\n if isinstance(self.ws, str):\n self.ws_protocol_class = import_from_string(WS_PROTOCOLS[self.ws])\n else:\n self.ws_protocol_class = self.ws\n\n self.lifespan_class = import_from_string(LIFESPAN[self.lifespan])\n\n try:\n self.loaded_app = import_from_string(self.app)\n except ImportFromStringError as exc:\n logger.error(\"Error loading ASGI app. %s\" % exc)\n sys.exit(1)\n\n try:\n self.loaded_app = self.loaded_app()\n except TypeError as exc:\n if self.factory:\n logger.error(\"Error loading ASGI app factory: %s\", exc)\n sys.exit(1)\n else:\n if not self.factory:\n logger.warning(\n \"ASGI app factory detected. 
Using it, \"\n \"but please consider setting the --factory flag explicitly.\"\n )\n\n if self.interface == \"auto\":\n if inspect.isclass(self.loaded_app):\n use_asgi_3 = hasattr(self.loaded_app, \"__await__\")\n elif inspect.isfunction(self.loaded_app):\n use_asgi_3 = asyncio.iscoroutinefunction(self.loaded_app)\n else:\n call = getattr(self.loaded_app, \"__call__\", None)\n use_asgi_3 = asyncio.iscoroutinefunction(call)\n self.interface = \"asgi3\" if use_asgi_3 else \"asgi2\"\n\n if self.interface == \"wsgi\":\n self.loaded_app = WSGIMiddleware(self.loaded_app)\n self.ws_protocol_class = None\n elif self.interface == \"asgi2\":\n self.loaded_app = ASGI2Middleware(self.loaded_app)\n\n if self.debug:\n self.loaded_app = DebugMiddleware(self.loaded_app)\n if logger.level <= TRACE_LOG_LEVEL:\n self.loaded_app = MessageLoggerMiddleware(self.loaded_app)\n if self.proxy_headers:\n self.loaded_app = ProxyHeadersMiddleware(\n self.loaded_app, trusted_hosts=self.forwarded_allow_ips\n )\n\n self.loaded = True\n\n def setup_event_loop(self):\n loop_setup = import_from_string(LOOP_SETUPS[self.loop])\n if loop_setup is not None:\n loop_setup()\n\n def bind_socket(self):\n family = socket.AF_INET\n addr_format = \"%s://%s:%d\"\n\n if self.host and \":\" in self.host:\n # It's an IPv6 address.\n family = socket.AF_INET6\n addr_format = \"%s://[%s]:%d\"\n\n sock = socket.socket(family=family)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.bind((self.host, self.port))\n except OSError as exc:\n logger.error(exc)\n sys.exit(1)\n sock.set_inheritable(True)\n\n message = f\"Uvicorn running on {addr_format} (Press CTRL+C to quit)\"\n color_message = (\n \"Uvicorn running on \"\n + click.style(addr_format, bold=True)\n + \" (Press CTRL+C to quit)\"\n )\n protocol_name = \"https\" if self.is_ssl else \"http\"\n logger.info(\n message,\n protocol_name,\n self.host,\n self.port,\n extra={\"color_message\": color_message},\n )\n return sock\n\n @property\n def should_reload(self):\n return isinstance(self.app, str) and (self.debug or self.reload)\n", "path": "uvicorn/config.py"}], "after_files": [{"content": "import asyncio\nimport inspect\nimport json\nimport logging\nimport logging.config\nimport os\nimport socket\nimport ssl\nimport sys\nfrom typing import List, Tuple\n\nimport click\n\ntry:\n import yaml\nexcept ImportError:\n # If the code below that depends on yaml is exercised, it will raise a NameError.\n # Install the PyYAML package or the uvicorn[standard] optional dependencies to\n # enable this functionality.\n pass\n\nfrom uvicorn.importer import ImportFromStringError, import_from_string\nfrom uvicorn.middleware.asgi2 import ASGI2Middleware\nfrom uvicorn.middleware.debug import DebugMiddleware\nfrom uvicorn.middleware.message_logger import MessageLoggerMiddleware\nfrom uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware\nfrom uvicorn.middleware.wsgi import WSGIMiddleware\n\nTRACE_LOG_LEVEL = 5\n\nLOG_LEVELS = {\n \"critical\": logging.CRITICAL,\n \"error\": logging.ERROR,\n \"warning\": logging.WARNING,\n \"info\": logging.INFO,\n \"debug\": logging.DEBUG,\n \"trace\": TRACE_LOG_LEVEL,\n}\nHTTP_PROTOCOLS = {\n \"auto\": \"uvicorn.protocols.http.auto:AutoHTTPProtocol\",\n \"h11\": \"uvicorn.protocols.http.h11_impl:H11Protocol\",\n \"httptools\": \"uvicorn.protocols.http.httptools_impl:HttpToolsProtocol\",\n}\nWS_PROTOCOLS = {\n \"auto\": \"uvicorn.protocols.websockets.auto:AutoWebSocketsProtocol\",\n \"none\": None,\n \"websockets\": 
\"uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol\",\n \"wsproto\": \"uvicorn.protocols.websockets.wsproto_impl:WSProtocol\",\n}\nLIFESPAN = {\n \"auto\": \"uvicorn.lifespan.on:LifespanOn\",\n \"on\": \"uvicorn.lifespan.on:LifespanOn\",\n \"off\": \"uvicorn.lifespan.off:LifespanOff\",\n}\nLOOP_SETUPS = {\n \"none\": None,\n \"auto\": \"uvicorn.loops.auto:auto_loop_setup\",\n \"asyncio\": \"uvicorn.loops.asyncio:asyncio_setup\",\n \"uvloop\": \"uvicorn.loops.uvloop:uvloop_setup\",\n}\nINTERFACES = [\"auto\", \"asgi3\", \"asgi2\", \"wsgi\"]\n\n\n# Fallback to 'ssl.PROTOCOL_SSLv23' in order to support Python < 3.5.3.\nSSL_PROTOCOL_VERSION = getattr(ssl, \"PROTOCOL_TLS\", ssl.PROTOCOL_SSLv23)\n\n\nLOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"()\": \"uvicorn.logging.DefaultFormatter\",\n \"fmt\": \"%(levelprefix)s %(message)s\",\n \"use_colors\": None,\n },\n \"access\": {\n \"()\": \"uvicorn.logging.AccessFormatter\",\n \"fmt\": '%(levelprefix)s %(client_addr)s - \"%(request_line)s\" %(status_code)s', # noqa: E501\n },\n },\n \"handlers\": {\n \"default\": {\n \"formatter\": \"default\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stderr\",\n },\n \"access\": {\n \"formatter\": \"access\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\",\n },\n },\n \"loggers\": {\n \"uvicorn\": {\"handlers\": [\"default\"], \"level\": \"INFO\"},\n \"uvicorn.error\": {\"level\": \"INFO\"},\n \"uvicorn.access\": {\"handlers\": [\"access\"], \"level\": \"INFO\", \"propagate\": False},\n },\n}\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\ndef create_ssl_context(\n certfile, keyfile, password, ssl_version, cert_reqs, ca_certs, ciphers\n):\n ctx = ssl.SSLContext(ssl_version)\n get_password = (lambda: password) if password else None\n ctx.load_cert_chain(certfile, keyfile, get_password)\n ctx.verify_mode = cert_reqs\n if ca_certs:\n ctx.load_verify_locations(ca_certs)\n if ciphers:\n ctx.set_ciphers(ciphers)\n return ctx\n\n\nclass Config:\n def __init__(\n self,\n app,\n host=\"127.0.0.1\",\n port=8000,\n uds=None,\n fd=None,\n loop=\"auto\",\n http=\"auto\",\n ws=\"auto\",\n lifespan=\"auto\",\n env_file=None,\n log_config=LOGGING_CONFIG,\n log_level=None,\n access_log=True,\n use_colors=None,\n interface=\"auto\",\n debug=False,\n reload=False,\n reload_dirs=None,\n reload_delay=None,\n workers=None,\n proxy_headers=True,\n forwarded_allow_ips=None,\n root_path=\"\",\n limit_concurrency=None,\n limit_max_requests=None,\n backlog=2048,\n timeout_keep_alive=5,\n timeout_notify=30,\n callback_notify=None,\n ssl_keyfile=None,\n ssl_certfile=None,\n ssl_keyfile_password=None,\n ssl_version=SSL_PROTOCOL_VERSION,\n ssl_cert_reqs=ssl.CERT_NONE,\n ssl_ca_certs=None,\n ssl_ciphers=\"TLSv1\",\n headers=None,\n factory=False,\n ):\n self.app = app\n self.host = host\n self.port = port\n self.uds = uds\n self.fd = fd\n self.loop = loop\n self.http = http\n self.ws = ws\n self.lifespan = lifespan\n self.log_config = log_config\n self.log_level = log_level\n self.access_log = access_log\n self.use_colors = use_colors\n self.interface = interface\n self.debug = debug\n self.reload = reload\n self.reload_delay = reload_delay or 0.25\n self.workers = workers or 1\n self.proxy_headers = proxy_headers\n self.root_path = root_path\n self.limit_concurrency = limit_concurrency\n self.limit_max_requests = limit_max_requests\n self.backlog = backlog\n self.timeout_keep_alive = 
timeout_keep_alive\n self.timeout_notify = timeout_notify\n self.callback_notify = callback_notify\n self.ssl_keyfile = ssl_keyfile\n self.ssl_certfile = ssl_certfile\n self.ssl_keyfile_password = ssl_keyfile_password\n self.ssl_version = ssl_version\n self.ssl_cert_reqs = ssl_cert_reqs\n self.ssl_ca_certs = ssl_ca_certs\n self.ssl_ciphers = ssl_ciphers\n self.headers = headers if headers else [] # type: List[str]\n self.encoded_headers = None # type: List[Tuple[bytes, bytes]]\n self.factory = factory\n\n self.loaded = False\n self.configure_logging()\n\n if reload_dirs is None:\n self.reload_dirs = [os.getcwd()]\n else:\n if isinstance(reload_dirs, str):\n self.reload_dirs = [reload_dirs]\n else:\n self.reload_dirs = reload_dirs\n\n if env_file is not None:\n from dotenv import load_dotenv\n\n logger.info(\"Loading environment from '%s'\", env_file)\n load_dotenv(dotenv_path=env_file)\n\n if workers is None and \"WEB_CONCURRENCY\" in os.environ:\n self.workers = int(os.environ[\"WEB_CONCURRENCY\"])\n\n if forwarded_allow_ips is None:\n self.forwarded_allow_ips = os.environ.get(\n \"FORWARDED_ALLOW_IPS\", \"127.0.0.1\"\n )\n else:\n self.forwarded_allow_ips = forwarded_allow_ips\n\n @property\n def asgi_version(self) -> str:\n return {\"asgi2\": \"2.0\", \"asgi3\": \"3.0\", \"wsgi\": \"3.0\"}[self.interface]\n\n @property\n def is_ssl(self) -> bool:\n return bool(self.ssl_keyfile or self.ssl_certfile)\n\n def configure_logging(self):\n logging.addLevelName(TRACE_LOG_LEVEL, \"TRACE\")\n\n if self.log_config is not None:\n if isinstance(self.log_config, dict):\n if self.use_colors in (True, False):\n self.log_config[\"formatters\"][\"default\"][\n \"use_colors\"\n ] = self.use_colors\n self.log_config[\"formatters\"][\"access\"][\n \"use_colors\"\n ] = self.use_colors\n logging.config.dictConfig(self.log_config)\n elif self.log_config.endswith(\".json\"):\n with open(self.log_config) as file:\n loaded_config = json.load(file)\n logging.config.dictConfig(loaded_config)\n elif self.log_config.endswith((\".yaml\", \".yml\")):\n with open(self.log_config) as file:\n loaded_config = yaml.safe_load(file)\n logging.config.dictConfig(loaded_config)\n else:\n # See the note about fileConfig() here:\n # https://docs.python.org/3/library/logging.config.html#configuration-file-format\n logging.config.fileConfig(\n self.log_config, disable_existing_loggers=False\n )\n\n if self.log_level is not None:\n if isinstance(self.log_level, str):\n log_level = LOG_LEVELS[self.log_level]\n else:\n log_level = self.log_level\n logging.getLogger(\"uvicorn.error\").setLevel(log_level)\n logging.getLogger(\"uvicorn.access\").setLevel(log_level)\n logging.getLogger(\"uvicorn.asgi\").setLevel(log_level)\n if self.access_log is False:\n logging.getLogger(\"uvicorn.access\").handlers = []\n logging.getLogger(\"uvicorn.access\").propagate = False\n\n def load(self):\n assert not self.loaded\n\n if self.is_ssl:\n self.ssl = create_ssl_context(\n keyfile=self.ssl_keyfile,\n certfile=self.ssl_certfile,\n password=self.ssl_keyfile_password,\n ssl_version=self.ssl_version,\n cert_reqs=self.ssl_cert_reqs,\n ca_certs=self.ssl_ca_certs,\n ciphers=self.ssl_ciphers,\n )\n else:\n self.ssl = None\n\n encoded_headers = [\n (key.lower().encode(\"latin1\"), value.encode(\"latin1\"))\n for key, value in self.headers\n ]\n self.encoded_headers = (\n encoded_headers\n if b\"server\" in dict(encoded_headers)\n else [(b\"server\", b\"uvicorn\")] + encoded_headers\n ) # type: List[Tuple[bytes, bytes]]\n\n if isinstance(self.http, str):\n 
self.http_protocol_class = import_from_string(HTTP_PROTOCOLS[self.http])\n else:\n self.http_protocol_class = self.http\n\n if isinstance(self.ws, str):\n self.ws_protocol_class = import_from_string(WS_PROTOCOLS[self.ws])\n else:\n self.ws_protocol_class = self.ws\n\n self.lifespan_class = import_from_string(LIFESPAN[self.lifespan])\n\n try:\n self.loaded_app = import_from_string(self.app)\n except ImportFromStringError as exc:\n logger.error(\"Error loading ASGI app. %s\" % exc)\n sys.exit(1)\n\n try:\n self.loaded_app = self.loaded_app()\n except TypeError as exc:\n if self.factory:\n logger.error(\"Error loading ASGI app factory: %s\", exc)\n sys.exit(1)\n else:\n if not self.factory:\n logger.warning(\n \"ASGI app factory detected. Using it, \"\n \"but please consider setting the --factory flag explicitly.\"\n )\n\n if self.interface == \"auto\":\n if inspect.isclass(self.loaded_app):\n use_asgi_3 = hasattr(self.loaded_app, \"__await__\")\n elif inspect.isfunction(self.loaded_app):\n use_asgi_3 = asyncio.iscoroutinefunction(self.loaded_app)\n else:\n call = getattr(self.loaded_app, \"__call__\", None)\n use_asgi_3 = asyncio.iscoroutinefunction(call)\n self.interface = \"asgi3\" if use_asgi_3 else \"asgi2\"\n\n if self.interface == \"wsgi\":\n self.loaded_app = WSGIMiddleware(self.loaded_app)\n self.ws_protocol_class = None\n elif self.interface == \"asgi2\":\n self.loaded_app = ASGI2Middleware(self.loaded_app)\n\n if self.debug:\n self.loaded_app = DebugMiddleware(self.loaded_app)\n if logger.level <= TRACE_LOG_LEVEL:\n self.loaded_app = MessageLoggerMiddleware(self.loaded_app)\n if self.proxy_headers:\n self.loaded_app = ProxyHeadersMiddleware(\n self.loaded_app, trusted_hosts=self.forwarded_allow_ips\n )\n\n self.loaded = True\n\n def setup_event_loop(self):\n loop_setup = import_from_string(LOOP_SETUPS[self.loop])\n if loop_setup is not None:\n loop_setup()\n\n def bind_socket(self):\n family = socket.AF_INET\n addr_format = \"%s://%s:%d\"\n\n if self.host and \":\" in self.host:\n # It's an IPv6 address.\n family = socket.AF_INET6\n addr_format = \"%s://[%s]:%d\"\n\n sock = socket.socket(family=family)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.bind((self.host, self.port))\n except OSError as exc:\n logger.error(exc)\n sys.exit(1)\n sock.set_inheritable(True)\n\n message = f\"Uvicorn running on {addr_format} (Press CTRL+C to quit)\"\n color_message = (\n \"Uvicorn running on \"\n + click.style(addr_format, bold=True)\n + \" (Press CTRL+C to quit)\"\n )\n protocol_name = \"https\" if self.is_ssl else \"http\"\n logger.info(\n message,\n protocol_name,\n self.host,\n self.port,\n extra={\"color_message\": color_message},\n )\n return sock\n\n @property\n def should_reload(self):\n return isinstance(self.app, str) and (self.debug or self.reload)\n", "path": "uvicorn/config.py"}]} |
gh_patches_debug_1340 | rasdani/github-patches | git_diff | ckan__ckan-5502 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix line breaks in translatable strings
I've got reports of confusing strings from translators, eg:

There is a line break and extra spaces in the middle of the source string (`msgid`) and it's unclear to users if they should keep it in their translations.
This is what the `msgid` looks like:
```
#: ckan/templates/snippets/changes/extension_fields.html:3
msgid ""
"Changed value of field <q>{key}</q> to <q>{value}</q> in\n"
" {pkg_link}"
msgstr ""
```
The source file contains a line break probably created by an overzealous code formatter:
```
{{ _('Changed value of field <q>{key}</q> to <q>{value}</q> in
{pkg_link}')
```
Most if not all of the strings seemed to be part of the snippets added in the `changes` folder in https://github.com/ckan/ckan/pull/4929, so I think it's ok if, for now, we manually fix the strings in the snippets, extract them and update the msgids in the po files using the `ckan translation sync-msgids` command we introduced in #5339
--- END ISSUE ---
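As an illustrative aside (not part of the issue above, and not taken from this record's patch): the confusing msgid arises because the line break and the continuation line's indentation inside the wrapped template literal become part of the extracted source string. The sketch below reproduces the two forms as plain Python strings; the single-line form is the kind of cleanup the issue proposes, and the exact amount of indentation is assumed.
```python
# Illustrative only: the wrapped msgid reported in the issue vs. the single-line
# form a translator should see.
wrapped_msgid = ('Changed value of field <q>{key}</q> to <q>{value}</q> in\n'
                 '                    {pkg_link}')
clean_msgid = 'Changed value of field <q>{key}</q> to <q>{value}</q> in {pkg_link}'

# Apart from layout whitespace, the two carry the same text:
assert ' '.join(wrapped_msgid.split()) == clean_msgid

print(repr(wrapped_msgid))  # embedded "\n" plus a run of spaces
print(repr(clean_msgid))    # one clean line
```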
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/logic/validators.py`
Content:
```
1
2 # encoding: utf-8
3
4 import collections
5 import datetime
6 from itertools import count
7 import re
8 import mimetypes
9 import string
10 import json
11
12 from six import string_types, iteritems
13 from six.moves.urllib.parse import urlparse
14
15 import ckan.lib.navl.dictization_functions as df
16 import ckan.logic as logic
17 import ckan.lib.helpers as h
18 from ckan.model import (MAX_TAG_LENGTH, MIN_TAG_LENGTH,
19 PACKAGE_NAME_MIN_LENGTH, PACKAGE_NAME_MAX_LENGTH,
20 PACKAGE_VERSION_MAX_LENGTH,
21 VOCABULARY_NAME_MAX_LENGTH,
22 VOCABULARY_NAME_MIN_LENGTH)
23 import ckan.authz as authz
24 from ckan.model.core import State
25
26 from ckan.common import _
27
28 Invalid = df.Invalid
29 StopOnError = df.StopOnError
30 Missing = df.Missing
31 missing = df.missing
32
33
34 def owner_org_validator(key, data, errors, context):
35
36 value = data.get(key)
37
38 if value is missing or value is None:
39 if not authz.check_config_permission('create_unowned_dataset'):
40 raise Invalid(_('An organization must be provided'))
41 data.pop(key, None)
42 raise df.StopOnError
43
44 model = context['model']
45 user = context['user']
46 user = model.User.get(user)
47 if value == '':
48 if not authz.check_config_permission('create_unowned_dataset'):
49 raise Invalid(_('An organization must be provided'))
50 return
51
52 if (authz.check_config_permission('allow_dataset_collaborators')
53 and not authz.check_config_permission('allow_collaborators_to_change_owner_org')):
54
55 package = context.get('package')
56 if package and user and not user.sysadmin:
57 is_collaborator = authz.user_is_collaborator_on_dataset(
58 user.id, package.id, ['admin', 'editor'])
59 if is_collaborator:
60 # User is a collaborator, check if it's also a member with
61 # edit rights of the current organization (redundant, but possible)
62 user_orgs = logic.get_action(
63 'organization_list_for_user')(
64 {'ignore_auth': True}, {'id': user.id, 'permission': 'update_dataset'})
65 user_is_org_member = package.owner_org in [org['id'] for org in user_orgs]
66 if data.get(key) != package.owner_org and not user_is_org_member:
67 raise Invalid(_('You cannot move this dataset to another organization'))
68
69 group = model.Group.get(value)
70 if not group:
71 raise Invalid(_('Organization does not exist'))
72 group_id = group.id
73 if not context.get(u'ignore_auth', False) and not(user.sysadmin or
74 authz.has_user_permission_for_group_or_org(
75 group_id, user.name, 'create_dataset')):
76 raise Invalid(_('You cannot add a dataset to this organization'))
77 data[key] = group_id
78
79
80 def package_id_not_changed(value, context):
81
82 package = context.get('package')
83 if package and value != package.id:
84 raise Invalid('Cannot change value of key from %s to %s. '
85 'This key is read-only' % (package.id, value))
86 return value
87
88 def int_validator(value, context):
89 '''
90 Return an integer for value, which may be a string in base 10 or
91 a numeric type (e.g. int, long, float, Decimal, Fraction). Return
92 None for None or empty/all-whitespace string values.
93
94 :raises: ckan.lib.navl.dictization_functions.Invalid for other
95 inputs or non-whole values
96 '''
97 if value is None:
98 return None
99 if hasattr(value, 'strip') and not value.strip():
100 return None
101
102 try:
103 whole, part = divmod(value, 1)
104 except TypeError:
105 try:
106 return int(value)
107 except (TypeError, ValueError):
108 pass
109 else:
110 if not part:
111 try:
112 return int(whole)
113 except TypeError:
114 pass # complex number: fail like int(complex) does
115
116 raise Invalid(_('Invalid integer'))
117
118 def natural_number_validator(value, context):
119 value = int_validator(value, context)
120 if value < 0:
121 raise Invalid(_('Must be a natural number'))
122 return value
123
124 def is_positive_integer(value, context):
125 value = int_validator(value, context)
126 if value < 1:
127 raise Invalid(_('Must be a postive integer'))
128 return value
129
130 def boolean_validator(value, context):
131 '''
132 Return a boolean for value.
133 Return value when value is a python bool type.
134 Return True for strings 'true', 'yes', 't', 'y', and '1'.
135 Return False in all other cases, including when value is an empty string or
136 None
137 '''
138 if value is missing or value is None:
139 return False
140 if isinstance(value, bool):
141 return value
142 if value.lower() in ['true', 'yes', 't', 'y', '1']:
143 return True
144 return False
145
146 def isodate(value, context):
147 if isinstance(value, datetime.datetime):
148 return value
149 if value == '':
150 return None
151 try:
152 date = h.date_str_to_datetime(value)
153 except (TypeError, ValueError) as e:
154 raise Invalid(_('Date format incorrect'))
155 return date
156
157 def no_http(value, context):
158
159 model = context['model']
160 session = context['session']
161
162 if 'http:' in value:
163 raise Invalid(_('No links are allowed in the log_message.'))
164 return value
165
166 def package_id_exists(value, context):
167
168 model = context['model']
169 session = context['session']
170
171 result = session.query(model.Package).get(value)
172 if not result:
173 raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))
174 return value
175
176 def package_id_does_not_exist(value, context):
177
178 model = context['model']
179 session = context['session']
180
181 result = session.query(model.Package).get(value)
182 if result:
183 raise Invalid(_('Dataset id already exists'))
184 return value
185
186 def package_name_exists(value, context):
187
188 model = context['model']
189 session = context['session']
190
191 result = session.query(model.Package).filter_by(name=value).first()
192
193 if not result:
194 raise Invalid(_('Not found') + ': %s' % value)
195 return value
196
197 def package_id_or_name_exists(package_id_or_name, context):
198 '''Return the given package_id_or_name if such a package exists.
199
200 :raises: ckan.lib.navl.dictization_functions.Invalid if there is no
201 package with the given id or name
202
203 '''
204 model = context['model']
205 session = context['session']
206
207 result = session.query(model.Package).get(package_id_or_name)
208 if result:
209 return package_id_or_name
210
211 result = session.query(model.Package).filter_by(
212 name=package_id_or_name).first()
213
214 if not result:
215 raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))
216
217 return package_id_or_name
218
219
220 def resource_id_exists(value, context):
221 model = context['model']
222 session = context['session']
223 if not session.query(model.Resource).get(value):
224 raise Invalid('%s: %s' % (_('Not found'), _('Resource')))
225 return value
226
227
228 def user_id_exists(user_id, context):
229 '''Raises Invalid if the given user_id does not exist in the model given
230 in the context, otherwise returns the given user_id.
231
232 '''
233 model = context['model']
234 session = context['session']
235
236 result = session.query(model.User).get(user_id)
237 if not result:
238 raise Invalid('%s: %s' % (_('Not found'), _('User')))
239 return user_id
240
241 def user_id_or_name_exists(user_id_or_name, context):
242 '''Return the given user_id_or_name if such a user exists.
243
244 :raises: ckan.lib.navl.dictization_functions.Invalid if no user can be
245 found with the given id or user name
246
247 '''
248 model = context['model']
249 session = context['session']
250 result = session.query(model.User).get(user_id_or_name)
251 if result:
252 return user_id_or_name
253 result = session.query(model.User).filter_by(name=user_id_or_name).first()
254 if not result:
255 raise Invalid('%s: %s' % (_('Not found'), _('User')))
256 return user_id_or_name
257
258 def group_id_exists(group_id, context):
259 '''Raises Invalid if the given group_id does not exist in the model given
260 in the context, otherwise returns the given group_id.
261
262 '''
263 model = context['model']
264 session = context['session']
265
266 result = session.query(model.Group).get(group_id)
267 if not result:
268 raise Invalid('%s: %s' % (_('Not found'), _('Group')))
269 return group_id
270
271 def group_id_or_name_exists(reference, context):
272 '''
273 Raises Invalid if a group identified by the name or id cannot be found.
274 '''
275 model = context['model']
276 result = model.Group.get(reference)
277 if not result:
278 raise Invalid(_('That group name or ID does not exist.'))
279 return reference
280
281 def activity_type_exists(activity_type):
282 '''Raises Invalid if there is no registered activity renderer for the
283 given activity_type. Otherwise returns the given activity_type.
284
285 This just uses object_id_validators as a lookup.
286 very safe.
287
288 '''
289 if activity_type in object_id_validators:
290 return activity_type
291 else:
292 raise Invalid('%s: %s' % (_('Not found'), _('Activity type')))
293
294
295 # A dictionary mapping activity_type values from activity dicts to functions
296 # for validating the object_id values from those same activity dicts.
297 object_id_validators = {
298 'new package' : package_id_exists,
299 'changed package' : package_id_exists,
300 'deleted package' : package_id_exists,
301 'follow dataset' : package_id_exists,
302 'new user' : user_id_exists,
303 'changed user' : user_id_exists,
304 'follow user' : user_id_exists,
305 'new group' : group_id_exists,
306 'changed group' : group_id_exists,
307 'deleted group' : group_id_exists,
308 'new organization' : group_id_exists,
309 'changed organization' : group_id_exists,
310 'deleted organization' : group_id_exists,
311 'follow group' : group_id_exists,
312 }
313
314 def object_id_validator(key, activity_dict, errors, context):
315 '''Validate the 'object_id' value of an activity_dict.
316
317 Uses the object_id_validators dict (above) to find and call an 'object_id'
318 validator function for the given activity_dict's 'activity_type' value.
319
320 Raises Invalid if the model given in context contains no object of the
321 correct type (according to the 'activity_type' value of the activity_dict)
322 with the given ID.
323
324 Raises Invalid if there is no object_id_validator for the activity_dict's
325 'activity_type' value.
326
327 '''
328 activity_type = activity_dict[('activity_type',)]
329 if activity_type in object_id_validators:
330 object_id = activity_dict[('object_id',)]
331 return object_id_validators[activity_type](object_id, context)
332 else:
333 raise Invalid('There is no object_id validator for '
334 'activity type "%s"' % activity_type)
335
336 name_match = re.compile('[a-z0-9_\-]*$')
337 def name_validator(value, context):
338 '''Return the given value if it's a valid name, otherwise raise Invalid.
339
340 If it's a valid name, the given value will be returned unmodified.
341
342 This function applies general validation rules for names of packages,
343 groups, users, etc.
344
345 Most schemas also have their own custom name validator function to apply
346 custom validation rules after this function, for example a
347 ``package_name_validator()`` to check that no package with the given name
348 already exists.
349
350 :raises ckan.lib.navl.dictization_functions.Invalid: if ``value`` is not
351 a valid name
352
353 '''
354 if not isinstance(value, string_types):
355 raise Invalid(_('Names must be strings'))
356
357 # check basic textual rules
358 if value in ['new', 'edit', 'search']:
359 raise Invalid(_('That name cannot be used'))
360
361 if len(value) < 2:
362 raise Invalid(_('Must be at least %s characters long') % 2)
363 if len(value) > PACKAGE_NAME_MAX_LENGTH:
364 raise Invalid(_('Name must be a maximum of %i characters long') % \
365 PACKAGE_NAME_MAX_LENGTH)
366 if not name_match.match(value):
367 raise Invalid(_('Must be purely lowercase alphanumeric '
368 '(ascii) characters and these symbols: -_'))
369 return value
370
371 def package_name_validator(key, data, errors, context):
372 model = context['model']
373 session = context['session']
374 package = context.get('package')
375
376 query = session.query(model.Package.state).filter_by(name=data[key])
377 if package:
378 package_id = package.id
379 else:
380 package_id = data.get(key[:-1] + ('id',))
381 if package_id and package_id is not missing:
382 query = query.filter(model.Package.id != package_id)
383 result = query.first()
384 if result and result.state != State.DELETED:
385 errors[key].append(_('That URL is already in use.'))
386
387 value = data[key]
388 if len(value) < PACKAGE_NAME_MIN_LENGTH:
389 raise Invalid(
390 _('Name "%s" length is less than minimum %s') % (value, PACKAGE_NAME_MIN_LENGTH)
391 )
392 if len(value) > PACKAGE_NAME_MAX_LENGTH:
393 raise Invalid(
394 _('Name "%s" length is more than maximum %s') % (value, PACKAGE_NAME_MAX_LENGTH)
395 )
396
397 def package_version_validator(value, context):
398
399 if len(value) > PACKAGE_VERSION_MAX_LENGTH:
400 raise Invalid(_('Version must be a maximum of %i characters long') % \
401 PACKAGE_VERSION_MAX_LENGTH)
402 return value
403
404 def duplicate_extras_key(key, data, errors, context):
405
406 unflattened = df.unflatten(data)
407 extras = unflattened.get('extras', [])
408 extras_keys = []
409 for extra in extras:
410 if not extra.get('deleted'):
411 extras_keys.append(extra['key'])
412
413 for extra_key in set(extras_keys):
414 extras_keys.remove(extra_key)
415 if extras_keys:
416 key_ = ('extras_validation',)
417 assert key_ not in errors
418 errors[key_] = [_('Duplicate key "%s"') % extras_keys[0]]
419
420 def group_name_validator(key, data, errors, context):
421 model = context['model']
422 session = context['session']
423 group = context.get('group')
424
425 query = session.query(model.Group.name).filter_by(name=data[key])
426 if group:
427 group_id = group.id
428 else:
429 group_id = data.get(key[:-1] + ('id',))
430 if group_id and group_id is not missing:
431 query = query.filter(model.Group.id != group_id)
432 result = query.first()
433 if result:
434 errors[key].append(_('Group name already exists in database'))
435
436 def tag_length_validator(value, context):
437
438 if len(value) < MIN_TAG_LENGTH:
439 raise Invalid(
440 _('Tag "%s" length is less than minimum %s') % (value, MIN_TAG_LENGTH)
441 )
442 if len(value) > MAX_TAG_LENGTH:
443 raise Invalid(
444 _('Tag "%s" length is more than maximum %i') % (value, MAX_TAG_LENGTH)
445 )
446 return value
447
448 def tag_name_validator(value, context):
449
450 tagname_match = re.compile('[\w \-.]*$', re.UNICODE)
451 if not tagname_match.match(value):
452 raise Invalid(_('Tag "%s" must be alphanumeric '
453 'characters or symbols: -_.') % (value))
454 return value
455
456 def tag_not_uppercase(value, context):
457
458 tagname_uppercase = re.compile('[A-Z]')
459 if tagname_uppercase.search(value):
460 raise Invalid(_('Tag "%s" must not be uppercase' % (value)))
461 return value
462
463 def tag_string_convert(key, data, errors, context):
464 '''Takes a list of tags that is a comma-separated string (in data[key])
465 and parses tag names. These are added to the data dict, enumerated. They
466 are also validated.'''
467
468 if isinstance(data[key], string_types):
469 tags = [tag.strip() \
470 for tag in data[key].split(',') \
471 if tag.strip()]
472 else:
473 tags = data[key]
474
475 current_index = max( [int(k[1]) for k in data.keys() if len(k) == 3 and k[0] == 'tags'] + [-1] )
476
477 for num, tag in zip(count(current_index+1), tags):
478 data[('tags', num, 'name')] = tag
479
480 for tag in tags:
481 tag_length_validator(tag, context)
482 tag_name_validator(tag, context)
483
484 def ignore_not_admin(key, data, errors, context):
485 # Deprecated in favour of ignore_not_package_admin
486 return ignore_not_package_admin(key, data, errors, context)
487
488 def ignore_not_package_admin(key, data, errors, context):
489 '''Ignore if the user is not allowed to administer the package specified.'''
490
491 model = context['model']
492 user = context.get('user')
493
494 if 'ignore_auth' in context:
495 return
496
497 if user and authz.is_sysadmin(user):
498 return
499
500 authorized = False
501 pkg = context.get('package')
502 if pkg:
503 try:
504 logic.check_access('package_change_state',context)
505 authorized = True
506 except logic.NotAuthorized:
507 authorized = False
508
509 if (user and pkg and authorized):
510 return
511
512 # allow_state_change in the context will allow the state to be changed
513 # FIXME is this the best way to cjeck for state only?
514 if key == ('state',) and context.get('allow_state_change'):
515 return
516 data.pop(key)
517
518
519 def ignore_not_sysadmin(key, data, errors, context):
520 '''Ignore the field if user not sysadmin or ignore_auth in context.'''
521
522 user = context.get('user')
523 ignore_auth = context.get('ignore_auth')
524 if ignore_auth or (user and authz.is_sysadmin(user)):
525 return
526
527 data.pop(key)
528
529
530 def ignore_not_group_admin(key, data, errors, context):
531 '''Ignore if the user is not allowed to administer for the group specified.'''
532
533 model = context['model']
534 user = context.get('user')
535
536 if user and authz.is_sysadmin(user):
537 return
538
539 authorized = False
540 group = context.get('group')
541 if group:
542 try:
543 logic.check_access('group_change_state',context)
544 authorized = True
545 except logic.NotAuthorized:
546 authorized = False
547
548 if (user and group and authorized):
549 return
550
551 data.pop(key)
552
553 def user_name_validator(key, data, errors, context):
554 '''Validate a new user name.
555
556 Append an error message to ``errors[key]`` if a user named ``data[key]``
557 already exists. Otherwise, do nothing.
558
559 :raises ckan.lib.navl.dictization_functions.Invalid: if ``data[key]`` is
560 not a string
561 :rtype: None
562
563 '''
564 model = context['model']
565 new_user_name = data[key]
566
567 if not isinstance(new_user_name, string_types):
568 raise Invalid(_('User names must be strings'))
569
570 user = model.User.get(new_user_name)
571 user_obj_from_context = context.get('user_obj')
572 if user is not None:
573 # A user with new_user_name already exists in the database.
574 if user_obj_from_context and user_obj_from_context.id == user.id:
575 # If there's a user_obj in context with the same id as the user
576 # found in the db, then we must be doing a user_update and not
577 # updating the user name, so don't return an error.
578 return
579 else:
580 # Otherwise return an error: there's already another user with that
581 # name, so you can create a new user with that name or update an
582 # existing user's name to that name.
583 errors[key].append(_('That login name is not available.'))
584 elif user_obj_from_context:
585 old_user = model.User.get(user_obj_from_context.id)
586 if old_user is not None and old_user.state != model.State.PENDING:
587 errors[key].append(_('That login name can not be modified.'))
588 else:
589 return
590
591 def user_both_passwords_entered(key, data, errors, context):
592
593 password1 = data.get(('password1',),None)
594 password2 = data.get(('password2',),None)
595
596 if password1 is None or password1 == '' or \
597 password2 is None or password2 == '':
598 errors[('password',)].append(_('Please enter both passwords'))
599
600 def user_password_validator(key, data, errors, context):
601 value = data[key]
602
603 if isinstance(value, Missing):
604 pass
605 elif not isinstance(value, string_types):
606 errors[('password',)].append(_('Passwords must be strings'))
607 elif value == '':
608 pass
609 elif len(value) < 8:
610 errors[('password',)].append(_('Your password must be 8 characters or '
611 'longer'))
612
613 def user_passwords_match(key, data, errors, context):
614
615 password1 = data.get(('password1',),None)
616 password2 = data.get(('password2',),None)
617
618 if not password1 == password2:
619 errors[key].append(_('The passwords you entered do not match'))
620 else:
621 #Set correct password
622 data[('password',)] = password1
623
624 def user_password_not_empty(key, data, errors, context):
625 '''Only check if password is present if the user is created via action API.
626 If not, user_both_passwords_entered will handle the validation'''
627 # sysadmin may provide password_hash directly for importing users
628 if (data.get(('password_hash',), missing) is not missing and
629 authz.is_sysadmin(context.get('user'))):
630 return
631
632 if not ('password1',) in data and not ('password2',) in data:
633 password = data.get(('password',),None)
634 if not password:
635 errors[key].append(_('Missing value'))
636
637 def user_about_validator(value,context):
638 if 'http://' in value or 'https://' in value:
639 raise Invalid(_('Edit not allowed as it looks like spam. Please avoid links in your description.'))
640
641 return value
642
643 def vocabulary_name_validator(name, context):
644 model = context['model']
645 session = context['session']
646
647 if len(name) < VOCABULARY_NAME_MIN_LENGTH:
648 raise Invalid(_('Name must be at least %s characters long') %
649 VOCABULARY_NAME_MIN_LENGTH)
650 if len(name) > VOCABULARY_NAME_MAX_LENGTH:
651 raise Invalid(_('Name must be a maximum of %i characters long') %
652 VOCABULARY_NAME_MAX_LENGTH)
653 query = session.query(model.Vocabulary.name).filter_by(name=name)
654 result = query.first()
655 if result:
656 raise Invalid(_('That vocabulary name is already in use.'))
657 return name
658
659 def vocabulary_id_not_changed(value, context):
660 vocabulary = context.get('vocabulary')
661 if vocabulary and value != vocabulary.id:
662 raise Invalid(_('Cannot change value of key from %s to %s. '
663 'This key is read-only') % (vocabulary.id, value))
664 return value
665
666 def vocabulary_id_exists(value, context):
667 model = context['model']
668 session = context['session']
669 result = session.query(model.Vocabulary).get(value)
670 if not result:
671 raise Invalid(_('Tag vocabulary was not found.'))
672 return value
673
674 def tag_in_vocabulary_validator(value, context):
675 model = context['model']
676 session = context['session']
677 vocabulary = context.get('vocabulary')
678 if vocabulary:
679 query = session.query(model.Tag)\
680 .filter(model.Tag.vocabulary_id==vocabulary.id)\
681 .filter(model.Tag.name==value)\
682 .count()
683 if not query:
684 raise Invalid(_('Tag %s does not belong to vocabulary %s') % (value, vocabulary.name))
685 return value
686
687 def tag_not_in_vocabulary(key, tag_dict, errors, context):
688 tag_name = tag_dict[('name',)]
689 if not tag_name:
690 raise Invalid(_('No tag name'))
691 if ('vocabulary_id',) in tag_dict:
692 vocabulary_id = tag_dict[('vocabulary_id',)]
693 else:
694 vocabulary_id = None
695 model = context['model']
696 session = context['session']
697
698 query = session.query(model.Tag)
699 query = query.filter(model.Tag.vocabulary_id==vocabulary_id)
700 query = query.filter(model.Tag.name==tag_name)
701 count = query.count()
702 if count > 0:
703 raise Invalid(_('Tag %s already belongs to vocabulary %s') %
704 (tag_name, vocabulary_id))
705 else:
706 return
707
708 def url_validator(key, data, errors, context):
709 ''' Checks that the provided value (if it is present) is a valid URL '''
710
711 url = data.get(key, None)
712 if not url:
713 return
714
715 try:
716 pieces = urlparse(url)
717 if all([pieces.scheme, pieces.netloc]) and \
718 set(pieces.netloc) <= set(string.ascii_letters + string.digits + '-.') and \
719 pieces.scheme in ['http', 'https']:
720 return
721 except ValueError:
722 # url is invalid
723 pass
724
725 errors[key].append(_('Please provide a valid URL'))
726
727
728 def user_name_exists(user_name, context):
729 model = context['model']
730 session = context['session']
731 result = session.query(model.User).filter_by(name=user_name).first()
732 if not result:
733 raise Invalid('%s: %s' % (_('Not found'), _('User')))
734 return result.name
735
736
737 def role_exists(role, context):
738 if role not in authz.ROLE_PERMISSIONS:
739 raise Invalid(_('role does not exist.'))
740 return role
741
742
743 def datasets_with_no_organization_cannot_be_private(key, data, errors,
744 context):
745
746 dataset_id = data.get(('id',))
747 owner_org = data.get(('owner_org',))
748 private = data[key] is True
749
750 check_passed = True
751
752 if not dataset_id and private and not owner_org:
753 # When creating a dataset, enforce it directly
754 check_passed = False
755 elif dataset_id and private and not owner_org:
756 # Check if the dataset actually has an owner_org, even if not provided
757 try:
758 dataset_dict = logic.get_action('package_show')({},
759 {'id': dataset_id})
760 if not dataset_dict.get('owner_org'):
761 check_passed = False
762
763 except logic.NotFound:
764 check_passed = False
765
766 if not check_passed:
767 errors[key].append(
768 _("Datasets with no organization can't be private."))
769
770
771 def list_of_strings(key, data, errors, context):
772 value = data.get(key)
773 if not isinstance(value, list):
774 raise Invalid(_('Not a list'))
775 for x in value:
776 if not isinstance(x, string_types):
777 raise Invalid('%s: %s' % (_('Not a string'), x))
778
779 def if_empty_guess_format(key, data, errors, context):
780 value = data[key]
781 resource_id = data.get(key[:-1] + ('id',))
782
783 # if resource_id then an update
784 if (not value or value is Missing) and not resource_id:
785 url = data.get(key[:-1] + ('url',), '')
786 if not url:
787 return
788 mimetype, encoding = mimetypes.guess_type(url)
789 if mimetype:
790 data[key] = mimetype
791
792 def clean_format(format):
793 return h.unified_resource_format(format)
794
795 def no_loops_in_hierarchy(key, data, errors, context):
796 '''Checks that the parent groups specified in the data would not cause
797 a loop in the group hierarchy, and therefore cause the recursion up/down
798 the hierarchy to get into an infinite loop.
799 '''
800 if not 'id' in data:
801 # Must be a new group - has no children, so no chance of loops
802 return
803 group = context['model'].Group.get(data['id'])
804 allowable_parents = group.\
805 groups_allowed_to_be_its_parent(type=group.type)
806 for parent in data['groups']:
807 parent_name = parent['name']
808 # a blank name signifies top level, which is always allowed
809 if parent_name and context['model'].Group.get(parent_name) \
810 not in allowable_parents:
811 raise Invalid(_('This parent would create a loop in the '
812 'hierarchy'))
813
814
815 def filter_fields_and_values_should_have_same_length(key, data, errors, context):
816 convert_to_list_if_string = logic.converters.convert_to_list_if_string
817 fields = convert_to_list_if_string(data.get(('filter_fields',), []))
818 values = convert_to_list_if_string(data.get(('filter_values',), []))
819
820 if len(fields) != len(values):
821 msg = _('"filter_fields" and "filter_values" should have the same length')
822 errors[('filter_fields',)].append(msg)
823 errors[('filter_values',)].append(msg)
824
825
826 def filter_fields_and_values_exist_and_are_valid(key, data, errors, context):
827 convert_to_list_if_string = logic.converters.convert_to_list_if_string
828 fields = convert_to_list_if_string(data.get(('filter_fields',)))
829 values = convert_to_list_if_string(data.get(('filter_values',)))
830
831 if not fields:
832 errors[('filter_fields',)].append(_('"filter_fields" is required when '
833 '"filter_values" is filled'))
834 if not values:
835 errors[('filter_values',)].append(_('"filter_values" is required when '
836 '"filter_fields" is filled'))
837
838 filters = collections.defaultdict(list)
839 for field, value in zip(fields, values):
840 filters[field].append(value)
841
842 data[('filters',)] = dict(filters)
843
844
845 def extra_key_not_in_root_schema(key, data, errors, context):
846
847 for schema_key in context.get('schema_keys', []):
848 if schema_key == data[key]:
849 raise Invalid(_('There is a schema field with the same name'))
850
851
852 def empty_if_not_sysadmin(key, data, errors, context):
853 '''Only sysadmins may pass this value'''
854 from ckan.lib.navl.validators import empty
855
856 user = context.get('user')
857
858 ignore_auth = context.get('ignore_auth')
859 if ignore_auth or (user and authz.is_sysadmin(user)):
860 return
861
862 empty(key, data, errors, context)
863
864 #pattern from https://html.spec.whatwg.org/#e-mail-state-(type=email)
865 email_pattern = re.compile(
866 # additional pattern to reject malformed dots usage
867 r"^(?!\.)(?!.*\.$)(?!.*?\.\.)"\
868 "[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9]"\
869 "(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9]"\
870 "(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"
871 )
872
873
874 def email_validator(value, context):
875 '''Validate email input '''
876
877 if value:
878 if not email_pattern.match(value):
879 raise Invalid(_('Email {email} is not a valid format').format(email=value))
880 return value
881
882 def collect_prefix_validate(prefix, *validator_names):
883 """
884 Return a validator that will collect top-level keys starting with
885 prefix then apply validator_names to each one. Results are moved
886 to a dict under the prefix name, with prefix removed from keys
887 """
888 validator_fns = [logic.get_validator(v) for v in validator_names]
889
890 def prefix_validator(key, data, errors, context):
891 out = {}
892 extras = data.get(('__extras',), {})
893
894 # values passed as lists of dicts will have been flattened into __junk
895 junk = df.unflatten(data.get(('__junk',), {}))
896 for field_name in junk:
897 if not field_name.startswith(prefix):
898 continue
899 extras[field_name] = junk[field_name]
900
901 for field_name in list(extras):
902 if not field_name.startswith(prefix):
903 continue
904 data[(field_name,)] = extras.pop(field_name)
905 for v in validator_fns:
906 try:
907 df.convert(v, (field_name,), data, errors, context)
908 except df.StopOnError:
909 break
910 out[field_name[len(prefix):]] = data.pop((field_name,))
911
912 data[(prefix,)] = out
913
914 return prefix_validator
915
916
917 def dict_only(value):
918 if not isinstance(value, dict):
919 raise Invalid(_('Must be a dict'))
920 return value
921
922 def email_is_unique(key, data, errors, context):
923 '''Validate email is unique'''
924 model = context['model']
925 session = context['session']
926
927 users = session.query(model.User) \
928 .filter(model.User.email == data[key]).all()
929 # is there is no users with this email it's free
930 if not users:
931 return
932 else:
933 # allow user to update their own email
934 for user in users:
935 if (user.name == data[("name",)]
936 or user.id == data[("id",)]):
937 return
938
939 raise Invalid(
940 _('The email address \'{email}\' \
941 belongs to a registered user.').
942 format(email=data[key]))
943
944 def one_of(list_of_value):
945 ''' Checks if the provided value is present in a list '''
946 def callable(value):
947 if value not in list_of_value:
948 raise Invalid(_('Value must be one of {}'.format(list_of_value)))
949 return value
950 return callable
951
952
953 def json_object(value):
954 ''' Make sure value can be serialized as a JSON object'''
955 if value is None or value == '':
956 return
957 try:
958 if not json.dumps(value).startswith('{'):
959 raise Invalid(_('The value should be a valid JSON object'))
960 except ValueError as e:
961 raise Invalid(_('Could not parse the value as a valid JSON object'))
962
963 return value
964
965
966 def extras_valid_json(extras, context):
967 try:
968 for extra, value in iteritems(extras):
969 json.dumps(value)
970 except ValueError as e:
971 raise Invalid(_(u'Could not parse extra \'{name}\' as valid JSON').
972 format(name=extra))
973 return extras
974
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/logic/validators.py b/ckan/logic/validators.py
--- a/ckan/logic/validators.py
+++ b/ckan/logic/validators.py
@@ -937,9 +937,8 @@
return
raise Invalid(
- _('The email address \'{email}\' \
- belongs to a registered user.').
- format(email=data[key]))
+ _('The email address \'{email}\' belongs to a registered user.').format(email=data[key]))
+
def one_of(list_of_value):
''' Checks if the provided value is present in a list '''
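For clarity, an editorial sketch (not part of the dataset record): the patch above removes a backslash line continuation inside the translatable literal. In Python, the continuation drops the newline but keeps the continued line's indentation, so the old literal bakes a run of spaces into the extracted msgid, while the single-line literal gives translators one clean string. A minimal runnable illustration, using the stdlib `gettext` as a stand-in for `ckan.common._`:
```python
from gettext import gettext as _  # stand-in for ckan.common._; no catalog is loaded

# Before the patch: the backslash continuation keeps the next line's indentation.
before = _('The email address \'{email}\' \
                   belongs to a registered user.')

# After the patch: a single literal, so the extracted msgid is one clean line.
after = _('The email address \'{email}\' belongs to a registered user.')

print(repr(before))  # note the embedded run of spaces before "belongs"
print(repr(after))
print(after.format(email='user@example.com'))  # formatting behaviour is unchanged
```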
| {"golden_diff": "diff --git a/ckan/logic/validators.py b/ckan/logic/validators.py\n--- a/ckan/logic/validators.py\n+++ b/ckan/logic/validators.py\n@@ -937,9 +937,8 @@\n return\n \n raise Invalid(\n- _('The email address \\'{email}\\' \\\n- belongs to a registered user.').\n- format(email=data[key]))\n+ _('The email address \\'{email}\\' belongs to a registered user.').format(email=data[key]))\n+\n \n def one_of(list_of_value):\n ''' Checks if the provided value is present in a list '''\n", "issue": "Fix line breaks in translatable strings\nI've got reports of confusing strings from translators, eg:\r\n\r\n\r\n\r\nThere is a line break and extra spaces in the middle of the source string (`msgid`) and it's unclear to users if they should keep it in their translations.\r\n\r\nThis is how the `msgid` looks like:\r\n\r\n```\r\n#: ckan/templates/snippets/changes/extension_fields.html:3 \r\nmsgid \"\" \r\n\"Changed value of field <q>{key}</q> to <q>{value}</q> in\\n\" \r\n\" {pkg_link}\" \r\nmsgstr \"\" \r\n```\r\n\r\nThe source file contains a line break probably created by an overzealous code formatter:\r\n```\r\n {{ _('Changed value of field <q>{key}</q> to <q>{value}</q> in\r\n {pkg_link}')\r\n```\r\n\r\nMost if not all the strings seemed to be part of the snippets added in the `changes` folder on https://github.com/ckan/ckan/pull/4929 so I think it's ok if for now we manually fix the strings in the snippets, extract them and update the msgids on the po files using the `ckan translation sync-msgids` command we introduced in #5339 \r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\n# encoding: utf-8\n\nimport collections\nimport datetime\nfrom itertools import count\nimport re\nimport mimetypes\nimport string\nimport json\n\nfrom six import string_types, iteritems\nfrom six.moves.urllib.parse import urlparse\n\nimport ckan.lib.navl.dictization_functions as df\nimport ckan.logic as logic\nimport ckan.lib.helpers as h\nfrom ckan.model import (MAX_TAG_LENGTH, MIN_TAG_LENGTH,\n PACKAGE_NAME_MIN_LENGTH, PACKAGE_NAME_MAX_LENGTH,\n PACKAGE_VERSION_MAX_LENGTH,\n VOCABULARY_NAME_MAX_LENGTH,\n VOCABULARY_NAME_MIN_LENGTH)\nimport ckan.authz as authz\nfrom ckan.model.core import State\n\nfrom ckan.common import _\n\nInvalid = df.Invalid\nStopOnError = df.StopOnError\nMissing = df.Missing\nmissing = df.missing\n\n\ndef owner_org_validator(key, data, errors, context):\n\n value = data.get(key)\n\n if value is missing or value is None:\n if not authz.check_config_permission('create_unowned_dataset'):\n raise Invalid(_('An organization must be provided'))\n data.pop(key, None)\n raise df.StopOnError\n\n model = context['model']\n user = context['user']\n user = model.User.get(user)\n if value == '':\n if not authz.check_config_permission('create_unowned_dataset'):\n raise Invalid(_('An organization must be provided'))\n return\n\n if (authz.check_config_permission('allow_dataset_collaborators')\n and not authz.check_config_permission('allow_collaborators_to_change_owner_org')):\n\n package = context.get('package')\n if package and user and not user.sysadmin:\n is_collaborator = authz.user_is_collaborator_on_dataset(\n user.id, package.id, ['admin', 'editor'])\n if is_collaborator:\n # User is a collaborator, check if it's also a member with\n # edit rights of the current organization (redundant, but possible)\n user_orgs = logic.get_action(\n 'organization_list_for_user')(\n {'ignore_auth': True}, {'id': user.id, 'permission': 'update_dataset'})\n user_is_org_member = package.owner_org in 
[org['id'] for org in user_orgs]\n if data.get(key) != package.owner_org and not user_is_org_member:\n raise Invalid(_('You cannot move this dataset to another organization'))\n\n group = model.Group.get(value)\n if not group:\n raise Invalid(_('Organization does not exist'))\n group_id = group.id\n if not context.get(u'ignore_auth', False) and not(user.sysadmin or\n authz.has_user_permission_for_group_or_org(\n group_id, user.name, 'create_dataset')):\n raise Invalid(_('You cannot add a dataset to this organization'))\n data[key] = group_id\n\n\ndef package_id_not_changed(value, context):\n\n package = context.get('package')\n if package and value != package.id:\n raise Invalid('Cannot change value of key from %s to %s. '\n 'This key is read-only' % (package.id, value))\n return value\n\ndef int_validator(value, context):\n '''\n Return an integer for value, which may be a string in base 10 or\n a numeric type (e.g. int, long, float, Decimal, Fraction). Return\n None for None or empty/all-whitespace string values.\n\n :raises: ckan.lib.navl.dictization_functions.Invalid for other\n inputs or non-whole values\n '''\n if value is None:\n return None\n if hasattr(value, 'strip') and not value.strip():\n return None\n\n try:\n whole, part = divmod(value, 1)\n except TypeError:\n try:\n return int(value)\n except (TypeError, ValueError):\n pass\n else:\n if not part:\n try:\n return int(whole)\n except TypeError:\n pass # complex number: fail like int(complex) does\n\n raise Invalid(_('Invalid integer'))\n\ndef natural_number_validator(value, context):\n value = int_validator(value, context)\n if value < 0:\n raise Invalid(_('Must be a natural number'))\n return value\n\ndef is_positive_integer(value, context):\n value = int_validator(value, context)\n if value < 1:\n raise Invalid(_('Must be a postive integer'))\n return value\n\ndef boolean_validator(value, context):\n '''\n Return a boolean for value.\n Return value when value is a python bool type.\n Return True for strings 'true', 'yes', 't', 'y', and '1'.\n Return False in all other cases, including when value is an empty string or\n None\n '''\n if value is missing or value is None:\n return False\n if isinstance(value, bool):\n return value\n if value.lower() in ['true', 'yes', 't', 'y', '1']:\n return True\n return False\n\ndef isodate(value, context):\n if isinstance(value, datetime.datetime):\n return value\n if value == '':\n return None\n try:\n date = h.date_str_to_datetime(value)\n except (TypeError, ValueError) as e:\n raise Invalid(_('Date format incorrect'))\n return date\n\ndef no_http(value, context):\n\n model = context['model']\n session = context['session']\n\n if 'http:' in value:\n raise Invalid(_('No links are allowed in the log_message.'))\n return value\n\ndef package_id_exists(value, context):\n\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).get(value)\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))\n return value\n\ndef package_id_does_not_exist(value, context):\n\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).get(value)\n if result:\n raise Invalid(_('Dataset id already exists'))\n return value\n\ndef package_name_exists(value, context):\n\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).filter_by(name=value).first()\n\n if not result:\n raise Invalid(_('Not found') + ': %s' % value)\n return value\n\ndef 
package_id_or_name_exists(package_id_or_name, context):\n '''Return the given package_id_or_name if such a package exists.\n\n :raises: ckan.lib.navl.dictization_functions.Invalid if there is no\n package with the given id or name\n\n '''\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).get(package_id_or_name)\n if result:\n return package_id_or_name\n\n result = session.query(model.Package).filter_by(\n name=package_id_or_name).first()\n\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))\n\n return package_id_or_name\n\n\ndef resource_id_exists(value, context):\n model = context['model']\n session = context['session']\n if not session.query(model.Resource).get(value):\n raise Invalid('%s: %s' % (_('Not found'), _('Resource')))\n return value\n\n\ndef user_id_exists(user_id, context):\n '''Raises Invalid if the given user_id does not exist in the model given\n in the context, otherwise returns the given user_id.\n\n '''\n model = context['model']\n session = context['session']\n\n result = session.query(model.User).get(user_id)\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('User')))\n return user_id\n\ndef user_id_or_name_exists(user_id_or_name, context):\n '''Return the given user_id_or_name if such a user exists.\n\n :raises: ckan.lib.navl.dictization_functions.Invalid if no user can be\n found with the given id or user name\n\n '''\n model = context['model']\n session = context['session']\n result = session.query(model.User).get(user_id_or_name)\n if result:\n return user_id_or_name\n result = session.query(model.User).filter_by(name=user_id_or_name).first()\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('User')))\n return user_id_or_name\n\ndef group_id_exists(group_id, context):\n '''Raises Invalid if the given group_id does not exist in the model given\n in the context, otherwise returns the given group_id.\n\n '''\n model = context['model']\n session = context['session']\n\n result = session.query(model.Group).get(group_id)\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('Group')))\n return group_id\n\ndef group_id_or_name_exists(reference, context):\n '''\n Raises Invalid if a group identified by the name or id cannot be found.\n '''\n model = context['model']\n result = model.Group.get(reference)\n if not result:\n raise Invalid(_('That group name or ID does not exist.'))\n return reference\n\ndef activity_type_exists(activity_type):\n '''Raises Invalid if there is no registered activity renderer for the\n given activity_type. 
Otherwise returns the given activity_type.\n\n This just uses object_id_validators as a lookup.\n very safe.\n\n '''\n if activity_type in object_id_validators:\n return activity_type\n else:\n raise Invalid('%s: %s' % (_('Not found'), _('Activity type')))\n\n\n# A dictionary mapping activity_type values from activity dicts to functions\n# for validating the object_id values from those same activity dicts.\nobject_id_validators = {\n 'new package' : package_id_exists,\n 'changed package' : package_id_exists,\n 'deleted package' : package_id_exists,\n 'follow dataset' : package_id_exists,\n 'new user' : user_id_exists,\n 'changed user' : user_id_exists,\n 'follow user' : user_id_exists,\n 'new group' : group_id_exists,\n 'changed group' : group_id_exists,\n 'deleted group' : group_id_exists,\n 'new organization' : group_id_exists,\n 'changed organization' : group_id_exists,\n 'deleted organization' : group_id_exists,\n 'follow group' : group_id_exists,\n }\n\ndef object_id_validator(key, activity_dict, errors, context):\n '''Validate the 'object_id' value of an activity_dict.\n\n Uses the object_id_validators dict (above) to find and call an 'object_id'\n validator function for the given activity_dict's 'activity_type' value.\n\n Raises Invalid if the model given in context contains no object of the\n correct type (according to the 'activity_type' value of the activity_dict)\n with the given ID.\n\n Raises Invalid if there is no object_id_validator for the activity_dict's\n 'activity_type' value.\n\n '''\n activity_type = activity_dict[('activity_type',)]\n if activity_type in object_id_validators:\n object_id = activity_dict[('object_id',)]\n return object_id_validators[activity_type](object_id, context)\n else:\n raise Invalid('There is no object_id validator for '\n 'activity type \"%s\"' % activity_type)\n\nname_match = re.compile('[a-z0-9_\\-]*$')\ndef name_validator(value, context):\n '''Return the given value if it's a valid name, otherwise raise Invalid.\n\n If it's a valid name, the given value will be returned unmodified.\n\n This function applies general validation rules for names of packages,\n groups, users, etc.\n\n Most schemas also have their own custom name validator function to apply\n custom validation rules after this function, for example a\n ``package_name_validator()`` to check that no package with the given name\n already exists.\n\n :raises ckan.lib.navl.dictization_functions.Invalid: if ``value`` is not\n a valid name\n\n '''\n if not isinstance(value, string_types):\n raise Invalid(_('Names must be strings'))\n\n # check basic textual rules\n if value in ['new', 'edit', 'search']:\n raise Invalid(_('That name cannot be used'))\n\n if len(value) < 2:\n raise Invalid(_('Must be at least %s characters long') % 2)\n if len(value) > PACKAGE_NAME_MAX_LENGTH:\n raise Invalid(_('Name must be a maximum of %i characters long') % \\\n PACKAGE_NAME_MAX_LENGTH)\n if not name_match.match(value):\n raise Invalid(_('Must be purely lowercase alphanumeric '\n '(ascii) characters and these symbols: -_'))\n return value\n\ndef package_name_validator(key, data, errors, context):\n model = context['model']\n session = context['session']\n package = context.get('package')\n\n query = session.query(model.Package.state).filter_by(name=data[key])\n if package:\n package_id = package.id\n else:\n package_id = data.get(key[:-1] + ('id',))\n if package_id and package_id is not missing:\n query = query.filter(model.Package.id != package_id)\n result = query.first()\n if result and result.state 
!= State.DELETED:\n errors[key].append(_('That URL is already in use.'))\n\n value = data[key]\n if len(value) < PACKAGE_NAME_MIN_LENGTH:\n raise Invalid(\n _('Name \"%s\" length is less than minimum %s') % (value, PACKAGE_NAME_MIN_LENGTH)\n )\n if len(value) > PACKAGE_NAME_MAX_LENGTH:\n raise Invalid(\n _('Name \"%s\" length is more than maximum %s') % (value, PACKAGE_NAME_MAX_LENGTH)\n )\n\ndef package_version_validator(value, context):\n\n if len(value) > PACKAGE_VERSION_MAX_LENGTH:\n raise Invalid(_('Version must be a maximum of %i characters long') % \\\n PACKAGE_VERSION_MAX_LENGTH)\n return value\n\ndef duplicate_extras_key(key, data, errors, context):\n\n unflattened = df.unflatten(data)\n extras = unflattened.get('extras', [])\n extras_keys = []\n for extra in extras:\n if not extra.get('deleted'):\n extras_keys.append(extra['key'])\n\n for extra_key in set(extras_keys):\n extras_keys.remove(extra_key)\n if extras_keys:\n key_ = ('extras_validation',)\n assert key_ not in errors\n errors[key_] = [_('Duplicate key \"%s\"') % extras_keys[0]]\n\ndef group_name_validator(key, data, errors, context):\n model = context['model']\n session = context['session']\n group = context.get('group')\n\n query = session.query(model.Group.name).filter_by(name=data[key])\n if group:\n group_id = group.id\n else:\n group_id = data.get(key[:-1] + ('id',))\n if group_id and group_id is not missing:\n query = query.filter(model.Group.id != group_id)\n result = query.first()\n if result:\n errors[key].append(_('Group name already exists in database'))\n\ndef tag_length_validator(value, context):\n\n if len(value) < MIN_TAG_LENGTH:\n raise Invalid(\n _('Tag \"%s\" length is less than minimum %s') % (value, MIN_TAG_LENGTH)\n )\n if len(value) > MAX_TAG_LENGTH:\n raise Invalid(\n _('Tag \"%s\" length is more than maximum %i') % (value, MAX_TAG_LENGTH)\n )\n return value\n\ndef tag_name_validator(value, context):\n\n tagname_match = re.compile('[\\w \\-.]*$', re.UNICODE)\n if not tagname_match.match(value):\n raise Invalid(_('Tag \"%s\" must be alphanumeric '\n 'characters or symbols: -_.') % (value))\n return value\n\ndef tag_not_uppercase(value, context):\n\n tagname_uppercase = re.compile('[A-Z]')\n if tagname_uppercase.search(value):\n raise Invalid(_('Tag \"%s\" must not be uppercase' % (value)))\n return value\n\ndef tag_string_convert(key, data, errors, context):\n '''Takes a list of tags that is a comma-separated string (in data[key])\n and parses tag names. These are added to the data dict, enumerated. 
They\n are also validated.'''\n\n if isinstance(data[key], string_types):\n tags = [tag.strip() \\\n for tag in data[key].split(',') \\\n if tag.strip()]\n else:\n tags = data[key]\n\n current_index = max( [int(k[1]) for k in data.keys() if len(k) == 3 and k[0] == 'tags'] + [-1] )\n\n for num, tag in zip(count(current_index+1), tags):\n data[('tags', num, 'name')] = tag\n\n for tag in tags:\n tag_length_validator(tag, context)\n tag_name_validator(tag, context)\n\ndef ignore_not_admin(key, data, errors, context):\n # Deprecated in favour of ignore_not_package_admin\n return ignore_not_package_admin(key, data, errors, context)\n\ndef ignore_not_package_admin(key, data, errors, context):\n '''Ignore if the user is not allowed to administer the package specified.'''\n\n model = context['model']\n user = context.get('user')\n\n if 'ignore_auth' in context:\n return\n\n if user and authz.is_sysadmin(user):\n return\n\n authorized = False\n pkg = context.get('package')\n if pkg:\n try:\n logic.check_access('package_change_state',context)\n authorized = True\n except logic.NotAuthorized:\n authorized = False\n\n if (user and pkg and authorized):\n return\n\n # allow_state_change in the context will allow the state to be changed\n # FIXME is this the best way to cjeck for state only?\n if key == ('state',) and context.get('allow_state_change'):\n return\n data.pop(key)\n\n\ndef ignore_not_sysadmin(key, data, errors, context):\n '''Ignore the field if user not sysadmin or ignore_auth in context.'''\n\n user = context.get('user')\n ignore_auth = context.get('ignore_auth')\n if ignore_auth or (user and authz.is_sysadmin(user)):\n return\n\n data.pop(key)\n\n\ndef ignore_not_group_admin(key, data, errors, context):\n '''Ignore if the user is not allowed to administer for the group specified.'''\n\n model = context['model']\n user = context.get('user')\n\n if user and authz.is_sysadmin(user):\n return\n\n authorized = False\n group = context.get('group')\n if group:\n try:\n logic.check_access('group_change_state',context)\n authorized = True\n except logic.NotAuthorized:\n authorized = False\n\n if (user and group and authorized):\n return\n\n data.pop(key)\n\ndef user_name_validator(key, data, errors, context):\n '''Validate a new user name.\n\n Append an error message to ``errors[key]`` if a user named ``data[key]``\n already exists. 
Otherwise, do nothing.\n\n :raises ckan.lib.navl.dictization_functions.Invalid: if ``data[key]`` is\n not a string\n :rtype: None\n\n '''\n model = context['model']\n new_user_name = data[key]\n\n if not isinstance(new_user_name, string_types):\n raise Invalid(_('User names must be strings'))\n\n user = model.User.get(new_user_name)\n user_obj_from_context = context.get('user_obj')\n if user is not None:\n # A user with new_user_name already exists in the database.\n if user_obj_from_context and user_obj_from_context.id == user.id:\n # If there's a user_obj in context with the same id as the user\n # found in the db, then we must be doing a user_update and not\n # updating the user name, so don't return an error.\n return\n else:\n # Otherwise return an error: there's already another user with that\n # name, so you can create a new user with that name or update an\n # existing user's name to that name.\n errors[key].append(_('That login name is not available.'))\n elif user_obj_from_context:\n old_user = model.User.get(user_obj_from_context.id)\n if old_user is not None and old_user.state != model.State.PENDING:\n errors[key].append(_('That login name can not be modified.'))\n else:\n return\n\ndef user_both_passwords_entered(key, data, errors, context):\n\n password1 = data.get(('password1',),None)\n password2 = data.get(('password2',),None)\n\n if password1 is None or password1 == '' or \\\n password2 is None or password2 == '':\n errors[('password',)].append(_('Please enter both passwords'))\n\ndef user_password_validator(key, data, errors, context):\n value = data[key]\n\n if isinstance(value, Missing):\n pass\n elif not isinstance(value, string_types):\n errors[('password',)].append(_('Passwords must be strings'))\n elif value == '':\n pass\n elif len(value) < 8:\n errors[('password',)].append(_('Your password must be 8 characters or '\n 'longer'))\n\ndef user_passwords_match(key, data, errors, context):\n\n password1 = data.get(('password1',),None)\n password2 = data.get(('password2',),None)\n\n if not password1 == password2:\n errors[key].append(_('The passwords you entered do not match'))\n else:\n #Set correct password\n data[('password',)] = password1\n\ndef user_password_not_empty(key, data, errors, context):\n '''Only check if password is present if the user is created via action API.\n If not, user_both_passwords_entered will handle the validation'''\n # sysadmin may provide password_hash directly for importing users\n if (data.get(('password_hash',), missing) is not missing and\n authz.is_sysadmin(context.get('user'))):\n return\n\n if not ('password1',) in data and not ('password2',) in data:\n password = data.get(('password',),None)\n if not password:\n errors[key].append(_('Missing value'))\n\ndef user_about_validator(value,context):\n if 'http://' in value or 'https://' in value:\n raise Invalid(_('Edit not allowed as it looks like spam. 
Please avoid links in your description.'))\n\n return value\n\ndef vocabulary_name_validator(name, context):\n model = context['model']\n session = context['session']\n\n if len(name) < VOCABULARY_NAME_MIN_LENGTH:\n raise Invalid(_('Name must be at least %s characters long') %\n VOCABULARY_NAME_MIN_LENGTH)\n if len(name) > VOCABULARY_NAME_MAX_LENGTH:\n raise Invalid(_('Name must be a maximum of %i characters long') %\n VOCABULARY_NAME_MAX_LENGTH)\n query = session.query(model.Vocabulary.name).filter_by(name=name)\n result = query.first()\n if result:\n raise Invalid(_('That vocabulary name is already in use.'))\n return name\n\ndef vocabulary_id_not_changed(value, context):\n vocabulary = context.get('vocabulary')\n if vocabulary and value != vocabulary.id:\n raise Invalid(_('Cannot change value of key from %s to %s. '\n 'This key is read-only') % (vocabulary.id, value))\n return value\n\ndef vocabulary_id_exists(value, context):\n model = context['model']\n session = context['session']\n result = session.query(model.Vocabulary).get(value)\n if not result:\n raise Invalid(_('Tag vocabulary was not found.'))\n return value\n\ndef tag_in_vocabulary_validator(value, context):\n model = context['model']\n session = context['session']\n vocabulary = context.get('vocabulary')\n if vocabulary:\n query = session.query(model.Tag)\\\n .filter(model.Tag.vocabulary_id==vocabulary.id)\\\n .filter(model.Tag.name==value)\\\n .count()\n if not query:\n raise Invalid(_('Tag %s does not belong to vocabulary %s') % (value, vocabulary.name))\n return value\n\ndef tag_not_in_vocabulary(key, tag_dict, errors, context):\n tag_name = tag_dict[('name',)]\n if not tag_name:\n raise Invalid(_('No tag name'))\n if ('vocabulary_id',) in tag_dict:\n vocabulary_id = tag_dict[('vocabulary_id',)]\n else:\n vocabulary_id = None\n model = context['model']\n session = context['session']\n\n query = session.query(model.Tag)\n query = query.filter(model.Tag.vocabulary_id==vocabulary_id)\n query = query.filter(model.Tag.name==tag_name)\n count = query.count()\n if count > 0:\n raise Invalid(_('Tag %s already belongs to vocabulary %s') %\n (tag_name, vocabulary_id))\n else:\n return\n\ndef url_validator(key, data, errors, context):\n ''' Checks that the provided value (if it is present) is a valid URL '''\n\n url = data.get(key, None)\n if not url:\n return\n\n try:\n pieces = urlparse(url)\n if all([pieces.scheme, pieces.netloc]) and \\\n set(pieces.netloc) <= set(string.ascii_letters + string.digits + '-.') and \\\n pieces.scheme in ['http', 'https']:\n return\n except ValueError:\n # url is invalid\n pass\n\n errors[key].append(_('Please provide a valid URL'))\n\n\ndef user_name_exists(user_name, context):\n model = context['model']\n session = context['session']\n result = session.query(model.User).filter_by(name=user_name).first()\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('User')))\n return result.name\n\n\ndef role_exists(role, context):\n if role not in authz.ROLE_PERMISSIONS:\n raise Invalid(_('role does not exist.'))\n return role\n\n\ndef datasets_with_no_organization_cannot_be_private(key, data, errors,\n context):\n\n dataset_id = data.get(('id',))\n owner_org = data.get(('owner_org',))\n private = data[key] is True\n\n check_passed = True\n\n if not dataset_id and private and not owner_org:\n # When creating a dataset, enforce it directly\n check_passed = False\n elif dataset_id and private and not owner_org:\n # Check if the dataset actually has an owner_org, even if not provided\n try:\n 
dataset_dict = logic.get_action('package_show')({},\n {'id': dataset_id})\n if not dataset_dict.get('owner_org'):\n check_passed = False\n\n except logic.NotFound:\n check_passed = False\n\n if not check_passed:\n errors[key].append(\n _(\"Datasets with no organization can't be private.\"))\n\n\ndef list_of_strings(key, data, errors, context):\n value = data.get(key)\n if not isinstance(value, list):\n raise Invalid(_('Not a list'))\n for x in value:\n if not isinstance(x, string_types):\n raise Invalid('%s: %s' % (_('Not a string'), x))\n\ndef if_empty_guess_format(key, data, errors, context):\n value = data[key]\n resource_id = data.get(key[:-1] + ('id',))\n\n # if resource_id then an update\n if (not value or value is Missing) and not resource_id:\n url = data.get(key[:-1] + ('url',), '')\n if not url:\n return\n mimetype, encoding = mimetypes.guess_type(url)\n if mimetype:\n data[key] = mimetype\n\ndef clean_format(format):\n return h.unified_resource_format(format)\n\ndef no_loops_in_hierarchy(key, data, errors, context):\n '''Checks that the parent groups specified in the data would not cause\n a loop in the group hierarchy, and therefore cause the recursion up/down\n the hierarchy to get into an infinite loop.\n '''\n if not 'id' in data:\n # Must be a new group - has no children, so no chance of loops\n return\n group = context['model'].Group.get(data['id'])\n allowable_parents = group.\\\n groups_allowed_to_be_its_parent(type=group.type)\n for parent in data['groups']:\n parent_name = parent['name']\n # a blank name signifies top level, which is always allowed\n if parent_name and context['model'].Group.get(parent_name) \\\n not in allowable_parents:\n raise Invalid(_('This parent would create a loop in the '\n 'hierarchy'))\n\n\ndef filter_fields_and_values_should_have_same_length(key, data, errors, context):\n convert_to_list_if_string = logic.converters.convert_to_list_if_string\n fields = convert_to_list_if_string(data.get(('filter_fields',), []))\n values = convert_to_list_if_string(data.get(('filter_values',), []))\n\n if len(fields) != len(values):\n msg = _('\"filter_fields\" and \"filter_values\" should have the same length')\n errors[('filter_fields',)].append(msg)\n errors[('filter_values',)].append(msg)\n\n\ndef filter_fields_and_values_exist_and_are_valid(key, data, errors, context):\n convert_to_list_if_string = logic.converters.convert_to_list_if_string\n fields = convert_to_list_if_string(data.get(('filter_fields',)))\n values = convert_to_list_if_string(data.get(('filter_values',)))\n\n if not fields:\n errors[('filter_fields',)].append(_('\"filter_fields\" is required when '\n '\"filter_values\" is filled'))\n if not values:\n errors[('filter_values',)].append(_('\"filter_values\" is required when '\n '\"filter_fields\" is filled'))\n\n filters = collections.defaultdict(list)\n for field, value in zip(fields, values):\n filters[field].append(value)\n\n data[('filters',)] = dict(filters)\n\n\ndef extra_key_not_in_root_schema(key, data, errors, context):\n\n for schema_key in context.get('schema_keys', []):\n if schema_key == data[key]:\n raise Invalid(_('There is a schema field with the same name'))\n\n\ndef empty_if_not_sysadmin(key, data, errors, context):\n '''Only sysadmins may pass this value'''\n from ckan.lib.navl.validators import empty\n\n user = context.get('user')\n\n ignore_auth = context.get('ignore_auth')\n if ignore_auth or (user and authz.is_sysadmin(user)):\n return\n\n empty(key, data, errors, context)\n\n#pattern from 
https://html.spec.whatwg.org/#e-mail-state-(type=email)\nemail_pattern = re.compile(\n # additional pattern to reject malformed dots usage\n r\"^(?!\\.)(?!.*\\.$)(?!.*?\\.\\.)\"\\\n \"[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9]\"\\\n \"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]\"\\\n \"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\"\n )\n\n\ndef email_validator(value, context):\n '''Validate email input '''\n\n if value:\n if not email_pattern.match(value):\n raise Invalid(_('Email {email} is not a valid format').format(email=value))\n return value\n\ndef collect_prefix_validate(prefix, *validator_names):\n \"\"\"\n Return a validator that will collect top-level keys starting with\n prefix then apply validator_names to each one. Results are moved\n to a dict under the prefix name, with prefix removed from keys\n \"\"\"\n validator_fns = [logic.get_validator(v) for v in validator_names]\n\n def prefix_validator(key, data, errors, context):\n out = {}\n extras = data.get(('__extras',), {})\n\n # values passed as lists of dicts will have been flattened into __junk\n junk = df.unflatten(data.get(('__junk',), {}))\n for field_name in junk:\n if not field_name.startswith(prefix):\n continue\n extras[field_name] = junk[field_name]\n\n for field_name in list(extras):\n if not field_name.startswith(prefix):\n continue\n data[(field_name,)] = extras.pop(field_name)\n for v in validator_fns:\n try:\n df.convert(v, (field_name,), data, errors, context)\n except df.StopOnError:\n break\n out[field_name[len(prefix):]] = data.pop((field_name,))\n\n data[(prefix,)] = out\n\n return prefix_validator\n\n\ndef dict_only(value):\n if not isinstance(value, dict):\n raise Invalid(_('Must be a dict'))\n return value\n\ndef email_is_unique(key, data, errors, context):\n '''Validate email is unique'''\n model = context['model']\n session = context['session']\n\n users = session.query(model.User) \\\n .filter(model.User.email == data[key]).all()\n # is there is no users with this email it's free\n if not users:\n return\n else:\n # allow user to update their own email\n for user in users:\n if (user.name == data[(\"name\",)]\n or user.id == data[(\"id\",)]):\n return\n\n raise Invalid(\n _('The email address \\'{email}\\' \\\n belongs to a registered user.').\n format(email=data[key]))\n\ndef one_of(list_of_value):\n ''' Checks if the provided value is present in a list '''\n def callable(value):\n if value not in list_of_value:\n raise Invalid(_('Value must be one of {}'.format(list_of_value)))\n return value\n return callable\n\n\ndef json_object(value):\n ''' Make sure value can be serialized as a JSON object'''\n if value is None or value == '':\n return\n try:\n if not json.dumps(value).startswith('{'):\n raise Invalid(_('The value should be a valid JSON object'))\n except ValueError as e:\n raise Invalid(_('Could not parse the value as a valid JSON object'))\n\n return value\n\n\ndef extras_valid_json(extras, context):\n try:\n for extra, value in iteritems(extras):\n json.dumps(value)\n except ValueError as e:\n raise Invalid(_(u'Could not parse extra \\'{name}\\' as valid JSON').\n format(name=extra))\n return extras\n", "path": "ckan/logic/validators.py"}], "after_files": [{"content": "\n# encoding: utf-8\n\nimport collections\nimport datetime\nfrom itertools import count\nimport re\nimport mimetypes\nimport string\nimport json\n\nfrom six import string_types, iteritems\nfrom six.moves.urllib.parse import urlparse\n\nimport ckan.lib.navl.dictization_functions as df\nimport ckan.logic as logic\nimport 
ckan.lib.helpers as h\nfrom ckan.model import (MAX_TAG_LENGTH, MIN_TAG_LENGTH,\n PACKAGE_NAME_MIN_LENGTH, PACKAGE_NAME_MAX_LENGTH,\n PACKAGE_VERSION_MAX_LENGTH,\n VOCABULARY_NAME_MAX_LENGTH,\n VOCABULARY_NAME_MIN_LENGTH)\nimport ckan.authz as authz\nfrom ckan.model.core import State\n\nfrom ckan.common import _\n\nInvalid = df.Invalid\nStopOnError = df.StopOnError\nMissing = df.Missing\nmissing = df.missing\n\n\ndef owner_org_validator(key, data, errors, context):\n\n value = data.get(key)\n\n if value is missing or value is None:\n if not authz.check_config_permission('create_unowned_dataset'):\n raise Invalid(_('An organization must be provided'))\n data.pop(key, None)\n raise df.StopOnError\n\n model = context['model']\n user = context['user']\n user = model.User.get(user)\n if value == '':\n if not authz.check_config_permission('create_unowned_dataset'):\n raise Invalid(_('An organization must be provided'))\n return\n\n if (authz.check_config_permission('allow_dataset_collaborators')\n and not authz.check_config_permission('allow_collaborators_to_change_owner_org')):\n\n package = context.get('package')\n if package and user and not user.sysadmin:\n is_collaborator = authz.user_is_collaborator_on_dataset(\n user.id, package.id, ['admin', 'editor'])\n if is_collaborator:\n # User is a collaborator, check if it's also a member with\n # edit rights of the current organization (redundant, but possible)\n user_orgs = logic.get_action(\n 'organization_list_for_user')(\n {'ignore_auth': True}, {'id': user.id, 'permission': 'update_dataset'})\n user_is_org_member = package.owner_org in [org['id'] for org in user_orgs]\n if data.get(key) != package.owner_org and not user_is_org_member:\n raise Invalid(_('You cannot move this dataset to another organization'))\n\n group = model.Group.get(value)\n if not group:\n raise Invalid(_('Organization does not exist'))\n group_id = group.id\n if not context.get(u'ignore_auth', False) and not(user.sysadmin or\n authz.has_user_permission_for_group_or_org(\n group_id, user.name, 'create_dataset')):\n raise Invalid(_('You cannot add a dataset to this organization'))\n data[key] = group_id\n\n\ndef package_id_not_changed(value, context):\n\n package = context.get('package')\n if package and value != package.id:\n raise Invalid('Cannot change value of key from %s to %s. '\n 'This key is read-only' % (package.id, value))\n return value\n\ndef int_validator(value, context):\n '''\n Return an integer for value, which may be a string in base 10 or\n a numeric type (e.g. int, long, float, Decimal, Fraction). 
Return\n None for None or empty/all-whitespace string values.\n\n :raises: ckan.lib.navl.dictization_functions.Invalid for other\n inputs or non-whole values\n '''\n if value is None:\n return None\n if hasattr(value, 'strip') and not value.strip():\n return None\n\n try:\n whole, part = divmod(value, 1)\n except TypeError:\n try:\n return int(value)\n except (TypeError, ValueError):\n pass\n else:\n if not part:\n try:\n return int(whole)\n except TypeError:\n pass # complex number: fail like int(complex) does\n\n raise Invalid(_('Invalid integer'))\n\ndef natural_number_validator(value, context):\n value = int_validator(value, context)\n if value < 0:\n raise Invalid(_('Must be a natural number'))\n return value\n\ndef is_positive_integer(value, context):\n value = int_validator(value, context)\n if value < 1:\n raise Invalid(_('Must be a postive integer'))\n return value\n\ndef boolean_validator(value, context):\n '''\n Return a boolean for value.\n Return value when value is a python bool type.\n Return True for strings 'true', 'yes', 't', 'y', and '1'.\n Return False in all other cases, including when value is an empty string or\n None\n '''\n if value is missing or value is None:\n return False\n if isinstance(value, bool):\n return value\n if value.lower() in ['true', 'yes', 't', 'y', '1']:\n return True\n return False\n\ndef isodate(value, context):\n if isinstance(value, datetime.datetime):\n return value\n if value == '':\n return None\n try:\n date = h.date_str_to_datetime(value)\n except (TypeError, ValueError) as e:\n raise Invalid(_('Date format incorrect'))\n return date\n\ndef no_http(value, context):\n\n model = context['model']\n session = context['session']\n\n if 'http:' in value:\n raise Invalid(_('No links are allowed in the log_message.'))\n return value\n\ndef package_id_exists(value, context):\n\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).get(value)\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))\n return value\n\ndef package_id_does_not_exist(value, context):\n\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).get(value)\n if result:\n raise Invalid(_('Dataset id already exists'))\n return value\n\ndef package_name_exists(value, context):\n\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).filter_by(name=value).first()\n\n if not result:\n raise Invalid(_('Not found') + ': %s' % value)\n return value\n\ndef package_id_or_name_exists(package_id_or_name, context):\n '''Return the given package_id_or_name if such a package exists.\n\n :raises: ckan.lib.navl.dictization_functions.Invalid if there is no\n package with the given id or name\n\n '''\n model = context['model']\n session = context['session']\n\n result = session.query(model.Package).get(package_id_or_name)\n if result:\n return package_id_or_name\n\n result = session.query(model.Package).filter_by(\n name=package_id_or_name).first()\n\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))\n\n return package_id_or_name\n\n\ndef resource_id_exists(value, context):\n model = context['model']\n session = context['session']\n if not session.query(model.Resource).get(value):\n raise Invalid('%s: %s' % (_('Not found'), _('Resource')))\n return value\n\n\ndef user_id_exists(user_id, context):\n '''Raises Invalid if the given user_id does not exist in the model given\n in the context, otherwise returns the given 
user_id.\n\n '''\n model = context['model']\n session = context['session']\n\n result = session.query(model.User).get(user_id)\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('User')))\n return user_id\n\ndef user_id_or_name_exists(user_id_or_name, context):\n '''Return the given user_id_or_name if such a user exists.\n\n :raises: ckan.lib.navl.dictization_functions.Invalid if no user can be\n found with the given id or user name\n\n '''\n model = context['model']\n session = context['session']\n result = session.query(model.User).get(user_id_or_name)\n if result:\n return user_id_or_name\n result = session.query(model.User).filter_by(name=user_id_or_name).first()\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('User')))\n return user_id_or_name\n\ndef group_id_exists(group_id, context):\n '''Raises Invalid if the given group_id does not exist in the model given\n in the context, otherwise returns the given group_id.\n\n '''\n model = context['model']\n session = context['session']\n\n result = session.query(model.Group).get(group_id)\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('Group')))\n return group_id\n\ndef group_id_or_name_exists(reference, context):\n '''\n Raises Invalid if a group identified by the name or id cannot be found.\n '''\n model = context['model']\n result = model.Group.get(reference)\n if not result:\n raise Invalid(_('That group name or ID does not exist.'))\n return reference\n\ndef activity_type_exists(activity_type):\n '''Raises Invalid if there is no registered activity renderer for the\n given activity_type. Otherwise returns the given activity_type.\n\n This just uses object_id_validators as a lookup.\n very safe.\n\n '''\n if activity_type in object_id_validators:\n return activity_type\n else:\n raise Invalid('%s: %s' % (_('Not found'), _('Activity type')))\n\n\n# A dictionary mapping activity_type values from activity dicts to functions\n# for validating the object_id values from those same activity dicts.\nobject_id_validators = {\n 'new package' : package_id_exists,\n 'changed package' : package_id_exists,\n 'deleted package' : package_id_exists,\n 'follow dataset' : package_id_exists,\n 'new user' : user_id_exists,\n 'changed user' : user_id_exists,\n 'follow user' : user_id_exists,\n 'new group' : group_id_exists,\n 'changed group' : group_id_exists,\n 'deleted group' : group_id_exists,\n 'new organization' : group_id_exists,\n 'changed organization' : group_id_exists,\n 'deleted organization' : group_id_exists,\n 'follow group' : group_id_exists,\n }\n\ndef object_id_validator(key, activity_dict, errors, context):\n '''Validate the 'object_id' value of an activity_dict.\n\n Uses the object_id_validators dict (above) to find and call an 'object_id'\n validator function for the given activity_dict's 'activity_type' value.\n\n Raises Invalid if the model given in context contains no object of the\n correct type (according to the 'activity_type' value of the activity_dict)\n with the given ID.\n\n Raises Invalid if there is no object_id_validator for the activity_dict's\n 'activity_type' value.\n\n '''\n activity_type = activity_dict[('activity_type',)]\n if activity_type in object_id_validators:\n object_id = activity_dict[('object_id',)]\n return object_id_validators[activity_type](object_id, context)\n else:\n raise Invalid('There is no object_id validator for '\n 'activity type \"%s\"' % activity_type)\n\nname_match = re.compile('[a-z0-9_\\-]*$')\ndef name_validator(value, context):\n '''Return the given 
value if it's a valid name, otherwise raise Invalid.\n\n If it's a valid name, the given value will be returned unmodified.\n\n This function applies general validation rules for names of packages,\n groups, users, etc.\n\n Most schemas also have their own custom name validator function to apply\n custom validation rules after this function, for example a\n ``package_name_validator()`` to check that no package with the given name\n already exists.\n\n :raises ckan.lib.navl.dictization_functions.Invalid: if ``value`` is not\n a valid name\n\n '''\n if not isinstance(value, string_types):\n raise Invalid(_('Names must be strings'))\n\n # check basic textual rules\n if value in ['new', 'edit', 'search']:\n raise Invalid(_('That name cannot be used'))\n\n if len(value) < 2:\n raise Invalid(_('Must be at least %s characters long') % 2)\n if len(value) > PACKAGE_NAME_MAX_LENGTH:\n raise Invalid(_('Name must be a maximum of %i characters long') % \\\n PACKAGE_NAME_MAX_LENGTH)\n if not name_match.match(value):\n raise Invalid(_('Must be purely lowercase alphanumeric '\n '(ascii) characters and these symbols: -_'))\n return value\n\ndef package_name_validator(key, data, errors, context):\n model = context['model']\n session = context['session']\n package = context.get('package')\n\n query = session.query(model.Package.state).filter_by(name=data[key])\n if package:\n package_id = package.id\n else:\n package_id = data.get(key[:-1] + ('id',))\n if package_id and package_id is not missing:\n query = query.filter(model.Package.id != package_id)\n result = query.first()\n if result and result.state != State.DELETED:\n errors[key].append(_('That URL is already in use.'))\n\n value = data[key]\n if len(value) < PACKAGE_NAME_MIN_LENGTH:\n raise Invalid(\n _('Name \"%s\" length is less than minimum %s') % (value, PACKAGE_NAME_MIN_LENGTH)\n )\n if len(value) > PACKAGE_NAME_MAX_LENGTH:\n raise Invalid(\n _('Name \"%s\" length is more than maximum %s') % (value, PACKAGE_NAME_MAX_LENGTH)\n )\n\ndef package_version_validator(value, context):\n\n if len(value) > PACKAGE_VERSION_MAX_LENGTH:\n raise Invalid(_('Version must be a maximum of %i characters long') % \\\n PACKAGE_VERSION_MAX_LENGTH)\n return value\n\ndef duplicate_extras_key(key, data, errors, context):\n\n unflattened = df.unflatten(data)\n extras = unflattened.get('extras', [])\n extras_keys = []\n for extra in extras:\n if not extra.get('deleted'):\n extras_keys.append(extra['key'])\n\n for extra_key in set(extras_keys):\n extras_keys.remove(extra_key)\n if extras_keys:\n key_ = ('extras_validation',)\n assert key_ not in errors\n errors[key_] = [_('Duplicate key \"%s\"') % extras_keys[0]]\n\ndef group_name_validator(key, data, errors, context):\n model = context['model']\n session = context['session']\n group = context.get('group')\n\n query = session.query(model.Group.name).filter_by(name=data[key])\n if group:\n group_id = group.id\n else:\n group_id = data.get(key[:-1] + ('id',))\n if group_id and group_id is not missing:\n query = query.filter(model.Group.id != group_id)\n result = query.first()\n if result:\n errors[key].append(_('Group name already exists in database'))\n\ndef tag_length_validator(value, context):\n\n if len(value) < MIN_TAG_LENGTH:\n raise Invalid(\n _('Tag \"%s\" length is less than minimum %s') % (value, MIN_TAG_LENGTH)\n )\n if len(value) > MAX_TAG_LENGTH:\n raise Invalid(\n _('Tag \"%s\" length is more than maximum %i') % (value, MAX_TAG_LENGTH)\n )\n return value\n\ndef tag_name_validator(value, context):\n\n 
tagname_match = re.compile('[\\w \\-.]*$', re.UNICODE)\n if not tagname_match.match(value):\n raise Invalid(_('Tag \"%s\" must be alphanumeric '\n 'characters or symbols: -_.') % (value))\n return value\n\ndef tag_not_uppercase(value, context):\n\n tagname_uppercase = re.compile('[A-Z]')\n if tagname_uppercase.search(value):\n raise Invalid(_('Tag \"%s\" must not be uppercase' % (value)))\n return value\n\ndef tag_string_convert(key, data, errors, context):\n '''Takes a list of tags that is a comma-separated string (in data[key])\n and parses tag names. These are added to the data dict, enumerated. They\n are also validated.'''\n\n if isinstance(data[key], string_types):\n tags = [tag.strip() \\\n for tag in data[key].split(',') \\\n if tag.strip()]\n else:\n tags = data[key]\n\n current_index = max( [int(k[1]) for k in data.keys() if len(k) == 3 and k[0] == 'tags'] + [-1] )\n\n for num, tag in zip(count(current_index+1), tags):\n data[('tags', num, 'name')] = tag\n\n for tag in tags:\n tag_length_validator(tag, context)\n tag_name_validator(tag, context)\n\ndef ignore_not_admin(key, data, errors, context):\n # Deprecated in favour of ignore_not_package_admin\n return ignore_not_package_admin(key, data, errors, context)\n\ndef ignore_not_package_admin(key, data, errors, context):\n '''Ignore if the user is not allowed to administer the package specified.'''\n\n model = context['model']\n user = context.get('user')\n\n if 'ignore_auth' in context:\n return\n\n if user and authz.is_sysadmin(user):\n return\n\n authorized = False\n pkg = context.get('package')\n if pkg:\n try:\n logic.check_access('package_change_state',context)\n authorized = True\n except logic.NotAuthorized:\n authorized = False\n\n if (user and pkg and authorized):\n return\n\n # allow_state_change in the context will allow the state to be changed\n # FIXME is this the best way to cjeck for state only?\n if key == ('state',) and context.get('allow_state_change'):\n return\n data.pop(key)\n\n\ndef ignore_not_sysadmin(key, data, errors, context):\n '''Ignore the field if user not sysadmin or ignore_auth in context.'''\n\n user = context.get('user')\n ignore_auth = context.get('ignore_auth')\n if ignore_auth or (user and authz.is_sysadmin(user)):\n return\n\n data.pop(key)\n\n\ndef ignore_not_group_admin(key, data, errors, context):\n '''Ignore if the user is not allowed to administer for the group specified.'''\n\n model = context['model']\n user = context.get('user')\n\n if user and authz.is_sysadmin(user):\n return\n\n authorized = False\n group = context.get('group')\n if group:\n try:\n logic.check_access('group_change_state',context)\n authorized = True\n except logic.NotAuthorized:\n authorized = False\n\n if (user and group and authorized):\n return\n\n data.pop(key)\n\ndef user_name_validator(key, data, errors, context):\n '''Validate a new user name.\n\n Append an error message to ``errors[key]`` if a user named ``data[key]``\n already exists. 
Otherwise, do nothing.\n\n :raises ckan.lib.navl.dictization_functions.Invalid: if ``data[key]`` is\n not a string\n :rtype: None\n\n '''\n model = context['model']\n new_user_name = data[key]\n\n if not isinstance(new_user_name, string_types):\n raise Invalid(_('User names must be strings'))\n\n user = model.User.get(new_user_name)\n user_obj_from_context = context.get('user_obj')\n if user is not None:\n # A user with new_user_name already exists in the database.\n if user_obj_from_context and user_obj_from_context.id == user.id:\n # If there's a user_obj in context with the same id as the user\n # found in the db, then we must be doing a user_update and not\n # updating the user name, so don't return an error.\n return\n else:\n # Otherwise return an error: there's already another user with that\n # name, so you can create a new user with that name or update an\n # existing user's name to that name.\n errors[key].append(_('That login name is not available.'))\n elif user_obj_from_context:\n old_user = model.User.get(user_obj_from_context.id)\n if old_user is not None and old_user.state != model.State.PENDING:\n errors[key].append(_('That login name can not be modified.'))\n else:\n return\n\ndef user_both_passwords_entered(key, data, errors, context):\n\n password1 = data.get(('password1',),None)\n password2 = data.get(('password2',),None)\n\n if password1 is None or password1 == '' or \\\n password2 is None or password2 == '':\n errors[('password',)].append(_('Please enter both passwords'))\n\ndef user_password_validator(key, data, errors, context):\n value = data[key]\n\n if isinstance(value, Missing):\n pass\n elif not isinstance(value, string_types):\n errors[('password',)].append(_('Passwords must be strings'))\n elif value == '':\n pass\n elif len(value) < 8:\n errors[('password',)].append(_('Your password must be 8 characters or '\n 'longer'))\n\ndef user_passwords_match(key, data, errors, context):\n\n password1 = data.get(('password1',),None)\n password2 = data.get(('password2',),None)\n\n if not password1 == password2:\n errors[key].append(_('The passwords you entered do not match'))\n else:\n #Set correct password\n data[('password',)] = password1\n\ndef user_password_not_empty(key, data, errors, context):\n '''Only check if password is present if the user is created via action API.\n If not, user_both_passwords_entered will handle the validation'''\n # sysadmin may provide password_hash directly for importing users\n if (data.get(('password_hash',), missing) is not missing and\n authz.is_sysadmin(context.get('user'))):\n return\n\n if not ('password1',) in data and not ('password2',) in data:\n password = data.get(('password',),None)\n if not password:\n errors[key].append(_('Missing value'))\n\ndef user_about_validator(value,context):\n if 'http://' in value or 'https://' in value:\n raise Invalid(_('Edit not allowed as it looks like spam. 
Please avoid links in your description.'))\n\n return value\n\ndef vocabulary_name_validator(name, context):\n model = context['model']\n session = context['session']\n\n if len(name) < VOCABULARY_NAME_MIN_LENGTH:\n raise Invalid(_('Name must be at least %s characters long') %\n VOCABULARY_NAME_MIN_LENGTH)\n if len(name) > VOCABULARY_NAME_MAX_LENGTH:\n raise Invalid(_('Name must be a maximum of %i characters long') %\n VOCABULARY_NAME_MAX_LENGTH)\n query = session.query(model.Vocabulary.name).filter_by(name=name)\n result = query.first()\n if result:\n raise Invalid(_('That vocabulary name is already in use.'))\n return name\n\ndef vocabulary_id_not_changed(value, context):\n vocabulary = context.get('vocabulary')\n if vocabulary and value != vocabulary.id:\n raise Invalid(_('Cannot change value of key from %s to %s. '\n 'This key is read-only') % (vocabulary.id, value))\n return value\n\ndef vocabulary_id_exists(value, context):\n model = context['model']\n session = context['session']\n result = session.query(model.Vocabulary).get(value)\n if not result:\n raise Invalid(_('Tag vocabulary was not found.'))\n return value\n\ndef tag_in_vocabulary_validator(value, context):\n model = context['model']\n session = context['session']\n vocabulary = context.get('vocabulary')\n if vocabulary:\n query = session.query(model.Tag)\\\n .filter(model.Tag.vocabulary_id==vocabulary.id)\\\n .filter(model.Tag.name==value)\\\n .count()\n if not query:\n raise Invalid(_('Tag %s does not belong to vocabulary %s') % (value, vocabulary.name))\n return value\n\ndef tag_not_in_vocabulary(key, tag_dict, errors, context):\n tag_name = tag_dict[('name',)]\n if not tag_name:\n raise Invalid(_('No tag name'))\n if ('vocabulary_id',) in tag_dict:\n vocabulary_id = tag_dict[('vocabulary_id',)]\n else:\n vocabulary_id = None\n model = context['model']\n session = context['session']\n\n query = session.query(model.Tag)\n query = query.filter(model.Tag.vocabulary_id==vocabulary_id)\n query = query.filter(model.Tag.name==tag_name)\n count = query.count()\n if count > 0:\n raise Invalid(_('Tag %s already belongs to vocabulary %s') %\n (tag_name, vocabulary_id))\n else:\n return\n\ndef url_validator(key, data, errors, context):\n ''' Checks that the provided value (if it is present) is a valid URL '''\n\n url = data.get(key, None)\n if not url:\n return\n\n try:\n pieces = urlparse(url)\n if all([pieces.scheme, pieces.netloc]) and \\\n set(pieces.netloc) <= set(string.ascii_letters + string.digits + '-.') and \\\n pieces.scheme in ['http', 'https']:\n return\n except ValueError:\n # url is invalid\n pass\n\n errors[key].append(_('Please provide a valid URL'))\n\n\ndef user_name_exists(user_name, context):\n model = context['model']\n session = context['session']\n result = session.query(model.User).filter_by(name=user_name).first()\n if not result:\n raise Invalid('%s: %s' % (_('Not found'), _('User')))\n return result.name\n\n\ndef role_exists(role, context):\n if role not in authz.ROLE_PERMISSIONS:\n raise Invalid(_('role does not exist.'))\n return role\n\n\ndef datasets_with_no_organization_cannot_be_private(key, data, errors,\n context):\n\n dataset_id = data.get(('id',))\n owner_org = data.get(('owner_org',))\n private = data[key] is True\n\n check_passed = True\n\n if not dataset_id and private and not owner_org:\n # When creating a dataset, enforce it directly\n check_passed = False\n elif dataset_id and private and not owner_org:\n # Check if the dataset actually has an owner_org, even if not provided\n try:\n 
dataset_dict = logic.get_action('package_show')({},\n {'id': dataset_id})\n if not dataset_dict.get('owner_org'):\n check_passed = False\n\n except logic.NotFound:\n check_passed = False\n\n if not check_passed:\n errors[key].append(\n _(\"Datasets with no organization can't be private.\"))\n\n\ndef list_of_strings(key, data, errors, context):\n value = data.get(key)\n if not isinstance(value, list):\n raise Invalid(_('Not a list'))\n for x in value:\n if not isinstance(x, string_types):\n raise Invalid('%s: %s' % (_('Not a string'), x))\n\ndef if_empty_guess_format(key, data, errors, context):\n value = data[key]\n resource_id = data.get(key[:-1] + ('id',))\n\n # if resource_id then an update\n if (not value or value is Missing) and not resource_id:\n url = data.get(key[:-1] + ('url',), '')\n if not url:\n return\n mimetype, encoding = mimetypes.guess_type(url)\n if mimetype:\n data[key] = mimetype\n\ndef clean_format(format):\n return h.unified_resource_format(format)\n\ndef no_loops_in_hierarchy(key, data, errors, context):\n '''Checks that the parent groups specified in the data would not cause\n a loop in the group hierarchy, and therefore cause the recursion up/down\n the hierarchy to get into an infinite loop.\n '''\n if not 'id' in data:\n # Must be a new group - has no children, so no chance of loops\n return\n group = context['model'].Group.get(data['id'])\n allowable_parents = group.\\\n groups_allowed_to_be_its_parent(type=group.type)\n for parent in data['groups']:\n parent_name = parent['name']\n # a blank name signifies top level, which is always allowed\n if parent_name and context['model'].Group.get(parent_name) \\\n not in allowable_parents:\n raise Invalid(_('This parent would create a loop in the '\n 'hierarchy'))\n\n\ndef filter_fields_and_values_should_have_same_length(key, data, errors, context):\n convert_to_list_if_string = logic.converters.convert_to_list_if_string\n fields = convert_to_list_if_string(data.get(('filter_fields',), []))\n values = convert_to_list_if_string(data.get(('filter_values',), []))\n\n if len(fields) != len(values):\n msg = _('\"filter_fields\" and \"filter_values\" should have the same length')\n errors[('filter_fields',)].append(msg)\n errors[('filter_values',)].append(msg)\n\n\ndef filter_fields_and_values_exist_and_are_valid(key, data, errors, context):\n convert_to_list_if_string = logic.converters.convert_to_list_if_string\n fields = convert_to_list_if_string(data.get(('filter_fields',)))\n values = convert_to_list_if_string(data.get(('filter_values',)))\n\n if not fields:\n errors[('filter_fields',)].append(_('\"filter_fields\" is required when '\n '\"filter_values\" is filled'))\n if not values:\n errors[('filter_values',)].append(_('\"filter_values\" is required when '\n '\"filter_fields\" is filled'))\n\n filters = collections.defaultdict(list)\n for field, value in zip(fields, values):\n filters[field].append(value)\n\n data[('filters',)] = dict(filters)\n\n\ndef extra_key_not_in_root_schema(key, data, errors, context):\n\n for schema_key in context.get('schema_keys', []):\n if schema_key == data[key]:\n raise Invalid(_('There is a schema field with the same name'))\n\n\ndef empty_if_not_sysadmin(key, data, errors, context):\n '''Only sysadmins may pass this value'''\n from ckan.lib.navl.validators import empty\n\n user = context.get('user')\n\n ignore_auth = context.get('ignore_auth')\n if ignore_auth or (user and authz.is_sysadmin(user)):\n return\n\n empty(key, data, errors, context)\n\n#pattern from 
https://html.spec.whatwg.org/#e-mail-state-(type=email)\nemail_pattern = re.compile(\n # additional pattern to reject malformed dots usage\n r\"^(?!\\.)(?!.*\\.$)(?!.*?\\.\\.)\"\\\n \"[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9]\"\\\n \"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]\"\\\n \"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\"\n )\n\n\ndef email_validator(value, context):\n '''Validate email input '''\n\n if value:\n if not email_pattern.match(value):\n raise Invalid(_('Email {email} is not a valid format').format(email=value))\n return value\n\ndef collect_prefix_validate(prefix, *validator_names):\n \"\"\"\n Return a validator that will collect top-level keys starting with\n prefix then apply validator_names to each one. Results are moved\n to a dict under the prefix name, with prefix removed from keys\n \"\"\"\n validator_fns = [logic.get_validator(v) for v in validator_names]\n\n def prefix_validator(key, data, errors, context):\n out = {}\n extras = data.get(('__extras',), {})\n\n # values passed as lists of dicts will have been flattened into __junk\n junk = df.unflatten(data.get(('__junk',), {}))\n for field_name in junk:\n if not field_name.startswith(prefix):\n continue\n extras[field_name] = junk[field_name]\n\n for field_name in list(extras):\n if not field_name.startswith(prefix):\n continue\n data[(field_name,)] = extras.pop(field_name)\n for v in validator_fns:\n try:\n df.convert(v, (field_name,), data, errors, context)\n except df.StopOnError:\n break\n out[field_name[len(prefix):]] = data.pop((field_name,))\n\n data[(prefix,)] = out\n\n return prefix_validator\n\n\ndef dict_only(value):\n if not isinstance(value, dict):\n raise Invalid(_('Must be a dict'))\n return value\n\ndef email_is_unique(key, data, errors, context):\n '''Validate email is unique'''\n model = context['model']\n session = context['session']\n\n users = session.query(model.User) \\\n .filter(model.User.email == data[key]).all()\n # is there is no users with this email it's free\n if not users:\n return\n else:\n # allow user to update their own email\n for user in users:\n if (user.name == data[(\"name\",)]\n or user.id == data[(\"id\",)]):\n return\n\n raise Invalid(\n _('The email address \\'{email}\\' belongs to a registered user.').format(email=data[key]))\n\n\ndef one_of(list_of_value):\n ''' Checks if the provided value is present in a list '''\n def callable(value):\n if value not in list_of_value:\n raise Invalid(_('Value must be one of {}'.format(list_of_value)))\n return value\n return callable\n\n\ndef json_object(value):\n ''' Make sure value can be serialized as a JSON object'''\n if value is None or value == '':\n return\n try:\n if not json.dumps(value).startswith('{'):\n raise Invalid(_('The value should be a valid JSON object'))\n except ValueError as e:\n raise Invalid(_('Could not parse the value as a valid JSON object'))\n\n return value\n\n\ndef extras_valid_json(extras, context):\n try:\n for extra, value in iteritems(extras):\n json.dumps(value)\n except ValueError as e:\n raise Invalid(_(u'Could not parse extra \\'{name}\\' as valid JSON').\n format(name=extra))\n return extras\n", "path": "ckan/logic/validators.py"}]} |
gh_patches_debug_1341 | rasdani/github-patches | git_diff | urllib3__urllib3-526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
assert_hostname=False seems to be ignored in 1.10
I have some code that is relying on assert_hostname=False to work.
I upgrade urllib3 to version 1.10 and the code fails with SSLError: hostname 'remote-host' doesn't match 'localhost'
I haven't looked through the code to try to determine why this happens.
Did anyone else notice this? If so, what is the fix.
Essentially I am creating a connection pool like this:
``` python
import urllib3
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs="cert.pem",
assert_hostname=False)
try:
r = http.request('GET', 'https://remote-host:4443/')
print("Certificate verification NO HOSTNAME successful")
except urllib3.exceptions.SSLError as e:
print ("SSL Error:", e)
return -1
return 0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `urllib3/util/ssl_.py`
Content:
```
1 from binascii import hexlify, unhexlify
2 from hashlib import md5, sha1
3
4 from ..exceptions import SSLError
5
6
7 SSLContext = None
8 HAS_SNI = False
9 create_default_context = None
10
11 import errno
12 import ssl
13
14 try: # Test for SSL features
15 from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
16 from ssl import HAS_SNI # Has SNI?
17 except ImportError:
18 pass
19
20
21 try:
22 from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
23 except ImportError:
24 OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
25 OP_NO_COMPRESSION = 0x20000
26
27 try:
28 from ssl import _DEFAULT_CIPHERS
29 except ImportError:
30 _DEFAULT_CIPHERS = (
31 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
32 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
33 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
34 )
35
36 try:
37 from ssl import SSLContext # Modern SSL?
38 except ImportError:
39 import sys
40
41 class SSLContext(object): # Platform-specific: Python 2 & 3.1
42 supports_set_ciphers = sys.version_info >= (2, 7)
43
44 def __init__(self, protocol_version):
45 self.protocol = protocol_version
46 # Use default values from a real SSLContext
47 self.check_hostname = False
48 self.verify_mode = ssl.CERT_NONE
49 self.ca_certs = None
50 self.options = 0
51 self.certfile = None
52 self.keyfile = None
53 self.ciphers = None
54
55 def load_cert_chain(self, certfile, keyfile):
56 self.certfile = certfile
57 self.keyfile = keyfile
58
59 def load_verify_locations(self, location):
60 self.ca_certs = location
61
62 def set_ciphers(self, cipher_suite):
63 if not self.supports_set_ciphers:
64 raise TypeError(
65 'Your version of Python does not support setting '
66 'a custom cipher suite. Please upgrade to Python '
67 '2.7, 3.2, or later if you need this functionality.'
68 )
69 self.ciphers = cipher_suite
70
71 def wrap_socket(self, socket, server_hostname=None):
72 kwargs = {
73 'keyfile': self.keyfile,
74 'certfile': self.certfile,
75 'ca_certs': self.ca_certs,
76 'cert_reqs': self.verify_mode,
77 'ssl_version': self.protocol,
78 }
79 if self.supports_set_ciphers: # Platform-specific: Python 2.7+
80 return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
81 else: # Platform-specific: Python 2.6
82 return wrap_socket(socket, **kwargs)
83
84
85 def assert_fingerprint(cert, fingerprint):
86 """
87 Checks if given fingerprint matches the supplied certificate.
88
89 :param cert:
90 Certificate as bytes object.
91 :param fingerprint:
92 Fingerprint as string of hexdigits, can be interspersed by colons.
93 """
94
95 # Maps the length of a digest to a possible hash function producing
96 # this digest.
97 hashfunc_map = {
98 16: md5,
99 20: sha1
100 }
101
102 fingerprint = fingerprint.replace(':', '').lower()
103 digest_length, odd = divmod(len(fingerprint), 2)
104
105 if odd or digest_length not in hashfunc_map:
106 raise SSLError('Fingerprint is of invalid length.')
107
108 # We need encode() here for py32; works on py2 and p33.
109 fingerprint_bytes = unhexlify(fingerprint.encode())
110
111 hashfunc = hashfunc_map[digest_length]
112
113 cert_digest = hashfunc(cert).digest()
114
115 if not cert_digest == fingerprint_bytes:
116 raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
117 .format(hexlify(fingerprint_bytes),
118 hexlify(cert_digest)))
119
120
121 def resolve_cert_reqs(candidate):
122 """
123 Resolves the argument to a numeric constant, which can be passed to
124 the wrap_socket function/method from the ssl module.
125 Defaults to :data:`ssl.CERT_NONE`.
126 If given a string it is assumed to be the name of the constant in the
127 :mod:`ssl` module or its abbrevation.
128 (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
129 If it's neither `None` nor a string we assume it is already the numeric
130 constant which can directly be passed to wrap_socket.
131 """
132 if candidate is None:
133 return CERT_NONE
134
135 if isinstance(candidate, str):
136 res = getattr(ssl, candidate, None)
137 if res is None:
138 res = getattr(ssl, 'CERT_' + candidate)
139 return res
140
141 return candidate
142
143
144 def resolve_ssl_version(candidate):
145 """
146 like resolve_cert_reqs
147 """
148 if candidate is None:
149 return PROTOCOL_SSLv23
150
151 if isinstance(candidate, str):
152 res = getattr(ssl, candidate, None)
153 if res is None:
154 res = getattr(ssl, 'PROTOCOL_' + candidate)
155 return res
156
157 return candidate
158
159
160 def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
161 options=None, ciphers=None):
162 """All arguments have the same meaning as ``ssl_wrap_socket``.
163
164 By default, this function does a lot of the same work that
165 ``ssl.create_default_context`` does on Python 3.4+. It:
166
167 - Disables SSLv2, SSLv3, and compression
168 - Sets a restricted set of server ciphers
169
170 If you wish to enable SSLv3, you can do::
171
172 from urllib3.util import ssl_
173 context = ssl_.create_urllib3_context()
174 context.options &= ~ssl_.OP_NO_SSLv3
175
176 You can do the same to enable compression (substituting ``COMPRESSION``
177 for ``SSLv3`` in the last line above).
178
179 :param ssl_version:
180 The desired protocol version to use. This will default to
181 PROTOCOL_SSLv23 which will negotiate the highest protocol that both
182 the server and your installation of OpenSSL support.
183 :param cert_reqs:
184 Whether to require the certificate verification. This defaults to
185 ``ssl.CERT_REQUIRED``.
186 :param options:
187 Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
188 ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
189 :param ciphers:
190 Which cipher suites to allow the server to select.
191 :returns:
192 Constructed SSLContext object with specified options
193 :rtype: SSLContext
194 """
195 context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
196
197 if options is None:
198 options = 0
199 # SSLv2 is easily broken and is considered harmful and dangerous
200 options |= OP_NO_SSLv2
201 # SSLv3 has several problems and is now dangerous
202 options |= OP_NO_SSLv3
203 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
204 # (issue #309)
205 options |= OP_NO_COMPRESSION
206
207 context.options |= options
208
209 if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
210 context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
211
212 context.verify_mode = cert_reqs
213 if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
214 context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
215 return context
216
217
218 def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
219 ca_certs=None, server_hostname=None,
220 ssl_version=None, ciphers=None, ssl_context=None):
221 """
222 All arguments except for server_hostname and ssl_context have the same
223 meaning as they do when using :func:`ssl.wrap_socket`.
224
225 :param server_hostname:
226 When SNI is supported, the expected hostname of the certificate
227 :param ssl_context:
228 A pre-made :class:`SSLContext` object. If none is provided, one will
229 be created using :func:`create_urllib3_context`.
230 :param ciphers:
231 A string of ciphers we wish the client to support. This is not
232 supported on Python 2.6 as the ssl module does not support it.
233 """
234 context = ssl_context
235 if context is None:
236 context = create_urllib3_context(ssl_version, cert_reqs,
237 ciphers=ciphers)
238
239 if ca_certs:
240 try:
241 context.load_verify_locations(ca_certs)
242 except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
243 raise SSLError(e)
244 # Py33 raises FileNotFoundError which subclasses OSError
245 # These are not equivalent unless we check the errno attribute
246 except OSError as e: # Platform-specific: Python 3.3 and beyond
247 if e.errno == errno.ENOENT:
248 raise SSLError(e)
249 raise
250 if certfile:
251 context.load_cert_chain(certfile, keyfile)
252 if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
253 return context.wrap_socket(sock, server_hostname=server_hostname)
254 return context.wrap_socket(sock)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py
--- a/urllib3/util/ssl_.py
+++ b/urllib3/util/ssl_.py
@@ -211,7 +211,9 @@
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
- context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
return context
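In other words, when `cert_reqs` resolves to `CERT_REQUIRED`, the pre-patch `create_urllib3_context` also enables the standard library's `check_hostname` enforcement, so the `ssl` module rejects a name mismatch itself and `assert_hostname=False` never takes effect; the patch leaves hostname (and fingerprint) checking to urllib3's own logic, as the added comment says. A minimal sketch of how the difference can be observed, assuming Python 3.4+ and the module shown above:

```python
import ssl

from urllib3.util.ssl_ import create_urllib3_context

ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)

# Before the patch this prints True (check_hostname mirrors CERT_REQUIRED),
# so the ssl module enforces hostname matching during the handshake.
# After the patch it prints False and hostname handling stays in urllib3.
print(ctx.check_hostname)

# Certificate verification itself is unchanged either way.
print(ctx.verify_mode == ssl.CERT_REQUIRED)
```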
| {"golden_diff": "diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py\n--- a/urllib3/util/ssl_.py\n+++ b/urllib3/util/ssl_.py\n@@ -211,7 +211,9 @@\n \n context.verify_mode = cert_reqs\n if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2\n- context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)\n+ # We do our own verification, including fingerprints and alternative\n+ # hostnames. So disable it here\n+ context.check_hostname = False\n return context\n", "issue": "assert_hostname=False seems to be ignored in 1.10\nI have some code that is relying on assert_hostname=False to work.\nI upgrade urllib3 to version 1.10 and the code fails with SSLError: hostname 'remote-host' doesn't match 'localhost'\n\nI haven't looked through the code to try to determine why this happens.\n\nDid anyone else notice this? If so, what is the fix.\n\nEssentially I am creating a connection pool like this:\n\n``` python\nimport urllib3\n\nhttp = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',\n ca_certs=\"cert.pem\",\n assert_hostname=False)\ntry:\n r = http.request('GET', 'https://remote-host:4443/')\n print(\"Certificate verification NO HOSTNAME successful\")\n\nexcept urllib3.exceptions.SSLError as e:\n print (\"SSL Error:\", e)\n return -1\n\nreturn 0\n```\n\n", "before_files": [{"content": "from binascii import hexlify, unhexlify\nfrom hashlib import md5, sha1\n\nfrom ..exceptions import SSLError\n\n\nSSLContext = None\nHAS_SNI = False\ncreate_default_context = None\n\nimport errno\nimport ssl\n\ntry: # Test for SSL features\n from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23\n from ssl import HAS_SNI # Has SNI?\nexcept ImportError:\n pass\n\n\ntry:\n from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION\nexcept ImportError:\n OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000\n OP_NO_COMPRESSION = 0x20000\n\ntry:\n from ssl import _DEFAULT_CIPHERS\nexcept ImportError:\n _DEFAULT_CIPHERS = (\n 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'\n 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'\n 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'\n )\n\ntry:\n from ssl import SSLContext # Modern SSL?\nexcept ImportError:\n import sys\n\n class SSLContext(object): # Platform-specific: Python 2 & 3.1\n supports_set_ciphers = sys.version_info >= (2, 7)\n\n def __init__(self, protocol_version):\n self.protocol = protocol_version\n # Use default values from a real SSLContext\n self.check_hostname = False\n self.verify_mode = ssl.CERT_NONE\n self.ca_certs = None\n self.options = 0\n self.certfile = None\n self.keyfile = None\n self.ciphers = None\n\n def load_cert_chain(self, certfile, keyfile):\n self.certfile = certfile\n self.keyfile = keyfile\n\n def load_verify_locations(self, location):\n self.ca_certs = location\n\n def set_ciphers(self, cipher_suite):\n if not self.supports_set_ciphers:\n raise TypeError(\n 'Your version of Python does not support setting '\n 'a custom cipher suite. 
Please upgrade to Python '\n '2.7, 3.2, or later if you need this functionality.'\n )\n self.ciphers = cipher_suite\n\n def wrap_socket(self, socket, server_hostname=None):\n kwargs = {\n 'keyfile': self.keyfile,\n 'certfile': self.certfile,\n 'ca_certs': self.ca_certs,\n 'cert_reqs': self.verify_mode,\n 'ssl_version': self.protocol,\n }\n if self.supports_set_ciphers: # Platform-specific: Python 2.7+\n return wrap_socket(socket, ciphers=self.ciphers, **kwargs)\n else: # Platform-specific: Python 2.6\n return wrap_socket(socket, **kwargs)\n\n\ndef assert_fingerprint(cert, fingerprint):\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n # Maps the length of a digest to a possible hash function producing\n # this digest.\n hashfunc_map = {\n 16: md5,\n 20: sha1\n }\n\n fingerprint = fingerprint.replace(':', '').lower()\n digest_length, odd = divmod(len(fingerprint), 2)\n\n if odd or digest_length not in hashfunc_map:\n raise SSLError('Fingerprint is of invalid length.')\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n hashfunc = hashfunc_map[digest_length]\n\n cert_digest = hashfunc(cert).digest()\n\n if not cert_digest == fingerprint_bytes:\n raise SSLError('Fingerprints did not match. Expected \"{0}\", got \"{1}\".'\n .format(hexlify(fingerprint_bytes),\n hexlify(cert_digest)))\n\n\ndef resolve_cert_reqs(candidate):\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_NONE`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbrevation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_NONE\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, 'CERT_' + candidate)\n return res\n\n return candidate\n\n\ndef resolve_ssl_version(candidate):\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_SSLv23\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, 'PROTOCOL_' + candidate)\n return res\n\n return candidate\n\n\ndef create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,\n options=None, ciphers=None):\n \"\"\"All arguments have the same meaning as ``ssl_wrap_socket``.\n\n By default, this function does a lot of the same work that\n ``ssl.create_default_context`` does on Python 3.4+. It:\n\n - Disables SSLv2, SSLv3, and compression\n - Sets a restricted set of server ciphers\n\n If you wish to enable SSLv3, you can do::\n\n from urllib3.util import ssl_\n context = ssl_.create_urllib3_context()\n context.options &= ~ssl_.OP_NO_SSLv3\n\n You can do the same to enable compression (substituting ``COMPRESSION``\n for ``SSLv3`` in the last line above).\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n :param cert_reqs:\n Whether to require the certificate verification. 
This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.\n :param ciphers:\n Which cipher suites to allow the server to select.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n\n context.options |= options\n\n if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6\n context.set_ciphers(ciphers or _DEFAULT_CIPHERS)\n\n context.verify_mode = cert_reqs\n if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2\n context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)\n return context\n\n\ndef ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,\n ca_certs=None, server_hostname=None,\n ssl_version=None, ciphers=None, ssl_context=None):\n \"\"\"\n All arguments except for server_hostname and ssl_context have the same\n meaning as they do when using :func:`ssl.wrap_socket`.\n\n :param server_hostname:\n When SNI is supported, the expected hostname of the certificate\n :param ssl_context:\n A pre-made :class:`SSLContext` object. If none is provided, one will\n be created using :func:`create_urllib3_context`.\n :param ciphers:\n A string of ciphers we wish the client to support. This is not\n supported on Python 2.6 as the ssl module does not support it.\n \"\"\"\n context = ssl_context\n if context is None:\n context = create_urllib3_context(ssl_version, cert_reqs,\n ciphers=ciphers)\n\n if ca_certs:\n try:\n context.load_verify_locations(ca_certs)\n except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2\n raise SSLError(e)\n # Py33 raises FileNotFoundError which subclasses OSError\n # These are not equivalent unless we check the errno attribute\n except OSError as e: # Platform-specific: Python 3.3 and beyond\n if e.errno == errno.ENOENT:\n raise SSLError(e)\n raise\n if certfile:\n context.load_cert_chain(certfile, keyfile)\n if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI\n return context.wrap_socket(sock, server_hostname=server_hostname)\n return context.wrap_socket(sock)\n", "path": "urllib3/util/ssl_.py"}], "after_files": [{"content": "from binascii import hexlify, unhexlify\nfrom hashlib import md5, sha1\n\nfrom ..exceptions import SSLError\n\n\nSSLContext = None\nHAS_SNI = False\ncreate_default_context = None\n\nimport errno\nimport ssl\n\ntry: # Test for SSL features\n from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23\n from ssl import HAS_SNI # Has SNI?\nexcept ImportError:\n pass\n\n\ntry:\n from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION\nexcept ImportError:\n OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000\n OP_NO_COMPRESSION = 0x20000\n\ntry:\n from ssl import _DEFAULT_CIPHERS\nexcept ImportError:\n _DEFAULT_CIPHERS = (\n 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'\n 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'\n 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'\n )\n\ntry:\n from ssl import SSLContext # Modern SSL?\nexcept ImportError:\n import sys\n\n class 
SSLContext(object): # Platform-specific: Python 2 & 3.1\n supports_set_ciphers = sys.version_info >= (2, 7)\n\n def __init__(self, protocol_version):\n self.protocol = protocol_version\n # Use default values from a real SSLContext\n self.check_hostname = False\n self.verify_mode = ssl.CERT_NONE\n self.ca_certs = None\n self.options = 0\n self.certfile = None\n self.keyfile = None\n self.ciphers = None\n\n def load_cert_chain(self, certfile, keyfile):\n self.certfile = certfile\n self.keyfile = keyfile\n\n def load_verify_locations(self, location):\n self.ca_certs = location\n\n def set_ciphers(self, cipher_suite):\n if not self.supports_set_ciphers:\n raise TypeError(\n 'Your version of Python does not support setting '\n 'a custom cipher suite. Please upgrade to Python '\n '2.7, 3.2, or later if you need this functionality.'\n )\n self.ciphers = cipher_suite\n\n def wrap_socket(self, socket, server_hostname=None):\n kwargs = {\n 'keyfile': self.keyfile,\n 'certfile': self.certfile,\n 'ca_certs': self.ca_certs,\n 'cert_reqs': self.verify_mode,\n 'ssl_version': self.protocol,\n }\n if self.supports_set_ciphers: # Platform-specific: Python 2.7+\n return wrap_socket(socket, ciphers=self.ciphers, **kwargs)\n else: # Platform-specific: Python 2.6\n return wrap_socket(socket, **kwargs)\n\n\ndef assert_fingerprint(cert, fingerprint):\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n # Maps the length of a digest to a possible hash function producing\n # this digest.\n hashfunc_map = {\n 16: md5,\n 20: sha1\n }\n\n fingerprint = fingerprint.replace(':', '').lower()\n digest_length, odd = divmod(len(fingerprint), 2)\n\n if odd or digest_length not in hashfunc_map:\n raise SSLError('Fingerprint is of invalid length.')\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n hashfunc = hashfunc_map[digest_length]\n\n cert_digest = hashfunc(cert).digest()\n\n if not cert_digest == fingerprint_bytes:\n raise SSLError('Fingerprints did not match. 
Expected \"{0}\", got \"{1}\".'\n .format(hexlify(fingerprint_bytes),\n hexlify(cert_digest)))\n\n\ndef resolve_cert_reqs(candidate):\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_NONE`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbrevation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_NONE\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, 'CERT_' + candidate)\n return res\n\n return candidate\n\n\ndef resolve_ssl_version(candidate):\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_SSLv23\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, 'PROTOCOL_' + candidate)\n return res\n\n return candidate\n\n\ndef create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,\n options=None, ciphers=None):\n \"\"\"All arguments have the same meaning as ``ssl_wrap_socket``.\n\n By default, this function does a lot of the same work that\n ``ssl.create_default_context`` does on Python 3.4+. It:\n\n - Disables SSLv2, SSLv3, and compression\n - Sets a restricted set of server ciphers\n\n If you wish to enable SSLv3, you can do::\n\n from urllib3.util import ssl_\n context = ssl_.create_urllib3_context()\n context.options &= ~ssl_.OP_NO_SSLv3\n\n You can do the same to enable compression (substituting ``COMPRESSION``\n for ``SSLv3`` in the last line above).\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n :param cert_reqs:\n Whether to require the certificate verification. This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.\n :param ciphers:\n Which cipher suites to allow the server to select.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n\n context.options |= options\n\n if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6\n context.set_ciphers(ciphers or _DEFAULT_CIPHERS)\n\n context.verify_mode = cert_reqs\n if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2\n # We do our own verification, including fingerprints and alternative\n # hostnames. 
So disable it here\n context.check_hostname = False\n return context\n\n\ndef ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,\n ca_certs=None, server_hostname=None,\n ssl_version=None, ciphers=None, ssl_context=None):\n \"\"\"\n All arguments except for server_hostname and ssl_context have the same\n meaning as they do when using :func:`ssl.wrap_socket`.\n\n :param server_hostname:\n When SNI is supported, the expected hostname of the certificate\n :param ssl_context:\n A pre-made :class:`SSLContext` object. If none is provided, one will\n be created using :func:`create_urllib3_context`.\n :param ciphers:\n A string of ciphers we wish the client to support. This is not\n supported on Python 2.6 as the ssl module does not support it.\n \"\"\"\n context = ssl_context\n if context is None:\n context = create_urllib3_context(ssl_version, cert_reqs,\n ciphers=ciphers)\n\n if ca_certs:\n try:\n context.load_verify_locations(ca_certs)\n except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2\n raise SSLError(e)\n # Py33 raises FileNotFoundError which subclasses OSError\n # These are not equivalent unless we check the errno attribute\n except OSError as e: # Platform-specific: Python 3.3 and beyond\n if e.errno == errno.ENOENT:\n raise SSLError(e)\n raise\n if certfile:\n context.load_cert_chain(certfile, keyfile)\n if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI\n return context.wrap_socket(sock, server_hostname=server_hostname)\n return context.wrap_socket(sock)\n", "path": "urllib3/util/ssl_.py"}]} |
gh_patches_debug_1342 | rasdani/github-patches | git_diff | vyperlang__vyper-2805 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: from_annotation() takes 2 positional arguments but 6 were given
### Version Information
* vyper Version (output of `vyper --version`): 0.3.2
### What's your issue about?
I think you'll see which line causes this exception; the `empty(self.s)` assignments in `f()` and `g()` below look like the trigger.
```
error: TypeError: from_annotation() takes 2 positional arguments but 6 were given
[25217] Failed to execute script 'vyper_compile' due to unhandled exception!
```
```
struct S:
    a: uint128
    x: uint256[3]
    b: uint240

b: uint8
s: S
a: uint8

@external
def __init__():
    self.b = 23
    self.a = 17

@external
def f():
    self.s = empty(self.s)
    self.s.x[0] = 42
    self.s.x[1] = 42
    self.s.x[2] = 42
    self.s = empty(S)
    assert self.s.x[0] == 0
    assert self.s.x[1] == 0
    assert self.s.x[2] == 0
    assert b == 23
    assert a == 17

@external
def g():
    self.s = empty(self.s)
    self.s.x[0] = 42
    self.s.x[1] = 42
    self.s.x[2] = 42
    self.s.a = 1
    self.s.b = 2
    self.s.x = empty(uint256[3])
    assert self.s.x[0] == 0
    assert self.s.x[1] == 0
    assert self.s.x[2] == 0
    assert b == 23
    assert a == 17
    assert self.s.a == 1
    assert self.s.b == 2
```
--- END ISSUE ---
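A quick way to see the failure mode before digging into the files (a minimal sketch with stand-in names, not the actual vyper classes): the traceback appears when a `from_annotation` stub that only accepts `(self, node)` is handed the extra positional flags that the primitive-class variant expects, most likely triggered here by the `empty(self.s)` calls above.

```python
# Hypothetical stand-in, not the real compiler code: a stub that only takes
# (self, node) raises TypeError when a caller forwards the extra
# location/constancy flags positionally.
class FakeTypeDefinition:
    def from_annotation(self, node):
        raise Exception("Value is not a type")

try:
    # node + location + is_constant + is_public + is_immutable -> 6 positional args
    FakeTypeDefinition().from_annotation("node", "storage", False, False, False)
except TypeError as exc:
    print(exc)  # ... takes 2 positional arguments but 6 were given
```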
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/semantics/types/bases.py`
Content:
```
1 import copy
2 from collections import OrderedDict
3 from enum import Enum
4 from typing import Any, Dict, Optional, Tuple, Type, Union
5
6 from vyper import ast as vy_ast
7 from vyper.abi_types import ABIType
8 from vyper.exceptions import (
9 CompilerPanic,
10 ImmutableViolation,
11 InvalidLiteral,
12 InvalidOperation,
13 NamespaceCollision,
14 StateAccessViolation,
15 StructureException,
16 UnexpectedNodeType,
17 UnexpectedValue,
18 UnknownAttribute,
19 )
20 from vyper.semantics.types.abstract import AbstractDataType
21 from vyper.semantics.validation.levenshtein_utils import get_levenshtein_error_suggestions
22
23
24 class DataLocation(Enum):
25 UNSET = 0
26 MEMORY = 1
27 STORAGE = 2
28 CALLDATA = 3
29 CODE = 4
30
31
32 class DataPosition:
33 _location: DataLocation
34
35
36 class CalldataOffset(DataPosition):
37 __slots__ = (
38 "dynamic_offset",
39 "static_offset",
40 )
41 _location = DataLocation.CALLDATA
42
43 def __init__(self, static_offset, dynamic_offset=None):
44 self.static_offset = static_offset
45 self.dynamic_offset = dynamic_offset
46
47 def __repr__(self):
48 if self.dynamic_offset is not None:
49 return f"<CalldataOffset: static {self.static_offset}, dynamic {self.dynamic_offset})>"
50 else:
51 return f"<CalldataOffset: static {self.static_offset}, no dynamic>"
52
53
54 class MemoryOffset(DataPosition):
55 __slots__ = ("offset",)
56 _location = DataLocation.MEMORY
57
58 def __init__(self, offset):
59 self.offset = offset
60
61 def __repr__(self):
62 return f"<MemoryOffset: {self.offset}>"
63
64
65 class StorageSlot(DataPosition):
66 __slots__ = ("position",)
67 _location = DataLocation.STORAGE
68
69 def __init__(self, position):
70 self.position = position
71
72 def __repr__(self):
73 return f"<StorageSlot: {self.position}>"
74
75
76 class CodeOffset(DataPosition):
77 __slots__ = ("offset",)
78 _location = DataLocation.CODE
79
80 def __init__(self, offset):
81 self.offset = offset
82
83 def __repr__(self):
84 return f"<CodeOffset: {self.offset}>"
85
86
87 class BasePrimitive:
88 """
89 Base class for primitive type classes.
90
91 Primitives are objects that are invoked when applying a type to a variable.
92 They must contain a `from_annotation` (and optionally `from_literal`) method
93 that returns their equivalent `BaseTypeDefinition` object.
94
95 Attributes
96 ----------
97 _id : str
98 The name of the type.
99 _type : BaseTypeDefinition
100 The related `BaseTypeDefinition` class generated from this primitive
101 _as_array: bool, optional
102 If `True`, this type can be used as the base member for an array.
103 _valid_literal : Tuple
104 A tuple of Vyper ast classes that may be assigned this type.
105 """
106
107 _id: str
108 _type: Type["BaseTypeDefinition"]
109 _valid_literal: Tuple
110
111 @classmethod
112 def from_annotation(
113 cls,
114 node: Union[vy_ast.Name, vy_ast.Call],
115 location: DataLocation = DataLocation.UNSET,
116 is_constant: bool = False,
117 is_public: bool = False,
118 is_immutable: bool = False,
119 ) -> "BaseTypeDefinition":
120 """
121 Generate a `BaseTypeDefinition` instance of this type from `AnnAssign.annotation`
122
123 Arguments
124 ---------
125 node : VyperNode
126 Vyper ast node from the `annotation` member of an `AnnAssign` node.
127
128 Returns
129 -------
130 BaseTypeDefinition
131 BaseTypeDefinition related to the primitive that the method was called on.
132 """
133 if not isinstance(node, vy_ast.Name):
134 raise StructureException("Invalid type assignment", node)
135 if node.id != cls._id:
136 raise UnexpectedValue("Node id does not match type name")
137 return cls._type(location, is_constant, is_public, is_immutable)
138
139 @classmethod
140 def from_literal(cls, node: vy_ast.Constant) -> "BaseTypeDefinition":
141 """
142 Generate a `BaseTypeDefinition` instance of this type from a literal constant.
143
144 This method is called on every primitive class in order to determine
145 potential types for a `Constant` AST node.
146
147 Types that may be assigned from literals should include a `_valid_literal`
148 attribute, containing a list of AST node classes that may be valid for
149 this type. If the `_valid_literal` attribute is not included, the type
150 cannot be assigned to a literal.
151
152 Arguments
153 ---------
154 node : VyperNode
155 `Constant` Vyper ast node, or a list or tuple of constants.
156
157 Returns
158 -------
159 BaseTypeDefinition
160 BaseTypeDefinition related to the primitive that the method was called on.
161 """
162 if not isinstance(node, vy_ast.Constant):
163 raise UnexpectedNodeType(f"Attempted to validate a '{node.ast_type}' node.")
164 if not isinstance(node, cls._valid_literal):
165 raise InvalidLiteral(f"Invalid literal type for {cls.__name__}", node)
166 return cls._type()
167
168 @classmethod
169 def compare_type(
170 cls, other: Union["BaseTypeDefinition", "BasePrimitive", AbstractDataType]
171 ) -> bool:
172 """
173 Compare this type object against another type object.
174
175 Failed comparisons must return `False`, not raise an exception.
176
177 This method is not intended to be called directly. Type comparisons
178 are handled by methods in `vyper.context.validation.utils`
179
180 Arguments
181 ---------
182 other : BaseTypeDefinition
183 Another type object to be compared against this one.
184
185 Returns
186 -------
187 bool
188 Indicates if the types are equivalent.
189 """
190 return isinstance(other, cls._type)
191
192 @classmethod
193 def fetch_call_return(self, node: vy_ast.Call) -> "BaseTypeDefinition":
194 """
195 Validate a call to this type and return the result.
196
197 This method must raise if the type is not callable, or the call arguments
198 are not valid.
199
200 Arguments
201 ---------
202 node : Call
203 Vyper ast node of call action to validate.
204
205 Returns
206 -------
207 BaseTypeDefinition, optional
208 Type generated as a result of the call.
209 """
210 raise StructureException("Type is not callable", node)
211
212 @classmethod
213 def get_subscripted_type(self, node: vy_ast.Index) -> None:
214 # always raises - do not implement in inherited classes
215 raise StructureException("Types cannot be indexed", node)
216
217 @classmethod
218 def get_member(cls, key: str, node: vy_ast.Attribute) -> None:
219 # always raises - do not implement in inherited classes
220 raise StructureException("Types do not have members", node)
221
222 @classmethod
223 def validate_modification(
224 cls, node: Union[vy_ast.Assign, vy_ast.AugAssign], mutability: Any
225 ) -> None:
226 # always raises - do not implement in inherited classes
227 raise InvalidOperation("Cannot assign to a type", node)
228
229
230 class BaseTypeDefinition:
231 """
232 Base class for type definition classes.
233
234 Type definitions are objects that represent the type of a specific object
235 within a contract. They are usually derived from a `BasePrimitive` counterpart.
236
237 Class Attributes
238 -----------------
239 _id : str
240 The name of the type.
241 _is_callable : bool, optional
242 If `True`, attempts to assign this value without calling it will raise
243 a more expressive error message recommending that the user performs a
244 function call.
245
246 Object Attributes
247 -----------------
248 is_constant : bool, optional
249 If `True`, the value of this object cannot be modified after assignment.
250 size_in_bytes: int
251 The number of bytes that are required to store this type.
252 """
253
254 # TODO CMC 2022-01-08 `is_dynamic_size` probably unused
255 is_dynamic_size = False
256
257 size_in_bytes = 32
258 _id: str
259
260 def __init__(
261 self,
262 location: DataLocation = DataLocation.UNSET,
263 is_constant: bool = False,
264 is_public: bool = False,
265 is_immutable: bool = False,
266 ) -> None:
267 self.location = location
268 self.is_constant = is_constant
269 self.is_public = is_public
270 self.is_immutable = is_immutable
271
272 self._modification_count = 0
273
274 @property
275 def abi_type(self) -> ABIType:
276 """
277 The ABI type corresponding to this type
278 """
279 raise CompilerPanic("Method must be implemented by the inherited class")
280
281 @property
282 def canonical_abi_type(self) -> str:
283 """
284 The canonical name of this type. Used for ABI types and generating function signatures.
285 """
286 return self.abi_type.selector_name()
287
288 def from_annotation(self, node: vy_ast.VyperNode, **kwargs: Any) -> None:
289 # always raises, user should have used a primitive
290 raise StructureException("Value is not a type", node)
291
292 def set_position(self, position: DataPosition) -> None:
293 if hasattr(self, "position"):
294 raise CompilerPanic("Position was already assigned")
295 if self.location != position._location:
296 if self.location == DataLocation.UNSET:
297 self.location = position._location
298 else:
299 raise CompilerPanic("Incompatible locations")
300 self.position = position
301
302 def compare_type(
303 self, other: Union["BaseTypeDefinition", BasePrimitive, AbstractDataType]
304 ) -> bool:
305 """
306 Compare this type object against another type object.
307
308 Failed comparisons must return `False`, not raise an exception.
309
310 This method is not intended to be called directly. Type comparisons
311 are handled by methods in `vyper.context.validation.utils`
312
313 Arguments
314 ---------
315 other : BaseTypeDefinition
316 Another type object to be compared against this one.
317
318 Returns
319 -------
320 bool
321 Indicates if the types are equivalent.
322 """
323 return isinstance(other, type(self))
324
325 def validate_numeric_op(
326 self, node: Union[vy_ast.UnaryOp, vy_ast.BinOp, vy_ast.AugAssign]
327 ) -> None:
328 """
329 Validate a numeric operation for this type.
330
331 Arguments
332 ---------
333 node : UnaryOp | BinOp | AugAssign
334 Vyper ast node of the numeric operation to be validated.
335
336 Returns
337 -------
338 None. A failed validation must raise an exception.
339 """
340 raise InvalidOperation(f"Cannot perform {node.op.description} on {self}", node)
341
342 def validate_boolean_op(self, node: vy_ast.BoolOp) -> None:
343 """
344 Validate a boolean operation for this type.
345
346 Arguments
347 ---------
348 node : BoolOp
349 Vyper ast node of the boolean operation to be validated.
350
351 Returns
352 -------
353 None. A failed validation must raise an exception.
354 """
355 raise InvalidOperation(f"Invalid type for operand: {self}", node)
356
357 def validate_comparator(self, node: vy_ast.Compare) -> None:
358 """
359 Validate a comparator for this type.
360
361 Arguments
362 ---------
363 node : Compare
364 Vyper ast node of the comparator to be validated.
365
366 Returns
367 -------
368 None. A failed validation must raise an exception.
369 """
370 if not isinstance(node.op, (vy_ast.Eq, vy_ast.NotEq)):
371 raise InvalidOperation(
372 f"Cannot perform {node.op.description} comparison on {self}", node
373 )
374
375 def validate_implements(self, node: vy_ast.AnnAssign) -> None:
376 """
377 Validate an implements statement.
378
379 This method is unique to user-defined interfaces. It should not be
380 included in other types.
381
382 Arguments
383 ---------
384 node : AnnAssign
385 Vyper ast node of the implements statement being validated.
386
387 Returns
388 -------
389 None. A failed validation must raise an exception.
390 """
391 raise StructureException("Value is not an interface", node)
392
393 def fetch_call_return(self, node: vy_ast.Call) -> Union["BaseTypeDefinition", None]:
394 """
395 Validate a call to this value and return the result.
396
397 This method must raise if the value is not callable, or the call arguments
398 are not valid.
399
400 Arguments
401 ---------
402 node : Call
403 Vyper ast node of call action to validate.
404
405 Returns
406 -------
407 BaseTypeDefinition, optional
408 Type generated as a result of the call.
409 """
410 raise StructureException("Value is not callable", node)
411
412 def validate_index_type(self, node: vy_ast.Index) -> None:
413 """
414 Validate an index reference, e.g. x[1]. Raises if the index is invalid.
415
416 Arguments
417 ---------
418 node : Index
419 Vyper ast node from the `slice` member of a Subscript node.
420 """
421 raise StructureException(f"Type '{self}' does not support indexing", node)
422
423 def get_subscripted_type(self, node: vy_ast.Index) -> "BaseTypeDefinition":
424 """
425 Return the type of a subscript expression, e.g. x[1]
426
427 Arguments
428 ---------
429 node: Index
430 Vyper ast node from the `slice` member of a Subscript node
431
432 Returns
433 -------
434 BaseTypeDefinition
435 Type object for value at the given index.
436 """
437 raise StructureException(f"Type '{self}' does not support indexing", node)
438
439 def get_member(self, key: str, node: vy_ast.Attribute) -> "BaseTypeDefinition":
440 """
441 Validate an attribute reference and return the given type for the member.
442
443 Arguments
444 ---------
445 key : str
446 Name of the member being accessed.
447 node: Attribute
448 Vyper ast Attribute node representing the member being accessed.
449
450 Returns
451 -------
452 BaseTypeDefinition
453 A type object for the value of the given member. Raises if the member
454 does not exist for the given type.
455 """
456 raise StructureException(f"Type '{self}' does not support members", node)
457
458 def validate_modification(
459 self,
460 node: Union[vy_ast.Assign, vy_ast.AugAssign, vy_ast.Call],
461 mutability: Any, # should be StateMutability, import cycle
462 ) -> None:
463 """
464 Validate an attempt to modify this value.
465
466 Raises if the value is a constant or involves an invalid operation.
467
468 Arguments
469 ---------
470 node : Assign | AugAssign | Call
471 Vyper ast node of the modifying action.
472 mutability: StateMutability
473 The mutability of the context (e.g., pure function) we are currently in
474 """
475 # TODO: break this cycle, probably by moving this to validation module
476 from vyper.semantics.types.function import StateMutability
477
478 if mutability <= StateMutability.VIEW and self.location == DataLocation.STORAGE:
479 raise StateAccessViolation(
480 f"Cannot modify storage in a {mutability.value} function", node
481 )
482
483 if self.location == DataLocation.CALLDATA:
484 raise ImmutableViolation("Cannot write to calldata", node)
485 if self.is_constant:
486 raise ImmutableViolation("Constant value cannot be written to", node)
487 if self.is_immutable:
488 if node.get_ancestor(vy_ast.FunctionDef).get("name") != "__init__":
489 raise ImmutableViolation("Immutable value cannot be written to", node)
490 if self._modification_count:
491 raise ImmutableViolation(
492 "Immutable value cannot be modified after assignment", node
493 )
494 self._modification_count += 1
495
496 if isinstance(node, vy_ast.AugAssign):
497 self.validate_numeric_op(node)
498
499 def get_signature(self) -> Tuple[Tuple, Optional["BaseTypeDefinition"]]:
500 raise CompilerPanic("Method must be implemented by the inherited class")
501
502 def compare_signature(self, other: "BaseTypeDefinition") -> bool:
503 """
504 Compare the signature of this type with another type.
505
506 Used when determining if an interface has been implemented. This method
507 should not be directly implemented by any inherited classes.
508 """
509
510 if not self.is_public:
511 return False
512
513 arguments, return_type = self.get_signature()
514 other_arguments, other_return_type = other.get_signature()
515
516 if len(arguments) != len(other_arguments):
517 return False
518 for a, b in zip(arguments, other_arguments):
519 if not a.compare_type(b):
520 return False
521 if return_type and not return_type.compare_type(other_return_type): # type: ignore
522 return False
523
524 return True
525
526
527 # TODO rename this: it's really for address/interface signature resolution
528 class ValueTypeDefinition(BaseTypeDefinition):
529 """
530 Base class for types representing a single value.
531
532 Class attributes
533 ----------------
534 _valid_literal: VyperNode | Tuple
535 A vyper ast class or tuple of ast classes that can represent valid literals
536 for the given type. Including this attribute will allow literal values to be
537 assigned this type.
538 """
539
540 def __repr__(self):
541 return self._id
542
543 def get_signature(self):
544 return (), self
545
546
547 class MemberTypeDefinition(BaseTypeDefinition):
548 """
549 Base class for types that have accessible members.
550
551 Class attributes
552 ----------------
553 _type_members : Dict[str, BaseType]
554 Dictionary of members common to all values of this type.
555
556 Object attributes
557 -----------------
558 members : OrderedDict[str, BaseType]
559 Dictionary of members for the given type.
560 """
561
562 _type_members: Dict
563
564 def __init__(
565 self,
566 location: DataLocation = DataLocation.UNSET,
567 is_constant: bool = False,
568 is_public: bool = False,
569 is_immutable: bool = False,
570 ) -> None:
571 super().__init__(location, is_constant, is_public, is_immutable)
572 self.members: OrderedDict = OrderedDict()
573
574 def add_member(self, name: str, type_: BaseTypeDefinition) -> None:
575 if name in self.members:
576 raise NamespaceCollision(f"Member '{name}' already exists in {self}")
577 if name in getattr(self, "_type_members", []):
578 raise NamespaceCollision(f"Member '{name}' already exists in {self}")
579 self.members[name] = type_
580
581 def get_member(self, key: str, node: vy_ast.VyperNode) -> BaseTypeDefinition:
582 if key in self.members:
583 return self.members[key]
584 elif key in getattr(self, "_type_members", []):
585 type_ = copy.deepcopy(self._type_members[key])
586 type_.location = self.location
587 type_.is_constant = self.is_constant
588 return type_
589 suggestions_str = get_levenshtein_error_suggestions(key, self.members, 0.3)
590 raise UnknownAttribute(f"{self} has no member '{key}'. {suggestions_str}", node)
591
592 def __repr__(self):
593 return f"{self._id}"
594
595
596 class IndexableTypeDefinition(BaseTypeDefinition):
597 """
598 Base class for indexable types such as arrays and mappings.
599
600 Attributes
601 ----------
602 key_type: BaseType
603 Type representing the index value for this object.
604 value_type : BaseType
605 Type representing the value(s) contained in this object.
606 _id : str
607 Name of the type.
608 """
609
610 def __init__(
611 self,
612 value_type: BaseTypeDefinition,
613 key_type: BaseTypeDefinition,
614 _id: str,
615 location: DataLocation = DataLocation.UNSET,
616 is_constant: bool = False,
617 is_public: bool = False,
618 is_immutable: bool = False,
619 ) -> None:
620 super().__init__(location, is_constant, is_public, is_immutable)
621 self.value_type = value_type
622 self.key_type = key_type
623 self._id = _id
624
625 def get_signature(self) -> Tuple[Tuple, Optional[BaseTypeDefinition]]:
626 new_args, return_type = self.value_type.get_signature()
627 return (self.key_type,) + new_args, return_type
628
```
--- END FILES ---
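For context on where the argument-count mismatch can come from, note that the listing defines two different `from_annotation` shapes: the `BasePrimitive` classmethod accepts `node, location, is_constant, is_public, is_immutable`, while the `BaseTypeDefinition` instance method only accepts `node` plus keyword arguments. A toy sketch of the primitive-side call shape, using illustrative names rather than the real types:

```python
from enum import Enum

class DataLocation(Enum):
    UNSET = 0
    STORAGE = 2

class FakeDefinition:
    def __init__(self, location=DataLocation.UNSET, is_constant=False,
                 is_public=False, is_immutable=False):
        self.location = location
        self.is_constant = is_constant
        self.is_public = is_public
        self.is_immutable = is_immutable

class FakePrimitive:
    _type = FakeDefinition

    @classmethod
    def from_annotation(cls, node, location=DataLocation.UNSET, is_constant=False,
                        is_public=False, is_immutable=False):
        # the primitive-side signature tolerates all five parameters
        return cls._type(location, is_constant, is_public, is_immutable)

defn = FakePrimitive.from_annotation("annotation-node", DataLocation.STORAGE)
print(defn.location)  # DataLocation.STORAGE
```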
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
| diff --git a/vyper/semantics/types/bases.py b/vyper/semantics/types/bases.py
--- a/vyper/semantics/types/bases.py
+++ b/vyper/semantics/types/bases.py
@@ -285,7 +285,7 @@
         """
         return self.abi_type.selector_name()
 
-    def from_annotation(self, node: vy_ast.VyperNode, **kwargs: Any) -> None:
+    def from_annotation(self, node: vy_ast.VyperNode, *args: Any, **kwargs: Any) -> None:
         # always raises, user should have used a primitive
         raise StructureException("Value is not a type", node)
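A hedged reading of the one-line change above: accepting `*args` lets the always-raising stub swallow the surplus positional flags, so a bad call surfaces the intended "Value is not a type" structure error rather than a bare `TypeError`. A standalone sketch of the patched behaviour, using simplified stand-ins rather than the real classes:

```python
class StructureException(Exception):
    pass

class PatchedDefinition:
    # mirrors the patched signature: extra positional args are now accepted
    def from_annotation(self, node, *args, **kwargs):
        raise StructureException("Value is not a type")

try:
    PatchedDefinition().from_annotation("node", "storage", False, False, False)
except StructureException as exc:
    print(exc)  # Value is not a type (a compiler diagnostic instead of a crash)
```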
| {"golden_diff": "diff --git a/vyper/semantics/types/bases.py b/vyper/semantics/types/bases.py\n--- a/vyper/semantics/types/bases.py\n+++ b/vyper/semantics/types/bases.py\n@@ -285,7 +285,7 @@\n \"\"\"\n return self.abi_type.selector_name()\n \n- def from_annotation(self, node: vy_ast.VyperNode, **kwargs: Any) -> None:\n+ def from_annotation(self, node: vy_ast.VyperNode, *args: Any, **kwargs: Any) -> None:\n # always raises, user should have used a primitive\n raise StructureException(\"Value is not a type\", node)\n", "issue": "TypeError: from_annotation() takes 2 positional arguments but 6 were given\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.2\r\n\r\n### What's your issue about?\r\n\r\nI think you'll see which line causes this exception.\r\n\r\n```\r\nerror: TypeError: from_annotation() takes 2 positional arguments but 6 were given\r\n[25217] Failed to execute script 'vyper_compile' due to unhandled exception!\r\n```\r\n\r\n```\r\nstruct S:\r\n a: uint128\r\n x: uint256[3]\r\n b: uint240\r\n \r\nb: uint8\r\ns: S\r\na: uint8\r\n\r\n@external\r\ndef __init__():\r\n self.b = 23\r\n self.a = 17\r\n \r\n@external\r\ndef f():\r\n self.s = empty(self.s)\r\n self.s.x[0] = 42\r\n self.s.x[1] = 42\r\n self.s.x[2] = 42\r\n self.s = empty(S)\r\n assert self.s.x[0] == 0\r\n assert self.s.x[1] == 0\r\n assert self.s.x[2] == 0\r\n assert b == 23\r\n assert a == 17\r\n \r\n@external\r\ndef g():\r\n self.s = empty(self.s)\r\n self.s.x[0] = 42\r\n self.s.x[1] = 42\r\n self.s.x[2] = 42\r\n self.s.a = 1\r\n self.s.b = 2\r\n self.s.x = empty(uint256[3])\r\n assert self.s.x[0] == 0\r\n assert self.s.x[1] == 0\r\n assert self.s.x[2] == 0\r\n assert b == 23\r\n assert a == 17\r\n assert self.s.a == 1\r\n assert self.s.b == 2\r\n```\n", "before_files": [{"content": "import copy\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom typing import Any, Dict, Optional, Tuple, Type, Union\n\nfrom vyper import ast as vy_ast\nfrom vyper.abi_types import ABIType\nfrom vyper.exceptions import (\n CompilerPanic,\n ImmutableViolation,\n InvalidLiteral,\n InvalidOperation,\n NamespaceCollision,\n StateAccessViolation,\n StructureException,\n UnexpectedNodeType,\n UnexpectedValue,\n UnknownAttribute,\n)\nfrom vyper.semantics.types.abstract import AbstractDataType\nfrom vyper.semantics.validation.levenshtein_utils import get_levenshtein_error_suggestions\n\n\nclass DataLocation(Enum):\n UNSET = 0\n MEMORY = 1\n STORAGE = 2\n CALLDATA = 3\n CODE = 4\n\n\nclass DataPosition:\n _location: DataLocation\n\n\nclass CalldataOffset(DataPosition):\n __slots__ = (\n \"dynamic_offset\",\n \"static_offset\",\n )\n _location = DataLocation.CALLDATA\n\n def __init__(self, static_offset, dynamic_offset=None):\n self.static_offset = static_offset\n self.dynamic_offset = dynamic_offset\n\n def __repr__(self):\n if self.dynamic_offset is not None:\n return f\"<CalldataOffset: static {self.static_offset}, dynamic {self.dynamic_offset})>\"\n else:\n return f\"<CalldataOffset: static {self.static_offset}, no dynamic>\"\n\n\nclass MemoryOffset(DataPosition):\n __slots__ = (\"offset\",)\n _location = DataLocation.MEMORY\n\n def __init__(self, offset):\n self.offset = offset\n\n def __repr__(self):\n return f\"<MemoryOffset: {self.offset}>\"\n\n\nclass StorageSlot(DataPosition):\n __slots__ = (\"position\",)\n _location = DataLocation.STORAGE\n\n def __init__(self, position):\n self.position = position\n\n def __repr__(self):\n return f\"<StorageSlot: {self.position}>\"\n\n\nclass 
CodeOffset(DataPosition):\n __slots__ = (\"offset\",)\n _location = DataLocation.CODE\n\n def __init__(self, offset):\n self.offset = offset\n\n def __repr__(self):\n return f\"<CodeOffset: {self.offset}>\"\n\n\nclass BasePrimitive:\n \"\"\"\n Base class for primitive type classes.\n\n Primitives are objects that are invoked when applying a type to a variable.\n They must contain a `from_annotation` (and optionally `from_literal`) method\n that returns their equivalent `BaseTypeDefinition` object.\n\n Attributes\n ----------\n _id : str\n The name of the type.\n _type : BaseTypeDefinition\n The related `BaseTypeDefinition` class generated from this primitive\n _as_array: bool, optional\n If `True`, this type can be used as the base member for an array.\n _valid_literal : Tuple\n A tuple of Vyper ast classes that may be assigned this type.\n \"\"\"\n\n _id: str\n _type: Type[\"BaseTypeDefinition\"]\n _valid_literal: Tuple\n\n @classmethod\n def from_annotation(\n cls,\n node: Union[vy_ast.Name, vy_ast.Call],\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> \"BaseTypeDefinition\":\n \"\"\"\n Generate a `BaseTypeDefinition` instance of this type from `AnnAssign.annotation`\n\n Arguments\n ---------\n node : VyperNode\n Vyper ast node from the `annotation` member of an `AnnAssign` node.\n\n Returns\n -------\n BaseTypeDefinition\n BaseTypeDefinition related to the primitive that the method was called on.\n \"\"\"\n if not isinstance(node, vy_ast.Name):\n raise StructureException(\"Invalid type assignment\", node)\n if node.id != cls._id:\n raise UnexpectedValue(\"Node id does not match type name\")\n return cls._type(location, is_constant, is_public, is_immutable)\n\n @classmethod\n def from_literal(cls, node: vy_ast.Constant) -> \"BaseTypeDefinition\":\n \"\"\"\n Generate a `BaseTypeDefinition` instance of this type from a literal constant.\n\n This method is called on every primitive class in order to determine\n potential types for a `Constant` AST node.\n\n Types that may be assigned from literals should include a `_valid_literal`\n attribute, containing a list of AST node classes that may be valid for\n this type. If the `_valid_literal` attribute is not included, the type\n cannot be assigned to a literal.\n\n Arguments\n ---------\n node : VyperNode\n `Constant` Vyper ast node, or a list or tuple of constants.\n\n Returns\n -------\n BaseTypeDefinition\n BaseTypeDefinition related to the primitive that the method was called on.\n \"\"\"\n if not isinstance(node, vy_ast.Constant):\n raise UnexpectedNodeType(f\"Attempted to validate a '{node.ast_type}' node.\")\n if not isinstance(node, cls._valid_literal):\n raise InvalidLiteral(f\"Invalid literal type for {cls.__name__}\", node)\n return cls._type()\n\n @classmethod\n def compare_type(\n cls, other: Union[\"BaseTypeDefinition\", \"BasePrimitive\", AbstractDataType]\n ) -> bool:\n \"\"\"\n Compare this type object against another type object.\n\n Failed comparisons must return `False`, not raise an exception.\n\n This method is not intended to be called directly. 
Type comparisons\n are handled by methods in `vyper.context.validation.utils`\n\n Arguments\n ---------\n other : BaseTypeDefinition\n Another type object to be compared against this one.\n\n Returns\n -------\n bool\n Indicates if the types are equivalent.\n \"\"\"\n return isinstance(other, cls._type)\n\n @classmethod\n def fetch_call_return(self, node: vy_ast.Call) -> \"BaseTypeDefinition\":\n \"\"\"\n Validate a call to this type and return the result.\n\n This method must raise if the type is not callable, or the call arguments\n are not valid.\n\n Arguments\n ---------\n node : Call\n Vyper ast node of call action to validate.\n\n Returns\n -------\n BaseTypeDefinition, optional\n Type generated as a result of the call.\n \"\"\"\n raise StructureException(\"Type is not callable\", node)\n\n @classmethod\n def get_subscripted_type(self, node: vy_ast.Index) -> None:\n # always raises - do not implement in inherited classes\n raise StructureException(\"Types cannot be indexed\", node)\n\n @classmethod\n def get_member(cls, key: str, node: vy_ast.Attribute) -> None:\n # always raises - do not implement in inherited classes\n raise StructureException(\"Types do not have members\", node)\n\n @classmethod\n def validate_modification(\n cls, node: Union[vy_ast.Assign, vy_ast.AugAssign], mutability: Any\n ) -> None:\n # always raises - do not implement in inherited classes\n raise InvalidOperation(\"Cannot assign to a type\", node)\n\n\nclass BaseTypeDefinition:\n \"\"\"\n Base class for type definition classes.\n\n Type definitions are objects that represent the type of a specific object\n within a contract. They are usually derived from a `BasePrimitive` counterpart.\n\n Class Attributes\n -----------------\n _id : str\n The name of the type.\n _is_callable : bool, optional\n If `True`, attempts to assign this value without calling it will raise\n a more expressive error message recommending that the user performs a\n function call.\n\n Object Attributes\n -----------------\n is_constant : bool, optional\n If `True`, the value of this object cannot be modified after assignment.\n size_in_bytes: int\n The number of bytes that are required to store this type.\n \"\"\"\n\n # TODO CMC 2022-01-08 `is_dynamic_size` probably unused\n is_dynamic_size = False\n\n size_in_bytes = 32\n _id: str\n\n def __init__(\n self,\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> None:\n self.location = location\n self.is_constant = is_constant\n self.is_public = is_public\n self.is_immutable = is_immutable\n\n self._modification_count = 0\n\n @property\n def abi_type(self) -> ABIType:\n \"\"\"\n The ABI type corresponding to this type\n \"\"\"\n raise CompilerPanic(\"Method must be implemented by the inherited class\")\n\n @property\n def canonical_abi_type(self) -> str:\n \"\"\"\n The canonical name of this type. 
Used for ABI types and generating function signatures.\n \"\"\"\n return self.abi_type.selector_name()\n\n def from_annotation(self, node: vy_ast.VyperNode, **kwargs: Any) -> None:\n # always raises, user should have used a primitive\n raise StructureException(\"Value is not a type\", node)\n\n def set_position(self, position: DataPosition) -> None:\n if hasattr(self, \"position\"):\n raise CompilerPanic(\"Position was already assigned\")\n if self.location != position._location:\n if self.location == DataLocation.UNSET:\n self.location = position._location\n else:\n raise CompilerPanic(\"Incompatible locations\")\n self.position = position\n\n def compare_type(\n self, other: Union[\"BaseTypeDefinition\", BasePrimitive, AbstractDataType]\n ) -> bool:\n \"\"\"\n Compare this type object against another type object.\n\n Failed comparisons must return `False`, not raise an exception.\n\n This method is not intended to be called directly. Type comparisons\n are handled by methods in `vyper.context.validation.utils`\n\n Arguments\n ---------\n other : BaseTypeDefinition\n Another type object to be compared against this one.\n\n Returns\n -------\n bool\n Indicates if the types are equivalent.\n \"\"\"\n return isinstance(other, type(self))\n\n def validate_numeric_op(\n self, node: Union[vy_ast.UnaryOp, vy_ast.BinOp, vy_ast.AugAssign]\n ) -> None:\n \"\"\"\n Validate a numeric operation for this type.\n\n Arguments\n ---------\n node : UnaryOp | BinOp | AugAssign\n Vyper ast node of the numeric operation to be validated.\n\n Returns\n -------\n None. A failed validation must raise an exception.\n \"\"\"\n raise InvalidOperation(f\"Cannot perform {node.op.description} on {self}\", node)\n\n def validate_boolean_op(self, node: vy_ast.BoolOp) -> None:\n \"\"\"\n Validate a boolean operation for this type.\n\n Arguments\n ---------\n node : BoolOp\n Vyper ast node of the boolean operation to be validated.\n\n Returns\n -------\n None. A failed validation must raise an exception.\n \"\"\"\n raise InvalidOperation(f\"Invalid type for operand: {self}\", node)\n\n def validate_comparator(self, node: vy_ast.Compare) -> None:\n \"\"\"\n Validate a comparator for this type.\n\n Arguments\n ---------\n node : Compare\n Vyper ast node of the comparator to be validated.\n\n Returns\n -------\n None. A failed validation must raise an exception.\n \"\"\"\n if not isinstance(node.op, (vy_ast.Eq, vy_ast.NotEq)):\n raise InvalidOperation(\n f\"Cannot perform {node.op.description} comparison on {self}\", node\n )\n\n def validate_implements(self, node: vy_ast.AnnAssign) -> None:\n \"\"\"\n Validate an implements statement.\n\n This method is unique to user-defined interfaces. It should not be\n included in other types.\n\n Arguments\n ---------\n node : AnnAssign\n Vyper ast node of the implements statement being validated.\n\n Returns\n -------\n None. 
A failed validation must raise an exception.\n \"\"\"\n raise StructureException(\"Value is not an interface\", node)\n\n def fetch_call_return(self, node: vy_ast.Call) -> Union[\"BaseTypeDefinition\", None]:\n \"\"\"\n Validate a call to this value and return the result.\n\n This method must raise if the value is not callable, or the call arguments\n are not valid.\n\n Arguments\n ---------\n node : Call\n Vyper ast node of call action to validate.\n\n Returns\n -------\n BaseTypeDefinition, optional\n Type generated as a result of the call.\n \"\"\"\n raise StructureException(\"Value is not callable\", node)\n\n def validate_index_type(self, node: vy_ast.Index) -> None:\n \"\"\"\n Validate an index reference, e.g. x[1]. Raises if the index is invalid.\n\n Arguments\n ---------\n node : Index\n Vyper ast node from the `slice` member of a Subscript node.\n \"\"\"\n raise StructureException(f\"Type '{self}' does not support indexing\", node)\n\n def get_subscripted_type(self, node: vy_ast.Index) -> \"BaseTypeDefinition\":\n \"\"\"\n Return the type of a subscript expression, e.g. x[1]\n\n Arguments\n ---------\n node: Index\n Vyper ast node from the `slice` member of a Subscript node\n\n Returns\n -------\n BaseTypeDefinition\n Type object for value at the given index.\n \"\"\"\n raise StructureException(f\"Type '{self}' does not support indexing\", node)\n\n def get_member(self, key: str, node: vy_ast.Attribute) -> \"BaseTypeDefinition\":\n \"\"\"\n Validate an attribute reference and return the given type for the member.\n\n Arguments\n ---------\n key : str\n Name of the member being accessed.\n node: Attribute\n Vyper ast Attribute node representing the member being accessed.\n\n Returns\n -------\n BaseTypeDefinition\n A type object for the value of the given member. 
Raises if the member\n does not exist for the given type.\n \"\"\"\n raise StructureException(f\"Type '{self}' does not support members\", node)\n\n def validate_modification(\n self,\n node: Union[vy_ast.Assign, vy_ast.AugAssign, vy_ast.Call],\n mutability: Any, # should be StateMutability, import cycle\n ) -> None:\n \"\"\"\n Validate an attempt to modify this value.\n\n Raises if the value is a constant or involves an invalid operation.\n\n Arguments\n ---------\n node : Assign | AugAssign | Call\n Vyper ast node of the modifying action.\n mutability: StateMutability\n The mutability of the context (e.g., pure function) we are currently in\n \"\"\"\n # TODO: break this cycle, probably by moving this to validation module\n from vyper.semantics.types.function import StateMutability\n\n if mutability <= StateMutability.VIEW and self.location == DataLocation.STORAGE:\n raise StateAccessViolation(\n f\"Cannot modify storage in a {mutability.value} function\", node\n )\n\n if self.location == DataLocation.CALLDATA:\n raise ImmutableViolation(\"Cannot write to calldata\", node)\n if self.is_constant:\n raise ImmutableViolation(\"Constant value cannot be written to\", node)\n if self.is_immutable:\n if node.get_ancestor(vy_ast.FunctionDef).get(\"name\") != \"__init__\":\n raise ImmutableViolation(\"Immutable value cannot be written to\", node)\n if self._modification_count:\n raise ImmutableViolation(\n \"Immutable value cannot be modified after assignment\", node\n )\n self._modification_count += 1\n\n if isinstance(node, vy_ast.AugAssign):\n self.validate_numeric_op(node)\n\n def get_signature(self) -> Tuple[Tuple, Optional[\"BaseTypeDefinition\"]]:\n raise CompilerPanic(\"Method must be implemented by the inherited class\")\n\n def compare_signature(self, other: \"BaseTypeDefinition\") -> bool:\n \"\"\"\n Compare the signature of this type with another type.\n\n Used when determining if an interface has been implemented. This method\n should not be directly implemented by any inherited classes.\n \"\"\"\n\n if not self.is_public:\n return False\n\n arguments, return_type = self.get_signature()\n other_arguments, other_return_type = other.get_signature()\n\n if len(arguments) != len(other_arguments):\n return False\n for a, b in zip(arguments, other_arguments):\n if not a.compare_type(b):\n return False\n if return_type and not return_type.compare_type(other_return_type): # type: ignore\n return False\n\n return True\n\n\n# TODO rename this: it's really for address/interface signature resolution\nclass ValueTypeDefinition(BaseTypeDefinition):\n \"\"\"\n Base class for types representing a single value.\n\n Class attributes\n ----------------\n _valid_literal: VyperNode | Tuple\n A vyper ast class or tuple of ast classes that can represent valid literals\n for the given type. 
Including this attribute will allow literal values to be\n assigned this type.\n \"\"\"\n\n def __repr__(self):\n return self._id\n\n def get_signature(self):\n return (), self\n\n\nclass MemberTypeDefinition(BaseTypeDefinition):\n \"\"\"\n Base class for types that have accessible members.\n\n Class attributes\n ----------------\n _type_members : Dict[str, BaseType]\n Dictionary of members common to all values of this type.\n\n Object attributes\n -----------------\n members : OrderedDict[str, BaseType]\n Dictionary of members for the given type.\n \"\"\"\n\n _type_members: Dict\n\n def __init__(\n self,\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> None:\n super().__init__(location, is_constant, is_public, is_immutable)\n self.members: OrderedDict = OrderedDict()\n\n def add_member(self, name: str, type_: BaseTypeDefinition) -> None:\n if name in self.members:\n raise NamespaceCollision(f\"Member '{name}' already exists in {self}\")\n if name in getattr(self, \"_type_members\", []):\n raise NamespaceCollision(f\"Member '{name}' already exists in {self}\")\n self.members[name] = type_\n\n def get_member(self, key: str, node: vy_ast.VyperNode) -> BaseTypeDefinition:\n if key in self.members:\n return self.members[key]\n elif key in getattr(self, \"_type_members\", []):\n type_ = copy.deepcopy(self._type_members[key])\n type_.location = self.location\n type_.is_constant = self.is_constant\n return type_\n suggestions_str = get_levenshtein_error_suggestions(key, self.members, 0.3)\n raise UnknownAttribute(f\"{self} has no member '{key}'. {suggestions_str}\", node)\n\n def __repr__(self):\n return f\"{self._id}\"\n\n\nclass IndexableTypeDefinition(BaseTypeDefinition):\n \"\"\"\n Base class for indexable types such as arrays and mappings.\n\n Attributes\n ----------\n key_type: BaseType\n Type representing the index value for this object.\n value_type : BaseType\n Type representing the value(s) contained in this object.\n _id : str\n Name of the type.\n \"\"\"\n\n def __init__(\n self,\n value_type: BaseTypeDefinition,\n key_type: BaseTypeDefinition,\n _id: str,\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> None:\n super().__init__(location, is_constant, is_public, is_immutable)\n self.value_type = value_type\n self.key_type = key_type\n self._id = _id\n\n def get_signature(self) -> Tuple[Tuple, Optional[BaseTypeDefinition]]:\n new_args, return_type = self.value_type.get_signature()\n return (self.key_type,) + new_args, return_type\n", "path": "vyper/semantics/types/bases.py"}], "after_files": [{"content": "import copy\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom typing import Any, Dict, Optional, Tuple, Type, Union\n\nfrom vyper import ast as vy_ast\nfrom vyper.abi_types import ABIType\nfrom vyper.exceptions import (\n CompilerPanic,\n ImmutableViolation,\n InvalidLiteral,\n InvalidOperation,\n NamespaceCollision,\n StateAccessViolation,\n StructureException,\n UnexpectedNodeType,\n UnexpectedValue,\n UnknownAttribute,\n)\nfrom vyper.semantics.types.abstract import AbstractDataType\nfrom vyper.semantics.validation.levenshtein_utils import get_levenshtein_error_suggestions\n\n\nclass DataLocation(Enum):\n UNSET = 0\n MEMORY = 1\n STORAGE = 2\n CALLDATA = 3\n CODE = 4\n\n\nclass DataPosition:\n _location: DataLocation\n\n\nclass CalldataOffset(DataPosition):\n __slots__ = (\n 
\"dynamic_offset\",\n \"static_offset\",\n )\n _location = DataLocation.CALLDATA\n\n def __init__(self, static_offset, dynamic_offset=None):\n self.static_offset = static_offset\n self.dynamic_offset = dynamic_offset\n\n def __repr__(self):\n if self.dynamic_offset is not None:\n return f\"<CalldataOffset: static {self.static_offset}, dynamic {self.dynamic_offset})>\"\n else:\n return f\"<CalldataOffset: static {self.static_offset}, no dynamic>\"\n\n\nclass MemoryOffset(DataPosition):\n __slots__ = (\"offset\",)\n _location = DataLocation.MEMORY\n\n def __init__(self, offset):\n self.offset = offset\n\n def __repr__(self):\n return f\"<MemoryOffset: {self.offset}>\"\n\n\nclass StorageSlot(DataPosition):\n __slots__ = (\"position\",)\n _location = DataLocation.STORAGE\n\n def __init__(self, position):\n self.position = position\n\n def __repr__(self):\n return f\"<StorageSlot: {self.position}>\"\n\n\nclass CodeOffset(DataPosition):\n __slots__ = (\"offset\",)\n _location = DataLocation.CODE\n\n def __init__(self, offset):\n self.offset = offset\n\n def __repr__(self):\n return f\"<CodeOffset: {self.offset}>\"\n\n\nclass BasePrimitive:\n \"\"\"\n Base class for primitive type classes.\n\n Primitives are objects that are invoked when applying a type to a variable.\n They must contain a `from_annotation` (and optionally `from_literal`) method\n that returns their equivalent `BaseTypeDefinition` object.\n\n Attributes\n ----------\n _id : str\n The name of the type.\n _type : BaseTypeDefinition\n The related `BaseTypeDefinition` class generated from this primitive\n _as_array: bool, optional\n If `True`, this type can be used as the base member for an array.\n _valid_literal : Tuple\n A tuple of Vyper ast classes that may be assigned this type.\n \"\"\"\n\n _id: str\n _type: Type[\"BaseTypeDefinition\"]\n _valid_literal: Tuple\n\n @classmethod\n def from_annotation(\n cls,\n node: Union[vy_ast.Name, vy_ast.Call],\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> \"BaseTypeDefinition\":\n \"\"\"\n Generate a `BaseTypeDefinition` instance of this type from `AnnAssign.annotation`\n\n Arguments\n ---------\n node : VyperNode\n Vyper ast node from the `annotation` member of an `AnnAssign` node.\n\n Returns\n -------\n BaseTypeDefinition\n BaseTypeDefinition related to the primitive that the method was called on.\n \"\"\"\n if not isinstance(node, vy_ast.Name):\n raise StructureException(\"Invalid type assignment\", node)\n if node.id != cls._id:\n raise UnexpectedValue(\"Node id does not match type name\")\n return cls._type(location, is_constant, is_public, is_immutable)\n\n @classmethod\n def from_literal(cls, node: vy_ast.Constant) -> \"BaseTypeDefinition\":\n \"\"\"\n Generate a `BaseTypeDefinition` instance of this type from a literal constant.\n\n This method is called on every primitive class in order to determine\n potential types for a `Constant` AST node.\n\n Types that may be assigned from literals should include a `_valid_literal`\n attribute, containing a list of AST node classes that may be valid for\n this type. 
If the `_valid_literal` attribute is not included, the type\n cannot be assigned to a literal.\n\n Arguments\n ---------\n node : VyperNode\n `Constant` Vyper ast node, or a list or tuple of constants.\n\n Returns\n -------\n BaseTypeDefinition\n BaseTypeDefinition related to the primitive that the method was called on.\n \"\"\"\n if not isinstance(node, vy_ast.Constant):\n raise UnexpectedNodeType(f\"Attempted to validate a '{node.ast_type}' node.\")\n if not isinstance(node, cls._valid_literal):\n raise InvalidLiteral(f\"Invalid literal type for {cls.__name__}\", node)\n return cls._type()\n\n @classmethod\n def compare_type(\n cls, other: Union[\"BaseTypeDefinition\", \"BasePrimitive\", AbstractDataType]\n ) -> bool:\n \"\"\"\n Compare this type object against another type object.\n\n Failed comparisons must return `False`, not raise an exception.\n\n This method is not intended to be called directly. Type comparisons\n are handled by methods in `vyper.context.validation.utils`\n\n Arguments\n ---------\n other : BaseTypeDefinition\n Another type object to be compared against this one.\n\n Returns\n -------\n bool\n Indicates if the types are equivalent.\n \"\"\"\n return isinstance(other, cls._type)\n\n @classmethod\n def fetch_call_return(self, node: vy_ast.Call) -> \"BaseTypeDefinition\":\n \"\"\"\n Validate a call to this type and return the result.\n\n This method must raise if the type is not callable, or the call arguments\n are not valid.\n\n Arguments\n ---------\n node : Call\n Vyper ast node of call action to validate.\n\n Returns\n -------\n BaseTypeDefinition, optional\n Type generated as a result of the call.\n \"\"\"\n raise StructureException(\"Type is not callable\", node)\n\n @classmethod\n def get_subscripted_type(self, node: vy_ast.Index) -> None:\n # always raises - do not implement in inherited classes\n raise StructureException(\"Types cannot be indexed\", node)\n\n @classmethod\n def get_member(cls, key: str, node: vy_ast.Attribute) -> None:\n # always raises - do not implement in inherited classes\n raise StructureException(\"Types do not have members\", node)\n\n @classmethod\n def validate_modification(\n cls, node: Union[vy_ast.Assign, vy_ast.AugAssign], mutability: Any\n ) -> None:\n # always raises - do not implement in inherited classes\n raise InvalidOperation(\"Cannot assign to a type\", node)\n\n\nclass BaseTypeDefinition:\n \"\"\"\n Base class for type definition classes.\n\n Type definitions are objects that represent the type of a specific object\n within a contract. 
They are usually derived from a `BasePrimitive` counterpart.\n\n Class Attributes\n -----------------\n _id : str\n The name of the type.\n _is_callable : bool, optional\n If `True`, attempts to assign this value without calling it will raise\n a more expressive error message recommending that the user performs a\n function call.\n\n Object Attributes\n -----------------\n is_constant : bool, optional\n If `True`, the value of this object cannot be modified after assignment.\n size_in_bytes: int\n The number of bytes that are required to store this type.\n \"\"\"\n\n # TODO CMC 2022-01-08 `is_dynamic_size` probably unused\n is_dynamic_size = False\n\n size_in_bytes = 32\n _id: str\n\n def __init__(\n self,\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> None:\n self.location = location\n self.is_constant = is_constant\n self.is_public = is_public\n self.is_immutable = is_immutable\n\n self._modification_count = 0\n\n @property\n def abi_type(self) -> ABIType:\n \"\"\"\n The ABI type corresponding to this type\n \"\"\"\n raise CompilerPanic(\"Method must be implemented by the inherited class\")\n\n @property\n def canonical_abi_type(self) -> str:\n \"\"\"\n The canonical name of this type. Used for ABI types and generating function signatures.\n \"\"\"\n return self.abi_type.selector_name()\n\n def from_annotation(self, node: vy_ast.VyperNode, *args: Any, **kwargs: Any) -> None:\n # always raises, user should have used a primitive\n raise StructureException(\"Value is not a type\", node)\n\n def set_position(self, position: DataPosition) -> None:\n if hasattr(self, \"position\"):\n raise CompilerPanic(\"Position was already assigned\")\n if self.location != position._location:\n if self.location == DataLocation.UNSET:\n self.location = position._location\n else:\n raise CompilerPanic(\"Incompatible locations\")\n self.position = position\n\n def compare_type(\n self, other: Union[\"BaseTypeDefinition\", BasePrimitive, AbstractDataType]\n ) -> bool:\n \"\"\"\n Compare this type object against another type object.\n\n Failed comparisons must return `False`, not raise an exception.\n\n This method is not intended to be called directly. Type comparisons\n are handled by methods in `vyper.context.validation.utils`\n\n Arguments\n ---------\n other : BaseTypeDefinition\n Another type object to be compared against this one.\n\n Returns\n -------\n bool\n Indicates if the types are equivalent.\n \"\"\"\n return isinstance(other, type(self))\n\n def validate_numeric_op(\n self, node: Union[vy_ast.UnaryOp, vy_ast.BinOp, vy_ast.AugAssign]\n ) -> None:\n \"\"\"\n Validate a numeric operation for this type.\n\n Arguments\n ---------\n node : UnaryOp | BinOp | AugAssign\n Vyper ast node of the numeric operation to be validated.\n\n Returns\n -------\n None. A failed validation must raise an exception.\n \"\"\"\n raise InvalidOperation(f\"Cannot perform {node.op.description} on {self}\", node)\n\n def validate_boolean_op(self, node: vy_ast.BoolOp) -> None:\n \"\"\"\n Validate a boolean operation for this type.\n\n Arguments\n ---------\n node : BoolOp\n Vyper ast node of the boolean operation to be validated.\n\n Returns\n -------\n None. 
A failed validation must raise an exception.\n \"\"\"\n raise InvalidOperation(f\"Invalid type for operand: {self}\", node)\n\n def validate_comparator(self, node: vy_ast.Compare) -> None:\n \"\"\"\n Validate a comparator for this type.\n\n Arguments\n ---------\n node : Compare\n Vyper ast node of the comparator to be validated.\n\n Returns\n -------\n None. A failed validation must raise an exception.\n \"\"\"\n if not isinstance(node.op, (vy_ast.Eq, vy_ast.NotEq)):\n raise InvalidOperation(\n f\"Cannot perform {node.op.description} comparison on {self}\", node\n )\n\n def validate_implements(self, node: vy_ast.AnnAssign) -> None:\n \"\"\"\n Validate an implements statement.\n\n This method is unique to user-defined interfaces. It should not be\n included in other types.\n\n Arguments\n ---------\n node : AnnAssign\n Vyper ast node of the implements statement being validated.\n\n Returns\n -------\n None. A failed validation must raise an exception.\n \"\"\"\n raise StructureException(\"Value is not an interface\", node)\n\n def fetch_call_return(self, node: vy_ast.Call) -> Union[\"BaseTypeDefinition\", None]:\n \"\"\"\n Validate a call to this value and return the result.\n\n This method must raise if the value is not callable, or the call arguments\n are not valid.\n\n Arguments\n ---------\n node : Call\n Vyper ast node of call action to validate.\n\n Returns\n -------\n BaseTypeDefinition, optional\n Type generated as a result of the call.\n \"\"\"\n raise StructureException(\"Value is not callable\", node)\n\n def validate_index_type(self, node: vy_ast.Index) -> None:\n \"\"\"\n Validate an index reference, e.g. x[1]. Raises if the index is invalid.\n\n Arguments\n ---------\n node : Index\n Vyper ast node from the `slice` member of a Subscript node.\n \"\"\"\n raise StructureException(f\"Type '{self}' does not support indexing\", node)\n\n def get_subscripted_type(self, node: vy_ast.Index) -> \"BaseTypeDefinition\":\n \"\"\"\n Return the type of a subscript expression, e.g. x[1]\n\n Arguments\n ---------\n node: Index\n Vyper ast node from the `slice` member of a Subscript node\n\n Returns\n -------\n BaseTypeDefinition\n Type object for value at the given index.\n \"\"\"\n raise StructureException(f\"Type '{self}' does not support indexing\", node)\n\n def get_member(self, key: str, node: vy_ast.Attribute) -> \"BaseTypeDefinition\":\n \"\"\"\n Validate an attribute reference and return the given type for the member.\n\n Arguments\n ---------\n key : str\n Name of the member being accessed.\n node: Attribute\n Vyper ast Attribute node representing the member being accessed.\n\n Returns\n -------\n BaseTypeDefinition\n A type object for the value of the given member. 
Raises if the member\n does not exist for the given type.\n \"\"\"\n raise StructureException(f\"Type '{self}' does not support members\", node)\n\n def validate_modification(\n self,\n node: Union[vy_ast.Assign, vy_ast.AugAssign, vy_ast.Call],\n mutability: Any, # should be StateMutability, import cycle\n ) -> None:\n \"\"\"\n Validate an attempt to modify this value.\n\n Raises if the value is a constant or involves an invalid operation.\n\n Arguments\n ---------\n node : Assign | AugAssign | Call\n Vyper ast node of the modifying action.\n mutability: StateMutability\n The mutability of the context (e.g., pure function) we are currently in\n \"\"\"\n # TODO: break this cycle, probably by moving this to validation module\n from vyper.semantics.types.function import StateMutability\n\n if mutability <= StateMutability.VIEW and self.location == DataLocation.STORAGE:\n raise StateAccessViolation(\n f\"Cannot modify storage in a {mutability.value} function\", node\n )\n\n if self.location == DataLocation.CALLDATA:\n raise ImmutableViolation(\"Cannot write to calldata\", node)\n if self.is_constant:\n raise ImmutableViolation(\"Constant value cannot be written to\", node)\n if self.is_immutable:\n if node.get_ancestor(vy_ast.FunctionDef).get(\"name\") != \"__init__\":\n raise ImmutableViolation(\"Immutable value cannot be written to\", node)\n if self._modification_count:\n raise ImmutableViolation(\n \"Immutable value cannot be modified after assignment\", node\n )\n self._modification_count += 1\n\n if isinstance(node, vy_ast.AugAssign):\n self.validate_numeric_op(node)\n\n def get_signature(self) -> Tuple[Tuple, Optional[\"BaseTypeDefinition\"]]:\n raise CompilerPanic(\"Method must be implemented by the inherited class\")\n\n def compare_signature(self, other: \"BaseTypeDefinition\") -> bool:\n \"\"\"\n Compare the signature of this type with another type.\n\n Used when determining if an interface has been implemented. This method\n should not be directly implemented by any inherited classes.\n \"\"\"\n\n if not self.is_public:\n return False\n\n arguments, return_type = self.get_signature()\n other_arguments, other_return_type = other.get_signature()\n\n if len(arguments) != len(other_arguments):\n return False\n for a, b in zip(arguments, other_arguments):\n if not a.compare_type(b):\n return False\n if return_type and not return_type.compare_type(other_return_type): # type: ignore\n return False\n\n return True\n\n\n# TODO rename this: it's really for address/interface signature resolution\nclass ValueTypeDefinition(BaseTypeDefinition):\n \"\"\"\n Base class for types representing a single value.\n\n Class attributes\n ----------------\n _valid_literal: VyperNode | Tuple\n A vyper ast class or tuple of ast classes that can represent valid literals\n for the given type. 
Including this attribute will allow literal values to be\n assigned this type.\n \"\"\"\n\n def __repr__(self):\n return self._id\n\n def get_signature(self):\n return (), self\n\n\nclass MemberTypeDefinition(BaseTypeDefinition):\n \"\"\"\n Base class for types that have accessible members.\n\n Class attributes\n ----------------\n _type_members : Dict[str, BaseType]\n Dictionary of members common to all values of this type.\n\n Object attributes\n -----------------\n members : OrderedDict[str, BaseType]\n Dictionary of members for the given type.\n \"\"\"\n\n _type_members: Dict\n\n def __init__(\n self,\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> None:\n super().__init__(location, is_constant, is_public, is_immutable)\n self.members: OrderedDict = OrderedDict()\n\n def add_member(self, name: str, type_: BaseTypeDefinition) -> None:\n if name in self.members:\n raise NamespaceCollision(f\"Member '{name}' already exists in {self}\")\n if name in getattr(self, \"_type_members\", []):\n raise NamespaceCollision(f\"Member '{name}' already exists in {self}\")\n self.members[name] = type_\n\n def get_member(self, key: str, node: vy_ast.VyperNode) -> BaseTypeDefinition:\n if key in self.members:\n return self.members[key]\n elif key in getattr(self, \"_type_members\", []):\n type_ = copy.deepcopy(self._type_members[key])\n type_.location = self.location\n type_.is_constant = self.is_constant\n return type_\n suggestions_str = get_levenshtein_error_suggestions(key, self.members, 0.3)\n raise UnknownAttribute(f\"{self} has no member '{key}'. {suggestions_str}\", node)\n\n def __repr__(self):\n return f\"{self._id}\"\n\n\nclass IndexableTypeDefinition(BaseTypeDefinition):\n \"\"\"\n Base class for indexable types such as arrays and mappings.\n\n Attributes\n ----------\n key_type: BaseType\n Type representing the index value for this object.\n value_type : BaseType\n Type representing the value(s) contained in this object.\n _id : str\n Name of the type.\n \"\"\"\n\n def __init__(\n self,\n value_type: BaseTypeDefinition,\n key_type: BaseTypeDefinition,\n _id: str,\n location: DataLocation = DataLocation.UNSET,\n is_constant: bool = False,\n is_public: bool = False,\n is_immutable: bool = False,\n ) -> None:\n super().__init__(location, is_constant, is_public, is_immutable)\n self.value_type = value_type\n self.key_type = key_type\n self._id = _id\n\n def get_signature(self) -> Tuple[Tuple, Optional[BaseTypeDefinition]]:\n new_args, return_type = self.value_type.get_signature()\n return (self.key_type,) + new_args, return_type\n", "path": "vyper/semantics/types/bases.py"}]} |
gh_patches_debug_1343 | rasdani/github-patches | git_diff | docker__docker-py-1880 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid symlinks now break utils.create_archive(), previously were skipped, skipped by docker CLI
Version 2.7.0 seems to die if you try to include a symlink that points to something that doesn't exist. This problem seems to have been introduced in this commit:
https://github.com/docker/docker-py/commit/5c5705045be72530091a51372ae920f958192bfb
Example from 2.6.1 and `docker` cli tool:
```
$ mkdir test
$ cd test
$ ln -s /doesnt_exist doesnt_exist
$ cat << EOF > Dockerfile
FROM scratch
COPY . /tmp
EOF
$ docker build -t test .
Sending build context to Docker daemon 2.56kB
Step 1/2 : FROM scratch
--->
Step 2/2 : COPY . /tmp
---> cb6f16dc4b1f
Removing intermediate container 4eda4dd7d7c4
Successfully built cb6f16dc4b1f
Successfully tagged test:latest
$ python
Python 2.7.6 (default, Nov 23 2017, 15:49:48)
[GCC 4.8.4] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import docker
>>> docker.__version__
'2.6.1'
>>> docker.utils.create_archive(".", ['doesnt_exist'])
<open file '<fdopen>', mode 'w+b' at 0x7fa47208f8a0>
>>>
```
But as of 2.7.0:
```
$ python
Python 2.7.6 (default, Nov 23 2017, 15:49:48)
[GCC 4.8.4] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import docker
>>> docker.__version__
'2.7.0'
>>> docker.utils.create_archive(".", ['doesnt_exist'])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/docker/utils/utils.py", line 103, in create_archive
'Can not access file in context: {}'.format(full_path)
IOError: Can not access file in context: ./doesnt_exist
>>>
```
It seems that symlinks should be skipped rather than checked for accessibility, as was the previous behavior and the `docker` CLI tool's behavior.
--- END ISSUE ---
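
A minimal sketch (not part of the original report, assuming a POSIX filesystem where `/doesnt_exist` is absent) of why the 2.7.0 check trips on a dangling symlink — `os.access()` follows the link to its missing target, while `os.lstat()` examines the link itself:

```python
import os
import tempfile

workdir = tempfile.mkdtemp()
link = os.path.join(workdir, "doesnt_exist")
os.symlink("/doesnt_exist", link)  # dangling: the target does not exist

# 2.7.0 behaviour: os.access() resolves the link, finds nothing readable.
print(os.access(link, os.R_OK))                # False -> create_archive() raises IOError

# Checking the link itself (as the patch below does) succeeds.
print(bool(os.lstat(link).st_mode & os.R_OK))  # True -> the symlink can be archived as-is
```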
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/utils.py`
Content:
```
1 import base64
2 import io
3 import os
4 import os.path
5 import json
6 import shlex
7 import tarfile
8 import tempfile
9 import warnings
10 from distutils.version import StrictVersion
11 from datetime import datetime
12
13 import requests
14 import six
15
16 from .. import constants
17 from .. import errors
18 from .. import tls
19
20 if six.PY2:
21 from urllib import splitnport
22 else:
23 from urllib.parse import splitnport
24
25 DEFAULT_HTTP_HOST = "127.0.0.1"
26 DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
27 DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
28
29 BYTE_UNITS = {
30 'b': 1,
31 'k': 1024,
32 'm': 1024 * 1024,
33 'g': 1024 * 1024 * 1024
34 }
35
36
37 def create_ipam_pool(*args, **kwargs):
38 raise errors.DeprecatedMethod(
39 'utils.create_ipam_pool has been removed. Please use a '
40 'docker.types.IPAMPool object instead.'
41 )
42
43
44 def create_ipam_config(*args, **kwargs):
45 raise errors.DeprecatedMethod(
46 'utils.create_ipam_config has been removed. Please use a '
47 'docker.types.IPAMConfig object instead.'
48 )
49
50
51 def mkbuildcontext(dockerfile):
52 f = tempfile.NamedTemporaryFile()
53 t = tarfile.open(mode='w', fileobj=f)
54 if isinstance(dockerfile, io.StringIO):
55 dfinfo = tarfile.TarInfo('Dockerfile')
56 if six.PY3:
57 raise TypeError('Please use io.BytesIO to create in-memory '
58 'Dockerfiles with Python 3')
59 else:
60 dfinfo.size = len(dockerfile.getvalue())
61 dockerfile.seek(0)
62 elif isinstance(dockerfile, io.BytesIO):
63 dfinfo = tarfile.TarInfo('Dockerfile')
64 dfinfo.size = len(dockerfile.getvalue())
65 dockerfile.seek(0)
66 else:
67 dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
68 t.addfile(dfinfo, dockerfile)
69 t.close()
70 f.seek(0)
71 return f
72
73
74 def decode_json_header(header):
75 data = base64.b64decode(header)
76 if six.PY3:
77 data = data.decode('utf-8')
78 return json.loads(data)
79
80
81 def build_file_list(root):
82 files = []
83 for dirname, dirnames, fnames in os.walk(root):
84 for filename in fnames + dirnames:
85 longpath = os.path.join(dirname, filename)
86 files.append(
87 longpath.replace(root, '', 1).lstrip('/')
88 )
89
90 return files
91
92
93 def create_archive(root, files=None, fileobj=None, gzip=False):
94 if not fileobj:
95 fileobj = tempfile.NamedTemporaryFile()
96 t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
97 if files is None:
98 files = build_file_list(root)
99 for path in files:
100 full_path = os.path.join(root, path)
101 if not os.access(full_path, os.R_OK):
102 raise IOError(
103 'Can not access file in context: {}'.format(full_path)
104 )
105 i = t.gettarinfo(full_path, arcname=path)
106 if i is None:
107 # This happens when we encounter a socket file. We can safely
108 # ignore it and proceed.
109 continue
110
111 if constants.IS_WINDOWS_PLATFORM:
112 # Windows doesn't keep track of the execute bit, so we make files
113 # and directories executable by default.
114 i.mode = i.mode & 0o755 | 0o111
115
116 if i.isfile():
117 try:
118 with open(full_path, 'rb') as f:
119 t.addfile(i, f)
120 except IOError:
121 t.addfile(i, None)
122 else:
123 # Directories, FIFOs, symlinks... don't need to be read.
124 t.addfile(i, None)
125 t.close()
126 fileobj.seek(0)
127 return fileobj
128
129
130 def compare_version(v1, v2):
131 """Compare docker versions
132
133 >>> v1 = '1.9'
134 >>> v2 = '1.10'
135 >>> compare_version(v1, v2)
136 1
137 >>> compare_version(v2, v1)
138 -1
139 >>> compare_version(v2, v2)
140 0
141 """
142 s1 = StrictVersion(v1)
143 s2 = StrictVersion(v2)
144 if s1 == s2:
145 return 0
146 elif s1 > s2:
147 return -1
148 else:
149 return 1
150
151
152 def version_lt(v1, v2):
153 return compare_version(v1, v2) > 0
154
155
156 def version_gte(v1, v2):
157 return not version_lt(v1, v2)
158
159
160 def ping_registry(url):
161 warnings.warn(
162 'The `ping_registry` method is deprecated and will be removed.',
163 DeprecationWarning
164 )
165
166 return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
167
168
169 def ping(url, valid_4xx_statuses=None):
170 try:
171 res = requests.get(url, timeout=3)
172 except Exception:
173 return False
174 else:
175 # We don't send yet auth headers
176 # and a v2 registry will respond with status 401
177 return (
178 res.status_code < 400 or
179 (valid_4xx_statuses and res.status_code in valid_4xx_statuses)
180 )
181
182
183 def _convert_port_binding(binding):
184 result = {'HostIp': '', 'HostPort': ''}
185 if isinstance(binding, tuple):
186 if len(binding) == 2:
187 result['HostPort'] = binding[1]
188 result['HostIp'] = binding[0]
189 elif isinstance(binding[0], six.string_types):
190 result['HostIp'] = binding[0]
191 else:
192 result['HostPort'] = binding[0]
193 elif isinstance(binding, dict):
194 if 'HostPort' in binding:
195 result['HostPort'] = binding['HostPort']
196 if 'HostIp' in binding:
197 result['HostIp'] = binding['HostIp']
198 else:
199 raise ValueError(binding)
200 else:
201 result['HostPort'] = binding
202
203 if result['HostPort'] is None:
204 result['HostPort'] = ''
205 else:
206 result['HostPort'] = str(result['HostPort'])
207
208 return result
209
210
211 def convert_port_bindings(port_bindings):
212 result = {}
213 for k, v in six.iteritems(port_bindings):
214 key = str(k)
215 if '/' not in key:
216 key += '/tcp'
217 if isinstance(v, list):
218 result[key] = [_convert_port_binding(binding) for binding in v]
219 else:
220 result[key] = [_convert_port_binding(v)]
221 return result
222
223
224 def convert_volume_binds(binds):
225 if isinstance(binds, list):
226 return binds
227
228 result = []
229 for k, v in binds.items():
230 if isinstance(k, six.binary_type):
231 k = k.decode('utf-8')
232
233 if isinstance(v, dict):
234 if 'ro' in v and 'mode' in v:
235 raise ValueError(
236 'Binding cannot contain both "ro" and "mode": {}'
237 .format(repr(v))
238 )
239
240 bind = v['bind']
241 if isinstance(bind, six.binary_type):
242 bind = bind.decode('utf-8')
243
244 if 'ro' in v:
245 mode = 'ro' if v['ro'] else 'rw'
246 elif 'mode' in v:
247 mode = v['mode']
248 else:
249 mode = 'rw'
250
251 result.append(
252 six.text_type('{0}:{1}:{2}').format(k, bind, mode)
253 )
254 else:
255 if isinstance(v, six.binary_type):
256 v = v.decode('utf-8')
257 result.append(
258 six.text_type('{0}:{1}:rw').format(k, v)
259 )
260 return result
261
262
263 def convert_tmpfs_mounts(tmpfs):
264 if isinstance(tmpfs, dict):
265 return tmpfs
266
267 if not isinstance(tmpfs, list):
268 raise ValueError(
269 'Expected tmpfs value to be either a list or a dict, found: {}'
270 .format(type(tmpfs).__name__)
271 )
272
273 result = {}
274 for mount in tmpfs:
275 if isinstance(mount, six.string_types):
276 if ":" in mount:
277 name, options = mount.split(":", 1)
278 else:
279 name = mount
280 options = ""
281
282 else:
283 raise ValueError(
284 "Expected item in tmpfs list to be a string, found: {}"
285 .format(type(mount).__name__)
286 )
287
288 result[name] = options
289 return result
290
291
292 def convert_service_networks(networks):
293 if not networks:
294 return networks
295 if not isinstance(networks, list):
296 raise TypeError('networks parameter must be a list.')
297
298 result = []
299 for n in networks:
300 if isinstance(n, six.string_types):
301 n = {'Target': n}
302 result.append(n)
303 return result
304
305
306 def parse_repository_tag(repo_name):
307 parts = repo_name.rsplit('@', 1)
308 if len(parts) == 2:
309 return tuple(parts)
310 parts = repo_name.rsplit(':', 1)
311 if len(parts) == 2 and '/' not in parts[1]:
312 return tuple(parts)
313 return repo_name, None
314
315
316 # Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
317 # fd:// protocol unsupported (for obvious reasons)
318 # Added support for http and https
319 # Protocol translation: tcp -> http, unix -> http+unix
320 def parse_host(addr, is_win32=False, tls=False):
321 proto = "http+unix"
322 port = None
323 path = ''
324
325 if not addr and is_win32:
326 addr = DEFAULT_NPIPE
327
328 if not addr or addr.strip() == 'unix://':
329 return DEFAULT_UNIX_SOCKET
330
331 addr = addr.strip()
332 if addr.startswith('http://'):
333 addr = addr.replace('http://', 'tcp://')
334 if addr.startswith('http+unix://'):
335 addr = addr.replace('http+unix://', 'unix://')
336
337 if addr == 'tcp://':
338 raise errors.DockerException(
339 "Invalid bind address format: {0}".format(addr)
340 )
341 elif addr.startswith('unix://'):
342 addr = addr[7:]
343 elif addr.startswith('tcp://'):
344 proto = 'http{0}'.format('s' if tls else '')
345 addr = addr[6:]
346 elif addr.startswith('https://'):
347 proto = "https"
348 addr = addr[8:]
349 elif addr.startswith('npipe://'):
350 proto = 'npipe'
351 addr = addr[8:]
352 elif addr.startswith('fd://'):
353 raise errors.DockerException("fd protocol is not implemented")
354 else:
355 if "://" in addr:
356 raise errors.DockerException(
357 "Invalid bind address protocol: {0}".format(addr)
358 )
359 proto = "https" if tls else "http"
360
361 if proto in ("http", "https"):
362 address_parts = addr.split('/', 1)
363 host = address_parts[0]
364 if len(address_parts) == 2:
365 path = '/' + address_parts[1]
366 host, port = splitnport(host)
367
368 if port is None:
369 raise errors.DockerException(
370 "Invalid port: {0}".format(addr)
371 )
372
373 if not host:
374 host = DEFAULT_HTTP_HOST
375 else:
376 host = addr
377
378 if proto in ("http", "https") and port == -1:
379 raise errors.DockerException(
380 "Bind address needs a port: {0}".format(addr))
381
382 if proto == "http+unix" or proto == 'npipe':
383 return "{0}://{1}".format(proto, host).rstrip('/')
384 return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
385
386
387 def parse_devices(devices):
388 device_list = []
389 for device in devices:
390 if isinstance(device, dict):
391 device_list.append(device)
392 continue
393 if not isinstance(device, six.string_types):
394 raise errors.DockerException(
395 'Invalid device type {0}'.format(type(device))
396 )
397 device_mapping = device.split(':')
398 if device_mapping:
399 path_on_host = device_mapping[0]
400 if len(device_mapping) > 1:
401 path_in_container = device_mapping[1]
402 else:
403 path_in_container = path_on_host
404 if len(device_mapping) > 2:
405 permissions = device_mapping[2]
406 else:
407 permissions = 'rwm'
408 device_list.append({
409 'PathOnHost': path_on_host,
410 'PathInContainer': path_in_container,
411 'CgroupPermissions': permissions
412 })
413 return device_list
414
415
416 def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
417 if not environment:
418 environment = os.environ
419 host = environment.get('DOCKER_HOST')
420
421 # empty string for cert path is the same as unset.
422 cert_path = environment.get('DOCKER_CERT_PATH') or None
423
424 # empty string for tls verify counts as "false".
425 # Any value or 'unset' counts as true.
426 tls_verify = environment.get('DOCKER_TLS_VERIFY')
427 if tls_verify == '':
428 tls_verify = False
429 else:
430 tls_verify = tls_verify is not None
431 enable_tls = cert_path or tls_verify
432
433 params = {}
434
435 if host:
436 params['base_url'] = (
437 host.replace('tcp://', 'https://') if enable_tls else host
438 )
439
440 if not enable_tls:
441 return params
442
443 if not cert_path:
444 cert_path = os.path.join(os.path.expanduser('~'), '.docker')
445
446 if not tls_verify and assert_hostname is None:
447 # assert_hostname is a subset of TLS verification,
448 # so if it's not set already then set it to false.
449 assert_hostname = False
450
451 params['tls'] = tls.TLSConfig(
452 client_cert=(os.path.join(cert_path, 'cert.pem'),
453 os.path.join(cert_path, 'key.pem')),
454 ca_cert=os.path.join(cert_path, 'ca.pem'),
455 verify=tls_verify,
456 ssl_version=ssl_version,
457 assert_hostname=assert_hostname,
458 )
459
460 return params
461
462
463 def convert_filters(filters):
464 result = {}
465 for k, v in six.iteritems(filters):
466 if isinstance(v, bool):
467 v = 'true' if v else 'false'
468 if not isinstance(v, list):
469 v = [v, ]
470 result[k] = v
471 return json.dumps(result)
472
473
474 def datetime_to_timestamp(dt):
475 """Convert a UTC datetime to a Unix timestamp"""
476 delta = dt - datetime.utcfromtimestamp(0)
477 return delta.seconds + delta.days * 24 * 3600
478
479
480 def parse_bytes(s):
481 if isinstance(s, six.integer_types + (float,)):
482 return s
483 if len(s) == 0:
484 return 0
485
486 if s[-2:-1].isalpha() and s[-1].isalpha():
487 if s[-1] == "b" or s[-1] == "B":
488 s = s[:-1]
489 units = BYTE_UNITS
490 suffix = s[-1].lower()
491
492 # Check if the variable is a string representation of an int
493 # without a units part. Assuming that the units are bytes.
494 if suffix.isdigit():
495 digits_part = s
496 suffix = 'b'
497 else:
498 digits_part = s[:-1]
499
500 if suffix in units.keys() or suffix.isdigit():
501 try:
502 digits = int(digits_part)
503 except ValueError:
504 raise errors.DockerException(
505 'Failed converting the string value for memory ({0}) to'
506 ' an integer.'.format(digits_part)
507 )
508
509 # Reconvert to long for the final result
510 s = int(digits * units[suffix])
511 else:
512 raise errors.DockerException(
513 'The specified value for memory ({0}) should specify the'
514 ' units. The postfix should be one of the `b` `k` `m` `g`'
515 ' characters'.format(s)
516 )
517
518 return s
519
520
521 def normalize_links(links):
522 if isinstance(links, dict):
523 links = six.iteritems(links)
524
525 return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
526
527
528 def parse_env_file(env_file):
529 """
530 Reads a line-separated environment file.
531 The format of each line should be "key=value".
532 """
533 environment = {}
534
535 with open(env_file, 'r') as f:
536 for line in f:
537
538 if line[0] == '#':
539 continue
540
541 line = line.strip()
542 if not line:
543 continue
544
545 parse_line = line.split('=', 1)
546 if len(parse_line) == 2:
547 k, v = parse_line
548 environment[k] = v
549 else:
550 raise errors.DockerException(
551 'Invalid line in environment file {0}:\n{1}'.format(
552 env_file, line))
553
554 return environment
555
556
557 def split_command(command):
558 if six.PY2 and not isinstance(command, six.binary_type):
559 command = command.encode('utf-8')
560 return shlex.split(command)
561
562
563 def format_environment(environment):
564 def format_env(key, value):
565 if value is None:
566 return key
567 if isinstance(value, six.binary_type):
568 value = value.decode('utf-8')
569
570 return u'{key}={value}'.format(key=key, value=value)
571 return [format_env(*var) for var in six.iteritems(environment)]
572
573
574 def format_extra_hosts(extra_hosts, task=False):
575 # Use format dictated by Swarm API if container is part of a task
576 if task:
577 return [
578 '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
579 ]
580
581 return [
582 '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
583 ]
584
585
586 def create_host_config(self, *args, **kwargs):
587 raise errors.DeprecatedMethod(
588 'utils.create_host_config has been removed. Please use a '
589 'docker.types.HostConfig object instead.'
590 )
591
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/utils.py b/docker/utils/utils.py
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -98,7 +98,8 @@
files = build_file_list(root)
for path in files:
full_path = os.path.join(root, path)
- if not os.access(full_path, os.R_OK):
+
+ if os.lstat(full_path).st_mode & os.R_OK == 0:
raise IOError(
'Can not access file in context: {}'.format(full_path)
)
| {"golden_diff": "diff --git a/docker/utils/utils.py b/docker/utils/utils.py\n--- a/docker/utils/utils.py\n+++ b/docker/utils/utils.py\n@@ -98,7 +98,8 @@\n files = build_file_list(root)\n for path in files:\n full_path = os.path.join(root, path)\n- if not os.access(full_path, os.R_OK):\n+\n+ if os.lstat(full_path).st_mode & os.R_OK == 0:\n raise IOError(\n 'Can not access file in context: {}'.format(full_path)\n )\n", "issue": "Invalid symlinks now break utils.create_archive(), previously were skipped, skipped by docker CLI\nVersion 2.7.0 seems to die if you try to include a symlink that points to something that doesn't exist. This problem seems to have been introduced in this commit:\r\n https://github.com/docker/docker-py/commit/5c5705045be72530091a51372ae920f958192bfb\r\n\r\nExample from 2.6.1 and `docker` cli tool:\r\n```\r\n$ mkdir test\r\n$ cd test\r\n$ ln -s /doesnt_exist doesnt_exist\r\n$ cat << EOF > Dockerfile \r\nFROM scratch\r\n\r\nCOPY . /tmp\r\nEOF\r\n$ docker build -t test . \r\nSending build context to Docker daemon 2.56kB\r\nStep 1/2 : FROM scratch\r\n ---> \r\nStep 2/2 : COPY . /tmp\r\n ---> cb6f16dc4b1f\r\nRemoving intermediate container 4eda4dd7d7c4\r\nSuccessfully built cb6f16dc4b1f\r\nSuccessfully tagged test:latest\r\n$ python\r\nPython 2.7.6 (default, Nov 23 2017, 15:49:48) \r\n[GCC 4.8.4] on linux2\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import docker\r\n>>> docker.__version__\r\n'2.6.1'\r\n>>> docker.utils.create_archive(\".\", ['doesnt_exist'])\r\n<open file '<fdopen>', mode 'w+b' at 0x7fa47208f8a0>\r\n>>> \r\n```\r\n\r\nBut as of 2.7.0:\r\n```\r\n$ python\r\nPython 2.7.6 (default, Nov 23 2017, 15:49:48) \r\n[GCC 4.8.4] on linux2\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import docker\r\n>>> docker.__version__\r\n'2.7.0'\r\n>>> docker.utils.create_archive(\".\", ['doesnt_exist'])\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python2.7/dist-packages/docker/utils/utils.py\", line 103, in create_archive\r\n 'Can not access file in context: {}'.format(full_path)\r\nIOError: Can not access file in context: ./doesnt_exist\r\n>>> \r\n```\r\n\r\nIt seems that symlinks should be skipped rather than trying to checked to see if they are accessible, as was the previous behavior and the `docker` CLI tool behavior.\n", "before_files": [{"content": "import base64\nimport io\nimport os\nimport os.path\nimport json\nimport shlex\nimport tarfile\nimport tempfile\nimport warnings\nfrom distutils.version import StrictVersion\nfrom datetime import datetime\n\nimport requests\nimport six\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import tls\n\nif six.PY2:\n from urllib import splitnport\nelse:\n from urllib.parse import splitnport\n\nDEFAULT_HTTP_HOST = \"127.0.0.1\"\nDEFAULT_UNIX_SOCKET = \"http+unix://var/run/docker.sock\"\nDEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'\n\nBYTE_UNITS = {\n 'b': 1,\n 'k': 1024,\n 'm': 1024 * 1024,\n 'g': 1024 * 1024 * 1024\n}\n\n\ndef create_ipam_pool(*args, **kwargs):\n raise errors.DeprecatedMethod(\n 'utils.create_ipam_pool has been removed. Please use a '\n 'docker.types.IPAMPool object instead.'\n )\n\n\ndef create_ipam_config(*args, **kwargs):\n raise errors.DeprecatedMethod(\n 'utils.create_ipam_config has been removed. 
Please use a '\n 'docker.types.IPAMConfig object instead.'\n )\n\n\ndef mkbuildcontext(dockerfile):\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w', fileobj=f)\n if isinstance(dockerfile, io.StringIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n if six.PY3:\n raise TypeError('Please use io.BytesIO to create in-memory '\n 'Dockerfiles with Python 3')\n else:\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n elif isinstance(dockerfile, io.BytesIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n else:\n dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')\n t.addfile(dfinfo, dockerfile)\n t.close()\n f.seek(0)\n return f\n\n\ndef decode_json_header(header):\n data = base64.b64decode(header)\n if six.PY3:\n data = data.decode('utf-8')\n return json.loads(data)\n\n\ndef build_file_list(root):\n files = []\n for dirname, dirnames, fnames in os.walk(root):\n for filename in fnames + dirnames:\n longpath = os.path.join(dirname, filename)\n files.append(\n longpath.replace(root, '', 1).lstrip('/')\n )\n\n return files\n\n\ndef create_archive(root, files=None, fileobj=None, gzip=False):\n if not fileobj:\n fileobj = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)\n if files is None:\n files = build_file_list(root)\n for path in files:\n full_path = os.path.join(root, path)\n if not os.access(full_path, os.R_OK):\n raise IOError(\n 'Can not access file in context: {}'.format(full_path)\n )\n i = t.gettarinfo(full_path, arcname=path)\n if i is None:\n # This happens when we encounter a socket file. We can safely\n # ignore it and proceed.\n continue\n\n if constants.IS_WINDOWS_PLATFORM:\n # Windows doesn't keep track of the execute bit, so we make files\n # and directories executable by default.\n i.mode = i.mode & 0o755 | 0o111\n\n if i.isfile():\n try:\n with open(full_path, 'rb') as f:\n t.addfile(i, f)\n except IOError:\n t.addfile(i, None)\n else:\n # Directories, FIFOs, symlinks... 
don't need to be read.\n t.addfile(i, None)\n t.close()\n fileobj.seek(0)\n return fileobj\n\n\ndef compare_version(v1, v2):\n \"\"\"Compare docker versions\n\n >>> v1 = '1.9'\n >>> v2 = '1.10'\n >>> compare_version(v1, v2)\n 1\n >>> compare_version(v2, v1)\n -1\n >>> compare_version(v2, v2)\n 0\n \"\"\"\n s1 = StrictVersion(v1)\n s2 = StrictVersion(v2)\n if s1 == s2:\n return 0\n elif s1 > s2:\n return -1\n else:\n return 1\n\n\ndef version_lt(v1, v2):\n return compare_version(v1, v2) > 0\n\n\ndef version_gte(v1, v2):\n return not version_lt(v1, v2)\n\n\ndef ping_registry(url):\n warnings.warn(\n 'The `ping_registry` method is deprecated and will be removed.',\n DeprecationWarning\n )\n\n return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')\n\n\ndef ping(url, valid_4xx_statuses=None):\n try:\n res = requests.get(url, timeout=3)\n except Exception:\n return False\n else:\n # We don't send yet auth headers\n # and a v2 registry will respond with status 401\n return (\n res.status_code < 400 or\n (valid_4xx_statuses and res.status_code in valid_4xx_statuses)\n )\n\n\ndef _convert_port_binding(binding):\n result = {'HostIp': '', 'HostPort': ''}\n if isinstance(binding, tuple):\n if len(binding) == 2:\n result['HostPort'] = binding[1]\n result['HostIp'] = binding[0]\n elif isinstance(binding[0], six.string_types):\n result['HostIp'] = binding[0]\n else:\n result['HostPort'] = binding[0]\n elif isinstance(binding, dict):\n if 'HostPort' in binding:\n result['HostPort'] = binding['HostPort']\n if 'HostIp' in binding:\n result['HostIp'] = binding['HostIp']\n else:\n raise ValueError(binding)\n else:\n result['HostPort'] = binding\n\n if result['HostPort'] is None:\n result['HostPort'] = ''\n else:\n result['HostPort'] = str(result['HostPort'])\n\n return result\n\n\ndef convert_port_bindings(port_bindings):\n result = {}\n for k, v in six.iteritems(port_bindings):\n key = str(k)\n if '/' not in key:\n key += '/tcp'\n if isinstance(v, list):\n result[key] = [_convert_port_binding(binding) for binding in v]\n else:\n result[key] = [_convert_port_binding(v)]\n return result\n\n\ndef convert_volume_binds(binds):\n if isinstance(binds, list):\n return binds\n\n result = []\n for k, v in binds.items():\n if isinstance(k, six.binary_type):\n k = k.decode('utf-8')\n\n if isinstance(v, dict):\n if 'ro' in v and 'mode' in v:\n raise ValueError(\n 'Binding cannot contain both \"ro\" and \"mode\": {}'\n .format(repr(v))\n )\n\n bind = v['bind']\n if isinstance(bind, six.binary_type):\n bind = bind.decode('utf-8')\n\n if 'ro' in v:\n mode = 'ro' if v['ro'] else 'rw'\n elif 'mode' in v:\n mode = v['mode']\n else:\n mode = 'rw'\n\n result.append(\n six.text_type('{0}:{1}:{2}').format(k, bind, mode)\n )\n else:\n if isinstance(v, six.binary_type):\n v = v.decode('utf-8')\n result.append(\n six.text_type('{0}:{1}:rw').format(k, v)\n )\n return result\n\n\ndef convert_tmpfs_mounts(tmpfs):\n if isinstance(tmpfs, dict):\n return tmpfs\n\n if not isinstance(tmpfs, list):\n raise ValueError(\n 'Expected tmpfs value to be either a list or a dict, found: {}'\n .format(type(tmpfs).__name__)\n )\n\n result = {}\n for mount in tmpfs:\n if isinstance(mount, six.string_types):\n if \":\" in mount:\n name, options = mount.split(\":\", 1)\n else:\n name = mount\n options = \"\"\n\n else:\n raise ValueError(\n \"Expected item in tmpfs list to be a string, found: {}\"\n .format(type(mount).__name__)\n )\n\n result[name] = options\n return result\n\n\ndef convert_service_networks(networks):\n if not networks:\n return 
networks\n if not isinstance(networks, list):\n raise TypeError('networks parameter must be a list.')\n\n result = []\n for n in networks:\n if isinstance(n, six.string_types):\n n = {'Target': n}\n result.append(n)\n return result\n\n\ndef parse_repository_tag(repo_name):\n parts = repo_name.rsplit('@', 1)\n if len(parts) == 2:\n return tuple(parts)\n parts = repo_name.rsplit(':', 1)\n if len(parts) == 2 and '/' not in parts[1]:\n return tuple(parts)\n return repo_name, None\n\n\n# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh\n# fd:// protocol unsupported (for obvious reasons)\n# Added support for http and https\n# Protocol translation: tcp -> http, unix -> http+unix\ndef parse_host(addr, is_win32=False, tls=False):\n proto = \"http+unix\"\n port = None\n path = ''\n\n if not addr and is_win32:\n addr = DEFAULT_NPIPE\n\n if not addr or addr.strip() == 'unix://':\n return DEFAULT_UNIX_SOCKET\n\n addr = addr.strip()\n if addr.startswith('http://'):\n addr = addr.replace('http://', 'tcp://')\n if addr.startswith('http+unix://'):\n addr = addr.replace('http+unix://', 'unix://')\n\n if addr == 'tcp://':\n raise errors.DockerException(\n \"Invalid bind address format: {0}\".format(addr)\n )\n elif addr.startswith('unix://'):\n addr = addr[7:]\n elif addr.startswith('tcp://'):\n proto = 'http{0}'.format('s' if tls else '')\n addr = addr[6:]\n elif addr.startswith('https://'):\n proto = \"https\"\n addr = addr[8:]\n elif addr.startswith('npipe://'):\n proto = 'npipe'\n addr = addr[8:]\n elif addr.startswith('fd://'):\n raise errors.DockerException(\"fd protocol is not implemented\")\n else:\n if \"://\" in addr:\n raise errors.DockerException(\n \"Invalid bind address protocol: {0}\".format(addr)\n )\n proto = \"https\" if tls else \"http\"\n\n if proto in (\"http\", \"https\"):\n address_parts = addr.split('/', 1)\n host = address_parts[0]\n if len(address_parts) == 2:\n path = '/' + address_parts[1]\n host, port = splitnport(host)\n\n if port is None:\n raise errors.DockerException(\n \"Invalid port: {0}\".format(addr)\n )\n\n if not host:\n host = DEFAULT_HTTP_HOST\n else:\n host = addr\n\n if proto in (\"http\", \"https\") and port == -1:\n raise errors.DockerException(\n \"Bind address needs a port: {0}\".format(addr))\n\n if proto == \"http+unix\" or proto == 'npipe':\n return \"{0}://{1}\".format(proto, host).rstrip('/')\n return \"{0}://{1}:{2}{3}\".format(proto, host, port, path).rstrip('/')\n\n\ndef parse_devices(devices):\n device_list = []\n for device in devices:\n if isinstance(device, dict):\n device_list.append(device)\n continue\n if not isinstance(device, six.string_types):\n raise errors.DockerException(\n 'Invalid device type {0}'.format(type(device))\n )\n device_mapping = device.split(':')\n if device_mapping:\n path_on_host = device_mapping[0]\n if len(device_mapping) > 1:\n path_in_container = device_mapping[1]\n else:\n path_in_container = path_on_host\n if len(device_mapping) > 2:\n permissions = device_mapping[2]\n else:\n permissions = 'rwm'\n device_list.append({\n 'PathOnHost': path_on_host,\n 'PathInContainer': path_in_container,\n 'CgroupPermissions': permissions\n })\n return device_list\n\n\ndef kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):\n if not environment:\n environment = os.environ\n host = environment.get('DOCKER_HOST')\n\n # empty string for cert path is the same as unset.\n cert_path = environment.get('DOCKER_CERT_PATH') or None\n\n # empty string for tls verify counts as \"false\".\n # Any value or 'unset' counts 
as true.\n tls_verify = environment.get('DOCKER_TLS_VERIFY')\n if tls_verify == '':\n tls_verify = False\n else:\n tls_verify = tls_verify is not None\n enable_tls = cert_path or tls_verify\n\n params = {}\n\n if host:\n params['base_url'] = (\n host.replace('tcp://', 'https://') if enable_tls else host\n )\n\n if not enable_tls:\n return params\n\n if not cert_path:\n cert_path = os.path.join(os.path.expanduser('~'), '.docker')\n\n if not tls_verify and assert_hostname is None:\n # assert_hostname is a subset of TLS verification,\n # so if it's not set already then set it to false.\n assert_hostname = False\n\n params['tls'] = tls.TLSConfig(\n client_cert=(os.path.join(cert_path, 'cert.pem'),\n os.path.join(cert_path, 'key.pem')),\n ca_cert=os.path.join(cert_path, 'ca.pem'),\n verify=tls_verify,\n ssl_version=ssl_version,\n assert_hostname=assert_hostname,\n )\n\n return params\n\n\ndef convert_filters(filters):\n result = {}\n for k, v in six.iteritems(filters):\n if isinstance(v, bool):\n v = 'true' if v else 'false'\n if not isinstance(v, list):\n v = [v, ]\n result[k] = v\n return json.dumps(result)\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Convert a UTC datetime to a Unix timestamp\"\"\"\n delta = dt - datetime.utcfromtimestamp(0)\n return delta.seconds + delta.days * 24 * 3600\n\n\ndef parse_bytes(s):\n if isinstance(s, six.integer_types + (float,)):\n return s\n if len(s) == 0:\n return 0\n\n if s[-2:-1].isalpha() and s[-1].isalpha():\n if s[-1] == \"b\" or s[-1] == \"B\":\n s = s[:-1]\n units = BYTE_UNITS\n suffix = s[-1].lower()\n\n # Check if the variable is a string representation of an int\n # without a units part. Assuming that the units are bytes.\n if suffix.isdigit():\n digits_part = s\n suffix = 'b'\n else:\n digits_part = s[:-1]\n\n if suffix in units.keys() or suffix.isdigit():\n try:\n digits = int(digits_part)\n except ValueError:\n raise errors.DockerException(\n 'Failed converting the string value for memory ({0}) to'\n ' an integer.'.format(digits_part)\n )\n\n # Reconvert to long for the final result\n s = int(digits * units[suffix])\n else:\n raise errors.DockerException(\n 'The specified value for memory ({0}) should specify the'\n ' units. 
The postfix should be one of the `b` `k` `m` `g`'\n ' characters'.format(s)\n )\n\n return s\n\n\ndef normalize_links(links):\n if isinstance(links, dict):\n links = six.iteritems(links)\n\n return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]\n\n\ndef parse_env_file(env_file):\n \"\"\"\n Reads a line-separated environment file.\n The format of each line should be \"key=value\".\n \"\"\"\n environment = {}\n\n with open(env_file, 'r') as f:\n for line in f:\n\n if line[0] == '#':\n continue\n\n line = line.strip()\n if not line:\n continue\n\n parse_line = line.split('=', 1)\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n\n return environment\n\n\ndef split_command(command):\n if six.PY2 and not isinstance(command, six.binary_type):\n command = command.encode('utf-8')\n return shlex.split(command)\n\n\ndef format_environment(environment):\n def format_env(key, value):\n if value is None:\n return key\n if isinstance(value, six.binary_type):\n value = value.decode('utf-8')\n\n return u'{key}={value}'.format(key=key, value=value)\n return [format_env(*var) for var in six.iteritems(environment)]\n\n\ndef format_extra_hosts(extra_hosts, task=False):\n # Use format dictated by Swarm API if container is part of a task\n if task:\n return [\n '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))\n ]\n\n return [\n '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))\n ]\n\n\ndef create_host_config(self, *args, **kwargs):\n raise errors.DeprecatedMethod(\n 'utils.create_host_config has been removed. Please use a '\n 'docker.types.HostConfig object instead.'\n )\n", "path": "docker/utils/utils.py"}], "after_files": [{"content": "import base64\nimport io\nimport os\nimport os.path\nimport json\nimport shlex\nimport tarfile\nimport tempfile\nimport warnings\nfrom distutils.version import StrictVersion\nfrom datetime import datetime\n\nimport requests\nimport six\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import tls\n\nif six.PY2:\n from urllib import splitnport\nelse:\n from urllib.parse import splitnport\n\nDEFAULT_HTTP_HOST = \"127.0.0.1\"\nDEFAULT_UNIX_SOCKET = \"http+unix://var/run/docker.sock\"\nDEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'\n\nBYTE_UNITS = {\n 'b': 1,\n 'k': 1024,\n 'm': 1024 * 1024,\n 'g': 1024 * 1024 * 1024\n}\n\n\ndef create_ipam_pool(*args, **kwargs):\n raise errors.DeprecatedMethod(\n 'utils.create_ipam_pool has been removed. Please use a '\n 'docker.types.IPAMPool object instead.'\n )\n\n\ndef create_ipam_config(*args, **kwargs):\n raise errors.DeprecatedMethod(\n 'utils.create_ipam_config has been removed. 
Please use a '\n 'docker.types.IPAMConfig object instead.'\n )\n\n\ndef mkbuildcontext(dockerfile):\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w', fileobj=f)\n if isinstance(dockerfile, io.StringIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n if six.PY3:\n raise TypeError('Please use io.BytesIO to create in-memory '\n 'Dockerfiles with Python 3')\n else:\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n elif isinstance(dockerfile, io.BytesIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n else:\n dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')\n t.addfile(dfinfo, dockerfile)\n t.close()\n f.seek(0)\n return f\n\n\ndef decode_json_header(header):\n data = base64.b64decode(header)\n if six.PY3:\n data = data.decode('utf-8')\n return json.loads(data)\n\n\ndef build_file_list(root):\n files = []\n for dirname, dirnames, fnames in os.walk(root):\n for filename in fnames + dirnames:\n longpath = os.path.join(dirname, filename)\n files.append(\n longpath.replace(root, '', 1).lstrip('/')\n )\n\n return files\n\n\ndef create_archive(root, files=None, fileobj=None, gzip=False):\n if not fileobj:\n fileobj = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)\n if files is None:\n files = build_file_list(root)\n for path in files:\n full_path = os.path.join(root, path)\n\n if os.lstat(full_path).st_mode & os.R_OK == 0:\n raise IOError(\n 'Can not access file in context: {}'.format(full_path)\n )\n i = t.gettarinfo(full_path, arcname=path)\n if i is None:\n # This happens when we encounter a socket file. We can safely\n # ignore it and proceed.\n continue\n\n if constants.IS_WINDOWS_PLATFORM:\n # Windows doesn't keep track of the execute bit, so we make files\n # and directories executable by default.\n i.mode = i.mode & 0o755 | 0o111\n\n if i.isfile():\n try:\n with open(full_path, 'rb') as f:\n t.addfile(i, f)\n except IOError:\n t.addfile(i, None)\n else:\n # Directories, FIFOs, symlinks... 
don't need to be read.\n t.addfile(i, None)\n t.close()\n fileobj.seek(0)\n return fileobj\n\n\ndef compare_version(v1, v2):\n \"\"\"Compare docker versions\n\n >>> v1 = '1.9'\n >>> v2 = '1.10'\n >>> compare_version(v1, v2)\n 1\n >>> compare_version(v2, v1)\n -1\n >>> compare_version(v2, v2)\n 0\n \"\"\"\n s1 = StrictVersion(v1)\n s2 = StrictVersion(v2)\n if s1 == s2:\n return 0\n elif s1 > s2:\n return -1\n else:\n return 1\n\n\ndef version_lt(v1, v2):\n return compare_version(v1, v2) > 0\n\n\ndef version_gte(v1, v2):\n return not version_lt(v1, v2)\n\n\ndef ping_registry(url):\n warnings.warn(\n 'The `ping_registry` method is deprecated and will be removed.',\n DeprecationWarning\n )\n\n return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')\n\n\ndef ping(url, valid_4xx_statuses=None):\n try:\n res = requests.get(url, timeout=3)\n except Exception:\n return False\n else:\n # We don't send yet auth headers\n # and a v2 registry will respond with status 401\n return (\n res.status_code < 400 or\n (valid_4xx_statuses and res.status_code in valid_4xx_statuses)\n )\n\n\ndef _convert_port_binding(binding):\n result = {'HostIp': '', 'HostPort': ''}\n if isinstance(binding, tuple):\n if len(binding) == 2:\n result['HostPort'] = binding[1]\n result['HostIp'] = binding[0]\n elif isinstance(binding[0], six.string_types):\n result['HostIp'] = binding[0]\n else:\n result['HostPort'] = binding[0]\n elif isinstance(binding, dict):\n if 'HostPort' in binding:\n result['HostPort'] = binding['HostPort']\n if 'HostIp' in binding:\n result['HostIp'] = binding['HostIp']\n else:\n raise ValueError(binding)\n else:\n result['HostPort'] = binding\n\n if result['HostPort'] is None:\n result['HostPort'] = ''\n else:\n result['HostPort'] = str(result['HostPort'])\n\n return result\n\n\ndef convert_port_bindings(port_bindings):\n result = {}\n for k, v in six.iteritems(port_bindings):\n key = str(k)\n if '/' not in key:\n key += '/tcp'\n if isinstance(v, list):\n result[key] = [_convert_port_binding(binding) for binding in v]\n else:\n result[key] = [_convert_port_binding(v)]\n return result\n\n\ndef convert_volume_binds(binds):\n if isinstance(binds, list):\n return binds\n\n result = []\n for k, v in binds.items():\n if isinstance(k, six.binary_type):\n k = k.decode('utf-8')\n\n if isinstance(v, dict):\n if 'ro' in v and 'mode' in v:\n raise ValueError(\n 'Binding cannot contain both \"ro\" and \"mode\": {}'\n .format(repr(v))\n )\n\n bind = v['bind']\n if isinstance(bind, six.binary_type):\n bind = bind.decode('utf-8')\n\n if 'ro' in v:\n mode = 'ro' if v['ro'] else 'rw'\n elif 'mode' in v:\n mode = v['mode']\n else:\n mode = 'rw'\n\n result.append(\n six.text_type('{0}:{1}:{2}').format(k, bind, mode)\n )\n else:\n if isinstance(v, six.binary_type):\n v = v.decode('utf-8')\n result.append(\n six.text_type('{0}:{1}:rw').format(k, v)\n )\n return result\n\n\ndef convert_tmpfs_mounts(tmpfs):\n if isinstance(tmpfs, dict):\n return tmpfs\n\n if not isinstance(tmpfs, list):\n raise ValueError(\n 'Expected tmpfs value to be either a list or a dict, found: {}'\n .format(type(tmpfs).__name__)\n )\n\n result = {}\n for mount in tmpfs:\n if isinstance(mount, six.string_types):\n if \":\" in mount:\n name, options = mount.split(\":\", 1)\n else:\n name = mount\n options = \"\"\n\n else:\n raise ValueError(\n \"Expected item in tmpfs list to be a string, found: {}\"\n .format(type(mount).__name__)\n )\n\n result[name] = options\n return result\n\n\ndef convert_service_networks(networks):\n if not networks:\n return 
networks\n if not isinstance(networks, list):\n raise TypeError('networks parameter must be a list.')\n\n result = []\n for n in networks:\n if isinstance(n, six.string_types):\n n = {'Target': n}\n result.append(n)\n return result\n\n\ndef parse_repository_tag(repo_name):\n parts = repo_name.rsplit('@', 1)\n if len(parts) == 2:\n return tuple(parts)\n parts = repo_name.rsplit(':', 1)\n if len(parts) == 2 and '/' not in parts[1]:\n return tuple(parts)\n return repo_name, None\n\n\n# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh\n# fd:// protocol unsupported (for obvious reasons)\n# Added support for http and https\n# Protocol translation: tcp -> http, unix -> http+unix\ndef parse_host(addr, is_win32=False, tls=False):\n proto = \"http+unix\"\n port = None\n path = ''\n\n if not addr and is_win32:\n addr = DEFAULT_NPIPE\n\n if not addr or addr.strip() == 'unix://':\n return DEFAULT_UNIX_SOCKET\n\n addr = addr.strip()\n if addr.startswith('http://'):\n addr = addr.replace('http://', 'tcp://')\n if addr.startswith('http+unix://'):\n addr = addr.replace('http+unix://', 'unix://')\n\n if addr == 'tcp://':\n raise errors.DockerException(\n \"Invalid bind address format: {0}\".format(addr)\n )\n elif addr.startswith('unix://'):\n addr = addr[7:]\n elif addr.startswith('tcp://'):\n proto = 'http{0}'.format('s' if tls else '')\n addr = addr[6:]\n elif addr.startswith('https://'):\n proto = \"https\"\n addr = addr[8:]\n elif addr.startswith('npipe://'):\n proto = 'npipe'\n addr = addr[8:]\n elif addr.startswith('fd://'):\n raise errors.DockerException(\"fd protocol is not implemented\")\n else:\n if \"://\" in addr:\n raise errors.DockerException(\n \"Invalid bind address protocol: {0}\".format(addr)\n )\n proto = \"https\" if tls else \"http\"\n\n if proto in (\"http\", \"https\"):\n address_parts = addr.split('/', 1)\n host = address_parts[0]\n if len(address_parts) == 2:\n path = '/' + address_parts[1]\n host, port = splitnport(host)\n\n if port is None:\n raise errors.DockerException(\n \"Invalid port: {0}\".format(addr)\n )\n\n if not host:\n host = DEFAULT_HTTP_HOST\n else:\n host = addr\n\n if proto in (\"http\", \"https\") and port == -1:\n raise errors.DockerException(\n \"Bind address needs a port: {0}\".format(addr))\n\n if proto == \"http+unix\" or proto == 'npipe':\n return \"{0}://{1}\".format(proto, host).rstrip('/')\n return \"{0}://{1}:{2}{3}\".format(proto, host, port, path).rstrip('/')\n\n\ndef parse_devices(devices):\n device_list = []\n for device in devices:\n if isinstance(device, dict):\n device_list.append(device)\n continue\n if not isinstance(device, six.string_types):\n raise errors.DockerException(\n 'Invalid device type {0}'.format(type(device))\n )\n device_mapping = device.split(':')\n if device_mapping:\n path_on_host = device_mapping[0]\n if len(device_mapping) > 1:\n path_in_container = device_mapping[1]\n else:\n path_in_container = path_on_host\n if len(device_mapping) > 2:\n permissions = device_mapping[2]\n else:\n permissions = 'rwm'\n device_list.append({\n 'PathOnHost': path_on_host,\n 'PathInContainer': path_in_container,\n 'CgroupPermissions': permissions\n })\n return device_list\n\n\ndef kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):\n if not environment:\n environment = os.environ\n host = environment.get('DOCKER_HOST')\n\n # empty string for cert path is the same as unset.\n cert_path = environment.get('DOCKER_CERT_PATH') or None\n\n # empty string for tls verify counts as \"false\".\n # Any value or 'unset' counts 
as true.\n tls_verify = environment.get('DOCKER_TLS_VERIFY')\n if tls_verify == '':\n tls_verify = False\n else:\n tls_verify = tls_verify is not None\n enable_tls = cert_path or tls_verify\n\n params = {}\n\n if host:\n params['base_url'] = (\n host.replace('tcp://', 'https://') if enable_tls else host\n )\n\n if not enable_tls:\n return params\n\n if not cert_path:\n cert_path = os.path.join(os.path.expanduser('~'), '.docker')\n\n if not tls_verify and assert_hostname is None:\n # assert_hostname is a subset of TLS verification,\n # so if it's not set already then set it to false.\n assert_hostname = False\n\n params['tls'] = tls.TLSConfig(\n client_cert=(os.path.join(cert_path, 'cert.pem'),\n os.path.join(cert_path, 'key.pem')),\n ca_cert=os.path.join(cert_path, 'ca.pem'),\n verify=tls_verify,\n ssl_version=ssl_version,\n assert_hostname=assert_hostname,\n )\n\n return params\n\n\ndef convert_filters(filters):\n result = {}\n for k, v in six.iteritems(filters):\n if isinstance(v, bool):\n v = 'true' if v else 'false'\n if not isinstance(v, list):\n v = [v, ]\n result[k] = v\n return json.dumps(result)\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Convert a UTC datetime to a Unix timestamp\"\"\"\n delta = dt - datetime.utcfromtimestamp(0)\n return delta.seconds + delta.days * 24 * 3600\n\n\ndef parse_bytes(s):\n if isinstance(s, six.integer_types + (float,)):\n return s\n if len(s) == 0:\n return 0\n\n if s[-2:-1].isalpha() and s[-1].isalpha():\n if s[-1] == \"b\" or s[-1] == \"B\":\n s = s[:-1]\n units = BYTE_UNITS\n suffix = s[-1].lower()\n\n # Check if the variable is a string representation of an int\n # without a units part. Assuming that the units are bytes.\n if suffix.isdigit():\n digits_part = s\n suffix = 'b'\n else:\n digits_part = s[:-1]\n\n if suffix in units.keys() or suffix.isdigit():\n try:\n digits = int(digits_part)\n except ValueError:\n raise errors.DockerException(\n 'Failed converting the string value for memory ({0}) to'\n ' an integer.'.format(digits_part)\n )\n\n # Reconvert to long for the final result\n s = int(digits * units[suffix])\n else:\n raise errors.DockerException(\n 'The specified value for memory ({0}) should specify the'\n ' units. 
The postfix should be one of the `b` `k` `m` `g`'\n ' characters'.format(s)\n )\n\n return s\n\n\ndef normalize_links(links):\n if isinstance(links, dict):\n links = six.iteritems(links)\n\n return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]\n\n\ndef parse_env_file(env_file):\n \"\"\"\n Reads a line-separated environment file.\n The format of each line should be \"key=value\".\n \"\"\"\n environment = {}\n\n with open(env_file, 'r') as f:\n for line in f:\n\n if line[0] == '#':\n continue\n\n line = line.strip()\n if not line:\n continue\n\n parse_line = line.split('=', 1)\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n\n return environment\n\n\ndef split_command(command):\n if six.PY2 and not isinstance(command, six.binary_type):\n command = command.encode('utf-8')\n return shlex.split(command)\n\n\ndef format_environment(environment):\n def format_env(key, value):\n if value is None:\n return key\n if isinstance(value, six.binary_type):\n value = value.decode('utf-8')\n\n return u'{key}={value}'.format(key=key, value=value)\n return [format_env(*var) for var in six.iteritems(environment)]\n\n\ndef format_extra_hosts(extra_hosts, task=False):\n # Use format dictated by Swarm API if container is part of a task\n if task:\n return [\n '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))\n ]\n\n return [\n '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))\n ]\n\n\ndef create_host_config(self, *args, **kwargs):\n raise errors.DeprecatedMethod(\n 'utils.create_host_config has been removed. Please use a '\n 'docker.types.HostConfig object instead.'\n )\n", "path": "docker/utils/utils.py"}]} |
gh_patches_debug_1344 | rasdani/github-patches | git_diff | kivy__kivy-1926 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SoundLoader can't determine file types for URL's with URL parameters in them.
Kivy currently can't load audio files from URL's that have URL parameters in them (For example `https://audio.example.com/get/test.wav?dl=true&token=9a8s76f9a876`).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/audio/__init__.py`
Content:
```
1 '''
2 Audio
3 =====
4
5 Load an audio sound and play it with::
6
7 from kivy.core.audio import SoundLoader
8
9 sound = SoundLoader.load('mytest.wav')
10 if sound:
11 print("Sound found at %s" % sound.source)
12 print("Sound is %.3f seconds" % sound.length)
13 sound.play()
14
15 You should not use the Sound class directly. The class returned by
16 **SoundLoader.load** will be the best sound provider for that particular file
17 type, so it might return different Sound classes depending the file type.
18
19 .. versionchanged:: 1.8.0
20 There is now 2 distinct Gstreamer implementation: one using Gi/Gst working
21 for both Python 2+3 with Gstreamer 1.0, and one using PyGST working
22 only for Python 2 + Gstreamer 0.10.
23 If you have issue with GStreamer, have a look at
24 :ref:`gstreamer-compatibility`
25
26 .. note::
27
28 Recording audio is not supported.
29
30 '''
31
32 __all__ = ('Sound', 'SoundLoader')
33
34 from kivy.logger import Logger
35 from kivy.event import EventDispatcher
36 from kivy.core import core_register_libs
37 from kivy.compat import PY2
38 from kivy.resources import resource_find
39 from kivy.properties import StringProperty, NumericProperty, OptionProperty, \
40 AliasProperty, BooleanProperty
41
42
43 class SoundLoader:
44 '''Load a sound, using the best loader for the given file type.
45 '''
46
47 _classes = []
48
49 @staticmethod
50 def register(classobj):
51 '''Register a new class to load the sound.'''
52 Logger.debug('Audio: register %s' % classobj.__name__)
53 SoundLoader._classes.append(classobj)
54
55 @staticmethod
56 def load(filename):
57 '''Load a sound, and return a Sound() instance.'''
58 rfn = resource_find(filename)
59 if rfn is not None:
60 filename = rfn
61 ext = filename.split('.')[-1].lower()
62 for classobj in SoundLoader._classes:
63 if ext in classobj.extensions():
64 return classobj(source=filename)
65 Logger.warning('Audio: Unable to find a loader for <%s>' %
66 filename)
67 return None
68
69
70 class Sound(EventDispatcher):
71 '''Represents a sound to play. This class is abstract, and cannot be used
72 directly.
73
74 Use SoundLoader to load a sound.
75
76 :Events:
77 `on_play` : None
78 Fired when the sound is played.
79 `on_stop` : None
80 Fired when the sound is stopped.
81 '''
82
83 source = StringProperty(None)
84 '''Filename / source of your audio file.
85
86 .. versionadded:: 1.3.0
87
88 :attr:`source` is a :class:`~kivy.properties.StringProperty` that defaults
89 to None and is read-only. Use the :meth:`SoundLoader.load` for loading
90 audio.
91 '''
92
93 volume = NumericProperty(1.)
94 '''Volume, in the range 0-1. 1 means full volume, 0 means mute.
95
96 .. versionadded:: 1.3.0
97
98 :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults
99 to 1.
100 '''
101
102 state = OptionProperty('stop', options=('stop', 'play'))
103 '''State of the sound, one of 'stop' or 'play'.
104
105 .. versionadded:: 1.3.0
106
107 :attr:`state` is a read-only :class:`~kivy.properties.OptionProperty`.'''
108
109 loop = BooleanProperty(False)
110 '''Set to True if the sound should automatically loop when it finishes.
111
112 .. versionadded:: 1.8.0
113
114 :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to
115 False.'''
116
117 #
118 # deprecated
119 #
120 def _get_status(self):
121 return self.state
122 status = AliasProperty(_get_status, None, bind=('state', ))
123 '''
124 .. deprecated:: 1.3.0
125 Use :attr:`state` instead.
126 '''
127
128 def _get_filename(self):
129 return self.source
130 filename = AliasProperty(_get_filename, None, bind=('source', ))
131 '''
132 .. deprecated:: 1.3.0
133 Use :attr:`source` instead.
134 '''
135
136 __events__ = ('on_play', 'on_stop')
137
138 def on_source(self, instance, filename):
139 self.unload()
140 if filename is None:
141 return
142 self.load()
143
144 def get_pos(self):
145 '''
146 Returns the current position of the audio file.
147 Returns 0 if not playing.
148
149 .. versionadded:: 1.4.1
150 '''
151 return 0
152
153 def _get_length(self):
154 return 0
155
156 length = property(lambda self: self._get_length(),
157 doc='Get length of the sound (in seconds).')
158
159 def load(self):
160 '''Load the file into memory.'''
161 pass
162
163 def unload(self):
164 '''Unload the file from memory.'''
165 pass
166
167 def play(self):
168 '''Play the file.'''
169 self.state = 'play'
170 self.dispatch('on_play')
171
172 def stop(self):
173 '''Stop playback.'''
174 self.state = 'stop'
175 self.dispatch('on_stop')
176
177 def seek(self, position):
178 '''Go to the <position> (in seconds).'''
179 pass
180
181 def on_play(self):
182 pass
183
184 def on_stop(self):
185 pass
186
187
188 # Little trick here, don't activate gstreamer on window
189 # seem to have lot of crackle or something...
190 audio_libs = []
191
192 # from now on, prefer our gstplayer instead of gi/pygst.
193 try:
194 from kivy.lib.gstplayer import GstPlayer # NOQA
195 audio_libs += [('gstplayer', 'audio_gstplayer')]
196 except ImportError:
197 #audio_libs += [('gi', 'audio_gi')]
198 if PY2:
199 audio_libs += [('pygst', 'audio_pygst')]
200 audio_libs += [('sdl', 'audio_sdl')]
201 audio_libs += [('pygame', 'audio_pygame')]
202
203 core_register_libs('audio', audio_libs)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/audio/__init__.py b/kivy/core/audio/__init__.py
--- a/kivy/core/audio/__init__.py
+++ b/kivy/core/audio/__init__.py
@@ -59,6 +59,8 @@
if rfn is not None:
filename = rfn
ext = filename.split('.')[-1].lower()
+ if '?' in ext:
+ ext = ext.split('?')[0]
for classobj in SoundLoader._classes:
if ext in classobj.extensions():
return classobj(source=filename)
| {"golden_diff": "diff --git a/kivy/core/audio/__init__.py b/kivy/core/audio/__init__.py\n--- a/kivy/core/audio/__init__.py\n+++ b/kivy/core/audio/__init__.py\n@@ -59,6 +59,8 @@\n if rfn is not None:\n filename = rfn\n ext = filename.split('.')[-1].lower()\n+ if '?' in ext:\n+ ext = ext.split('?')[0]\n for classobj in SoundLoader._classes:\n if ext in classobj.extensions():\n return classobj(source=filename)\n", "issue": "SoundLoader can't determine file types for URL's with URL parameters in them.\nKivy currently can't load audio files from URL's that have URL parameters in them (For example `https://audio.example.com/get/test.wav?dl=true&token=9a8s76f9a876`).\n\n", "before_files": [{"content": "'''\nAudio\n=====\n\nLoad an audio sound and play it with::\n\n from kivy.core.audio import SoundLoader\n\n sound = SoundLoader.load('mytest.wav')\n if sound:\n print(\"Sound found at %s\" % sound.source)\n print(\"Sound is %.3f seconds\" % sound.length)\n sound.play()\n\nYou should not use the Sound class directly. The class returned by\n**SoundLoader.load** will be the best sound provider for that particular file\ntype, so it might return different Sound classes depending the file type.\n\n.. versionchanged:: 1.8.0\n There is now 2 distinct Gstreamer implementation: one using Gi/Gst working\n for both Python 2+3 with Gstreamer 1.0, and one using PyGST working\n only for Python 2 + Gstreamer 0.10.\n If you have issue with GStreamer, have a look at\n :ref:`gstreamer-compatibility`\n\n.. note::\n\n Recording audio is not supported.\n\n'''\n\n__all__ = ('Sound', 'SoundLoader')\n\nfrom kivy.logger import Logger\nfrom kivy.event import EventDispatcher\nfrom kivy.core import core_register_libs\nfrom kivy.compat import PY2\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, NumericProperty, OptionProperty, \\\n AliasProperty, BooleanProperty\n\n\nclass SoundLoader:\n '''Load a sound, using the best loader for the given file type.\n '''\n\n _classes = []\n\n @staticmethod\n def register(classobj):\n '''Register a new class to load the sound.'''\n Logger.debug('Audio: register %s' % classobj.__name__)\n SoundLoader._classes.append(classobj)\n\n @staticmethod\n def load(filename):\n '''Load a sound, and return a Sound() instance.'''\n rfn = resource_find(filename)\n if rfn is not None:\n filename = rfn\n ext = filename.split('.')[-1].lower()\n for classobj in SoundLoader._classes:\n if ext in classobj.extensions():\n return classobj(source=filename)\n Logger.warning('Audio: Unable to find a loader for <%s>' %\n filename)\n return None\n\n\nclass Sound(EventDispatcher):\n '''Represents a sound to play. This class is abstract, and cannot be used\n directly.\n\n Use SoundLoader to load a sound.\n\n :Events:\n `on_play` : None\n Fired when the sound is played.\n `on_stop` : None\n Fired when the sound is stopped.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your audio file.\n\n .. versionadded:: 1.3.0\n\n :attr:`source` is a :class:`~kivy.properties.StringProperty` that defaults\n to None and is read-only. Use the :meth:`SoundLoader.load` for loading\n audio.\n '''\n\n volume = NumericProperty(1.)\n '''Volume, in the range 0-1. 1 means full volume, 0 means mute.\n\n .. versionadded:: 1.3.0\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n state = OptionProperty('stop', options=('stop', 'play'))\n '''State of the sound, one of 'stop' or 'play'.\n\n .. 
versionadded:: 1.3.0\n\n :attr:`state` is a read-only :class:`~kivy.properties.OptionProperty`.'''\n\n loop = BooleanProperty(False)\n '''Set to True if the sound should automatically loop when it finishes.\n\n .. versionadded:: 1.8.0\n\n :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.'''\n\n #\n # deprecated\n #\n def _get_status(self):\n return self.state\n status = AliasProperty(_get_status, None, bind=('state', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`state` instead.\n '''\n\n def _get_filename(self):\n return self.source\n filename = AliasProperty(_get_filename, None, bind=('source', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`source` instead.\n '''\n\n __events__ = ('on_play', 'on_stop')\n\n def on_source(self, instance, filename):\n self.unload()\n if filename is None:\n return\n self.load()\n\n def get_pos(self):\n '''\n Returns the current position of the audio file.\n Returns 0 if not playing.\n\n .. versionadded:: 1.4.1\n '''\n return 0\n\n def _get_length(self):\n return 0\n\n length = property(lambda self: self._get_length(),\n doc='Get length of the sound (in seconds).')\n\n def load(self):\n '''Load the file into memory.'''\n pass\n\n def unload(self):\n '''Unload the file from memory.'''\n pass\n\n def play(self):\n '''Play the file.'''\n self.state = 'play'\n self.dispatch('on_play')\n\n def stop(self):\n '''Stop playback.'''\n self.state = 'stop'\n self.dispatch('on_stop')\n\n def seek(self, position):\n '''Go to the <position> (in seconds).'''\n pass\n\n def on_play(self):\n pass\n\n def on_stop(self):\n pass\n\n\n# Little trick here, don't activate gstreamer on window\n# seem to have lot of crackle or something...\naudio_libs = []\n\n# from now on, prefer our gstplayer instead of gi/pygst.\ntry:\n from kivy.lib.gstplayer import GstPlayer # NOQA\n audio_libs += [('gstplayer', 'audio_gstplayer')]\nexcept ImportError:\n #audio_libs += [('gi', 'audio_gi')]\n if PY2:\n audio_libs += [('pygst', 'audio_pygst')]\naudio_libs += [('sdl', 'audio_sdl')]\naudio_libs += [('pygame', 'audio_pygame')]\n\ncore_register_libs('audio', audio_libs)\n", "path": "kivy/core/audio/__init__.py"}], "after_files": [{"content": "'''\nAudio\n=====\n\nLoad an audio sound and play it with::\n\n from kivy.core.audio import SoundLoader\n\n sound = SoundLoader.load('mytest.wav')\n if sound:\n print(\"Sound found at %s\" % sound.source)\n print(\"Sound is %.3f seconds\" % sound.length)\n sound.play()\n\nYou should not use the Sound class directly. The class returned by\n**SoundLoader.load** will be the best sound provider for that particular file\ntype, so it might return different Sound classes depending the file type.\n\n.. versionchanged:: 1.8.0\n There is now 2 distinct Gstreamer implementation: one using Gi/Gst working\n for both Python 2+3 with Gstreamer 1.0, and one using PyGST working\n only for Python 2 + Gstreamer 0.10.\n If you have issue with GStreamer, have a look at\n :ref:`gstreamer-compatibility`\n\n.. 
note::\n\n Recording audio is not supported.\n\n'''\n\n__all__ = ('Sound', 'SoundLoader')\n\nfrom kivy.logger import Logger\nfrom kivy.event import EventDispatcher\nfrom kivy.core import core_register_libs\nfrom kivy.compat import PY2\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, NumericProperty, OptionProperty, \\\n AliasProperty, BooleanProperty\n\n\nclass SoundLoader:\n '''Load a sound, using the best loader for the given file type.\n '''\n\n _classes = []\n\n @staticmethod\n def register(classobj):\n '''Register a new class to load the sound.'''\n Logger.debug('Audio: register %s' % classobj.__name__)\n SoundLoader._classes.append(classobj)\n\n @staticmethod\n def load(filename):\n '''Load a sound, and return a Sound() instance.'''\n rfn = resource_find(filename)\n if rfn is not None:\n filename = rfn\n ext = filename.split('.')[-1].lower()\n if '?' in ext:\n ext = ext.split('?')[0]\n for classobj in SoundLoader._classes:\n if ext in classobj.extensions():\n return classobj(source=filename)\n Logger.warning('Audio: Unable to find a loader for <%s>' %\n filename)\n return None\n\n\nclass Sound(EventDispatcher):\n '''Represents a sound to play. This class is abstract, and cannot be used\n directly.\n\n Use SoundLoader to load a sound.\n\n :Events:\n `on_play` : None\n Fired when the sound is played.\n `on_stop` : None\n Fired when the sound is stopped.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your audio file.\n\n .. versionadded:: 1.3.0\n\n :attr:`source` is a :class:`~kivy.properties.StringProperty` that defaults\n to None and is read-only. Use the :meth:`SoundLoader.load` for loading\n audio.\n '''\n\n volume = NumericProperty(1.)\n '''Volume, in the range 0-1. 1 means full volume, 0 means mute.\n\n .. versionadded:: 1.3.0\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n state = OptionProperty('stop', options=('stop', 'play'))\n '''State of the sound, one of 'stop' or 'play'.\n\n .. versionadded:: 1.3.0\n\n :attr:`state` is a read-only :class:`~kivy.properties.OptionProperty`.'''\n\n loop = BooleanProperty(False)\n '''Set to True if the sound should automatically loop when it finishes.\n\n .. versionadded:: 1.8.0\n\n :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.'''\n\n #\n # deprecated\n #\n def _get_status(self):\n return self.state\n status = AliasProperty(_get_status, None, bind=('state', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`state` instead.\n '''\n\n def _get_filename(self):\n return self.source\n filename = AliasProperty(_get_filename, None, bind=('source', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`source` instead.\n '''\n\n __events__ = ('on_play', 'on_stop')\n\n def on_source(self, instance, filename):\n self.unload()\n if filename is None:\n return\n self.load()\n\n def get_pos(self):\n '''\n Returns the current position of the audio file.\n Returns 0 if not playing.\n\n .. 
versionadded:: 1.4.1\n '''\n return 0\n\n def _get_length(self):\n return 0\n\n length = property(lambda self: self._get_length(),\n doc='Get length of the sound (in seconds).')\n\n def load(self):\n '''Load the file into memory.'''\n pass\n\n def unload(self):\n '''Unload the file from memory.'''\n pass\n\n def play(self):\n '''Play the file.'''\n self.state = 'play'\n self.dispatch('on_play')\n\n def stop(self):\n '''Stop playback.'''\n self.state = 'stop'\n self.dispatch('on_stop')\n\n def seek(self, position):\n '''Go to the <position> (in seconds).'''\n pass\n\n def on_play(self):\n pass\n\n def on_stop(self):\n pass\n\n\n# Little trick here, don't activate gstreamer on window\n# seem to have lot of crackle or something...\naudio_libs = []\n\n# from now on, prefer our gstplayer instead of gi/pygst.\ntry:\n from kivy.lib.gstplayer import GstPlayer # NOQA\n audio_libs += [('gstplayer', 'audio_gstplayer')]\nexcept ImportError:\n #audio_libs += [('gi', 'audio_gi')]\n if PY2:\n audio_libs += [('pygst', 'audio_pygst')]\naudio_libs += [('sdl', 'audio_sdl')]\naudio_libs += [('pygame', 'audio_pygame')]\n\ncore_register_libs('audio', audio_libs)\n", "path": "kivy/core/audio/__init__.py"}]} |
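The golden diff for kivy__kivy-1926 above guards the extension guess against query strings by splitting on `?` before matching loaders. A minimal standalone sketch of that idea (the `_guess_ext` helper name is ours, not Kivy's):

```python
def _guess_ext(filename):
    # Take the text after the last dot, lower-cased ("wav" from "test.wav").
    ext = filename.split('.')[-1].lower()
    # Drop URL parameters such as "?dl=true&token=..." so "wav?dl=true..." becomes "wav".
    if '?' in ext:
        ext = ext.split('?')[0]
    return ext

assert _guess_ext('https://audio.example.com/get/test.wav?dl=true&token=9a8s76f9a876') == 'wav'
```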
gh_patches_debug_1345 | rasdani/github-patches | git_diff | mozilla__pontoon-3117 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hitting a server error when accessing a Tag page of a Tag without any resoures associated to it
This is a regression from https://github.com/mozilla/pontoon/commit/1dcd7382221f7b943b9b743ee32322f7233f6a86.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/tags/utils.py`
Content:
```
1 from django.db.models import Q, Max, Sum
2
3 from pontoon.base.models import TranslatedResource, Translation
4 from pontoon.tags.models import Tag
5
6
7 class Tags:
8 """This provides an API for retrieving related ``Tags`` for given filters,
9 providing statistical information and latest activity data.
10 """
11
12 def __init__(self, **kwargs):
13 self.project = kwargs.get("project")
14 self.locale = kwargs.get("locale")
15 self.slug = kwargs.get("slug")
16 self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()
17
18 def get(self):
19 tags = (
20 Tag.objects.filter(project=self.project, resources__isnull=False)
21 .distinct()
22 .order_by("-priority", "name")
23 )
24
25 chart = self.chart(Q(), "resource__tag")
26 latest_activity = self.latest_activity(Q(), "resource__tag")
27 for tag in tags:
28 tag.chart = chart.get(tag.pk)
29 tag.latest_activity = latest_activity.get(tag.pk)
30
31 return tags
32
33 def get_tag_locales(self):
34 tag = self.tag
35
36 if tag is None:
37 return None
38
39 chart = self.chart(Q(resource__tag=self.tag), "resource__tag")
40 tag.chart = chart.get(tag.pk)
41 tag.locales = self.project.locales.all()
42
43 locale_chart = self.chart(Q(resource__tag=self.tag), "locale")
44 locale_latest_activity = self.latest_activity(
45 Q(resource__tag=self.tag), "locale"
46 )
47 for locale in tag.locales:
48 locale.chart = locale_chart.get(locale.pk)
49 locale.latest_activity = locale_latest_activity.get(locale.pk)
50
51 return tag
52
53 def chart(self, query, group_by):
54 trs = (
55 self.translated_resources.filter(query)
56 .values(group_by)
57 .annotate(
58 total_strings=Sum("resource__total_strings"),
59 approved_strings=Sum("approved_strings"),
60 pretranslated_strings=Sum("pretranslated_strings"),
61 strings_with_errors=Sum("strings_with_errors"),
62 strings_with_warnings=Sum("strings_with_warnings"),
63 unreviewed_strings=Sum("unreviewed_strings"),
64 )
65 )
66
67 return {
68 tr[group_by]: TranslatedResource.get_chart_dict(
69 TranslatedResource(**{key: tr[key] for key in list(tr.keys())[1:]})
70 )
71 for tr in trs
72 }
73
74 def latest_activity(self, query, group_by):
75 latest_activity = {}
76 dates = {}
77 translations = Translation.objects.none()
78
79 trs = (
80 self.translated_resources.exclude(latest_translation__isnull=True)
81 .filter(query)
82 .values(group_by)
83 .annotate(
84 date=Max("latest_translation__date"),
85 approved_date=Max("latest_translation__approved_date"),
86 )
87 )
88
89 for tr in trs:
90 date = max(tr["date"], tr["approved_date"] or tr["date"])
91 dates[date] = tr[group_by]
92 prefix = "entity__" if group_by == "resource__tag" else ""
93
94 # Find translations with matching date and tag/locale
95 translations |= Translation.objects.filter(
96 Q(**{"date": date, f"{prefix}{group_by}": tr[group_by]})
97 ).prefetch_related("user", "approved_user")
98
99 for t in translations:
100 key = dates[t.latest_activity["date"]]
101 latest_activity[key] = t.latest_activity
102
103 return latest_activity
104
105 @property
106 def translated_resources(self):
107 trs = TranslatedResource.objects
108
109 if self.project is not None:
110 trs = trs.filter(resource__project=self.project)
111
112 if self.locale is not None:
113 trs = trs.filter(locale=self.locale)
114
115 return trs
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/tags/utils.py b/pontoon/tags/utils.py
--- a/pontoon/tags/utils.py
+++ b/pontoon/tags/utils.py
@@ -13,7 +13,9 @@
self.project = kwargs.get("project")
self.locale = kwargs.get("locale")
self.slug = kwargs.get("slug")
- self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()
+ self.tag = Tag.objects.filter(
+ project=self.project, slug=self.slug, resources__isnull=False
+ ).first()
def get(self):
tags = (
| {"golden_diff": "diff --git a/pontoon/tags/utils.py b/pontoon/tags/utils.py\n--- a/pontoon/tags/utils.py\n+++ b/pontoon/tags/utils.py\n@@ -13,7 +13,9 @@\n self.project = kwargs.get(\"project\")\n self.locale = kwargs.get(\"locale\")\n self.slug = kwargs.get(\"slug\")\n- self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()\n+ self.tag = Tag.objects.filter(\n+ project=self.project, slug=self.slug, resources__isnull=False\n+ ).first()\n \n def get(self):\n tags = (\n", "issue": "Hitting a server error when accessing a Tag page of a Tag without any resoures associated to it\nThis is a regression from https://github.com/mozilla/pontoon/commit/1dcd7382221f7b943b9b743ee32322f7233f6a86.\n", "before_files": [{"content": "from django.db.models import Q, Max, Sum\n\nfrom pontoon.base.models import TranslatedResource, Translation\nfrom pontoon.tags.models import Tag\n\n\nclass Tags:\n \"\"\"This provides an API for retrieving related ``Tags`` for given filters,\n providing statistical information and latest activity data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.project = kwargs.get(\"project\")\n self.locale = kwargs.get(\"locale\")\n self.slug = kwargs.get(\"slug\")\n self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()\n\n def get(self):\n tags = (\n Tag.objects.filter(project=self.project, resources__isnull=False)\n .distinct()\n .order_by(\"-priority\", \"name\")\n )\n\n chart = self.chart(Q(), \"resource__tag\")\n latest_activity = self.latest_activity(Q(), \"resource__tag\")\n for tag in tags:\n tag.chart = chart.get(tag.pk)\n tag.latest_activity = latest_activity.get(tag.pk)\n\n return tags\n\n def get_tag_locales(self):\n tag = self.tag\n\n if tag is None:\n return None\n\n chart = self.chart(Q(resource__tag=self.tag), \"resource__tag\")\n tag.chart = chart.get(tag.pk)\n tag.locales = self.project.locales.all()\n\n locale_chart = self.chart(Q(resource__tag=self.tag), \"locale\")\n locale_latest_activity = self.latest_activity(\n Q(resource__tag=self.tag), \"locale\"\n )\n for locale in tag.locales:\n locale.chart = locale_chart.get(locale.pk)\n locale.latest_activity = locale_latest_activity.get(locale.pk)\n\n return tag\n\n def chart(self, query, group_by):\n trs = (\n self.translated_resources.filter(query)\n .values(group_by)\n .annotate(\n total_strings=Sum(\"resource__total_strings\"),\n approved_strings=Sum(\"approved_strings\"),\n pretranslated_strings=Sum(\"pretranslated_strings\"),\n strings_with_errors=Sum(\"strings_with_errors\"),\n strings_with_warnings=Sum(\"strings_with_warnings\"),\n unreviewed_strings=Sum(\"unreviewed_strings\"),\n )\n )\n\n return {\n tr[group_by]: TranslatedResource.get_chart_dict(\n TranslatedResource(**{key: tr[key] for key in list(tr.keys())[1:]})\n )\n for tr in trs\n }\n\n def latest_activity(self, query, group_by):\n latest_activity = {}\n dates = {}\n translations = Translation.objects.none()\n\n trs = (\n self.translated_resources.exclude(latest_translation__isnull=True)\n .filter(query)\n .values(group_by)\n .annotate(\n date=Max(\"latest_translation__date\"),\n approved_date=Max(\"latest_translation__approved_date\"),\n )\n )\n\n for tr in trs:\n date = max(tr[\"date\"], tr[\"approved_date\"] or tr[\"date\"])\n dates[date] = tr[group_by]\n prefix = \"entity__\" if group_by == \"resource__tag\" else \"\"\n\n # Find translations with matching date and tag/locale\n translations |= Translation.objects.filter(\n Q(**{\"date\": date, f\"{prefix}{group_by}\": tr[group_by]})\n 
).prefetch_related(\"user\", \"approved_user\")\n\n for t in translations:\n key = dates[t.latest_activity[\"date\"]]\n latest_activity[key] = t.latest_activity\n\n return latest_activity\n\n @property\n def translated_resources(self):\n trs = TranslatedResource.objects\n\n if self.project is not None:\n trs = trs.filter(resource__project=self.project)\n\n if self.locale is not None:\n trs = trs.filter(locale=self.locale)\n\n return trs\n", "path": "pontoon/tags/utils.py"}], "after_files": [{"content": "from django.db.models import Q, Max, Sum\n\nfrom pontoon.base.models import TranslatedResource, Translation\nfrom pontoon.tags.models import Tag\n\n\nclass Tags:\n \"\"\"This provides an API for retrieving related ``Tags`` for given filters,\n providing statistical information and latest activity data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.project = kwargs.get(\"project\")\n self.locale = kwargs.get(\"locale\")\n self.slug = kwargs.get(\"slug\")\n self.tag = Tag.objects.filter(\n project=self.project, slug=self.slug, resources__isnull=False\n ).first()\n\n def get(self):\n tags = (\n Tag.objects.filter(project=self.project, resources__isnull=False)\n .distinct()\n .order_by(\"-priority\", \"name\")\n )\n\n chart = self.chart(Q(), \"resource__tag\")\n latest_activity = self.latest_activity(Q(), \"resource__tag\")\n for tag in tags:\n tag.chart = chart.get(tag.pk)\n tag.latest_activity = latest_activity.get(tag.pk)\n\n return tags\n\n def get_tag_locales(self):\n tag = self.tag\n\n if tag is None:\n return None\n\n chart = self.chart(Q(resource__tag=self.tag), \"resource__tag\")\n tag.chart = chart.get(tag.pk)\n tag.locales = self.project.locales.all()\n\n locale_chart = self.chart(Q(resource__tag=self.tag), \"locale\")\n locale_latest_activity = self.latest_activity(\n Q(resource__tag=self.tag), \"locale\"\n )\n for locale in tag.locales:\n locale.chart = locale_chart.get(locale.pk)\n locale.latest_activity = locale_latest_activity.get(locale.pk)\n\n return tag\n\n def chart(self, query, group_by):\n trs = (\n self.translated_resources.filter(query)\n .values(group_by)\n .annotate(\n total_strings=Sum(\"resource__total_strings\"),\n approved_strings=Sum(\"approved_strings\"),\n pretranslated_strings=Sum(\"pretranslated_strings\"),\n strings_with_errors=Sum(\"strings_with_errors\"),\n strings_with_warnings=Sum(\"strings_with_warnings\"),\n unreviewed_strings=Sum(\"unreviewed_strings\"),\n )\n )\n\n return {\n tr[group_by]: TranslatedResource.get_chart_dict(\n TranslatedResource(**{key: tr[key] for key in list(tr.keys())[1:]})\n )\n for tr in trs\n }\n\n def latest_activity(self, query, group_by):\n latest_activity = {}\n dates = {}\n translations = Translation.objects.none()\n\n trs = (\n self.translated_resources.exclude(latest_translation__isnull=True)\n .filter(query)\n .values(group_by)\n .annotate(\n date=Max(\"latest_translation__date\"),\n approved_date=Max(\"latest_translation__approved_date\"),\n )\n )\n\n for tr in trs:\n date = max(tr[\"date\"], tr[\"approved_date\"] or tr[\"date\"])\n dates[date] = tr[group_by]\n prefix = \"entity__\" if group_by == \"resource__tag\" else \"\"\n\n # Find translations with matching date and tag/locale\n translations |= Translation.objects.filter(\n Q(**{\"date\": date, f\"{prefix}{group_by}\": tr[group_by]})\n ).prefetch_related(\"user\", \"approved_user\")\n\n for t in translations:\n key = dates[t.latest_activity[\"date\"]]\n latest_activity[key] = t.latest_activity\n\n return latest_activity\n\n @property\n def 
translated_resources(self):\n trs = TranslatedResource.objects\n\n if self.project is not None:\n trs = trs.filter(resource__project=self.project)\n\n if self.locale is not None:\n trs = trs.filter(locale=self.locale)\n\n return trs\n", "path": "pontoon/tags/utils.py"}]} |
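The mozilla__pontoon-3117 fix above narrows the tag lookup so that a tag with no resources resolves to `None` (letting the existing `None` handling in `get_tag_locales` take over) instead of raising a server error. A minimal sketch of the patched lookup, assuming the pontoon project context (the `find_tag` wrapper name is ours):

```python
from pontoon.tags.models import Tag

def find_tag(project, slug):
    # Only match tags that have at least one resource attached; a tag
    # without resources now behaves like a missing tag and returns None.
    return Tag.objects.filter(
        project=project, slug=slug, resources__isnull=False
    ).first()
```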
gh_patches_debug_1346 | rasdani/github-patches | git_diff | borgbackup__borg-3837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add a hint to setup.py that py37 requires llfuse >= 1.3.4
--- END ISSUE ---
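For context, one way to express such a Python-version-dependent pin in setuptools is a PEP 508 environment marker; this is only a sketch of the general mechanism, not necessarily the patch the Borg maintainers chose:

```python
# Hypothetical extras_require entry: keep the existing <2.0 cap and add a
# floor for Python 3.7, where older llfuse releases fail to build.
extras_require = {
    'fuse': [
        'llfuse <2.0',
        'llfuse >=1.3.4; python_version >= "3.7"',
    ],
}
```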
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- encoding: utf-8 *-*
2 import os
3 import io
4 import re
5 import sys
6 from collections import OrderedDict
7 from datetime import datetime
8 from glob import glob
9
10 from distutils.command.build import build
11 from distutils.core import Command
12
13 import textwrap
14
15 import setup_lz4
16 import setup_zstd
17 import setup_b2
18
19 # True: use the shared liblz4 (>= 1.7.0 / r129) from the system, False: use the bundled lz4 code
20 prefer_system_liblz4 = True
21
22 # True: use the shared libzstd (>= 1.3.0) from the system, False: use the bundled zstd code
23 prefer_system_libzstd = False
24
25 # True: use the shared libb2 from the system, False: use the bundled blake2 code
26 prefer_system_libb2 = True
27
28 min_python = (3, 4)
29 my_python = sys.version_info
30
31 if my_python < min_python:
32 print("Borg requires Python %d.%d or later" % min_python)
33 sys.exit(1)
34
35 # Are we building on ReadTheDocs?
36 on_rtd = os.environ.get('READTHEDOCS')
37
38 install_requires = [
39 # we are rather picky about msgpack versions, because a good working msgpack is
40 # very important for borg, see https://github.com/borgbackup/borg/issues/3753
41 # best versions seem to be 0.4.6, 0.4.7, 0.4.8 and 0.5.6:
42 'msgpack-python >=0.4.6, <=0.5.6, !=0.5.0, !=0.5.1, !=0.5.2, !=0.5.3, !=0.5.4, !=0.5.5',
43 # if you can't satisfy the above requirement, these are versions that might
44 # also work ok, IF you make sure to use the COMPILED version of msgpack-python,
45 # NOT the PURE PYTHON fallback implementation: ==0.5.1, ==0.5.4
46 # using any other version is not supported by borg development, feel free to
47 # do it on your own risk (and after own testing).
48 ]
49
50 # note for package maintainers: if you package borgbackup for distribution,
51 # please add llfuse as a *requirement* on all platforms that have a working
52 # llfuse package. "borg mount" needs llfuse to work.
53 # if you do not have llfuse, do not require it, most of borgbackup will work.
54 extras_require = {
55 # llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0
56 # llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0
57 # llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
58 # llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0
59 # llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0
60 # llfuse 1.1.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
61 # llfuse 1.2 (tested shortly, looks ok), needs FUSE version >= 2.8.0
62 # llfuse 1.3 (tested shortly, looks ok), needs FUSE version >= 2.8.0
63 # llfuse 2.0 will break API
64 'fuse': ['llfuse<2.0', ],
65 }
66
67 if sys.platform.startswith('freebsd'):
68 # llfuse was frequently broken / did not build on freebsd
69 # llfuse 0.41.1, 1.1 are ok
70 extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]
71
72 from setuptools import setup, find_packages, Extension
73 from setuptools.command.sdist import sdist
74 from distutils.command.clean import clean
75
76 compress_source = 'src/borg/compress.pyx'
77 crypto_ll_source = 'src/borg/crypto/low_level.pyx'
78 chunker_source = 'src/borg/chunker.pyx'
79 hashindex_source = 'src/borg/hashindex.pyx'
80 item_source = 'src/borg/item.pyx'
81 checksums_source = 'src/borg/algorithms/checksums.pyx'
82 platform_posix_source = 'src/borg/platform/posix.pyx'
83 platform_linux_source = 'src/borg/platform/linux.pyx'
84 platform_darwin_source = 'src/borg/platform/darwin.pyx'
85 platform_freebsd_source = 'src/borg/platform/freebsd.pyx'
86
87 cython_sources = [
88 compress_source,
89 crypto_ll_source,
90 chunker_source,
91 hashindex_source,
92 item_source,
93 checksums_source,
94
95 platform_posix_source,
96 platform_linux_source,
97 platform_freebsd_source,
98 platform_darwin_source,
99 ]
100
101 try:
102 from Cython.Distutils import build_ext
103 import Cython.Compiler.Main as cython_compiler
104
105 class Sdist(sdist):
106 def __init__(self, *args, **kwargs):
107 for src in cython_sources:
108 cython_compiler.compile(src, cython_compiler.default_options)
109 super().__init__(*args, **kwargs)
110
111 def make_distribution(self):
112 self.filelist.extend([
113 'src/borg/compress.c',
114 'src/borg/crypto/low_level.c',
115 'src/borg/chunker.c', 'src/borg/_chunker.c',
116 'src/borg/hashindex.c', 'src/borg/_hashindex.c',
117 'src/borg/cache_sync/cache_sync.c', 'src/borg/cache_sync/sysdep.h', 'src/borg/cache_sync/unpack.h',
118 'src/borg/cache_sync/unpack_define.h', 'src/borg/cache_sync/unpack_template.h',
119 'src/borg/item.c',
120 'src/borg/algorithms/checksums.c',
121 'src/borg/algorithms/crc32_dispatch.c', 'src/borg/algorithms/crc32_clmul.c', 'src/borg/algorithms/crc32_slice_by_8.c',
122 'src/borg/algorithms/xxh64/xxhash.h', 'src/borg/algorithms/xxh64/xxhash.c',
123 'src/borg/platform/posix.c',
124 'src/borg/platform/linux.c',
125 'src/borg/platform/freebsd.c',
126 'src/borg/platform/darwin.c',
127 ])
128 super().make_distribution()
129
130 except ImportError:
131 class Sdist(sdist):
132 def __init__(self, *args, **kwargs):
133 raise Exception('Cython is required to run sdist')
134
135 compress_source = compress_source.replace('.pyx', '.c')
136 crypto_ll_source = crypto_ll_source.replace('.pyx', '.c')
137 chunker_source = chunker_source.replace('.pyx', '.c')
138 hashindex_source = hashindex_source.replace('.pyx', '.c')
139 item_source = item_source.replace('.pyx', '.c')
140 checksums_source = checksums_source.replace('.pyx', '.c')
141 platform_posix_source = platform_posix_source.replace('.pyx', '.c')
142 platform_linux_source = platform_linux_source.replace('.pyx', '.c')
143 platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')
144 platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')
145 from distutils.command.build_ext import build_ext
146 if not on_rtd and not all(os.path.exists(path) for path in [
147 compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source,
148 platform_posix_source, platform_linux_source, platform_freebsd_source, platform_darwin_source]):
149 raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')
150
151
152 def detect_openssl(prefixes):
153 for prefix in prefixes:
154 filename = os.path.join(prefix, 'include', 'openssl', 'evp.h')
155 if os.path.exists(filename):
156 with open(filename, 'rb') as fd:
157 if b'PKCS5_PBKDF2_HMAC(' in fd.read():
158 return prefix
159
160
161 include_dirs = []
162 library_dirs = []
163 define_macros = []
164
165 possible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl',
166 '/usr/local/borg', '/opt/local', '/opt/pkg', ]
167 if os.environ.get('BORG_OPENSSL_PREFIX'):
168 possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX'))
169 ssl_prefix = detect_openssl(possible_openssl_prefixes)
170 if not ssl_prefix:
171 raise Exception('Unable to find OpenSSL >= 1.0 headers. (Looked here: {})'.format(', '.join(possible_openssl_prefixes)))
172 include_dirs.append(os.path.join(ssl_prefix, 'include'))
173 library_dirs.append(os.path.join(ssl_prefix, 'lib'))
174
175
176 possible_liblz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4',
177 '/usr/local/borg', '/opt/local', '/opt/pkg', ]
178 if os.environ.get('BORG_LIBLZ4_PREFIX'):
179 possible_liblz4_prefixes.insert(0, os.environ.get('BORG_LIBLZ4_PREFIX'))
180 liblz4_prefix = setup_lz4.lz4_system_prefix(possible_liblz4_prefixes)
181 if prefer_system_liblz4 and liblz4_prefix:
182 print('Detected and preferring liblz4 over bundled LZ4')
183 define_macros.append(('BORG_USE_LIBLZ4', 'YES'))
184 liblz4_system = True
185 else:
186 liblz4_system = False
187
188 possible_libb2_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libb2', '/usr/local/libb2',
189 '/usr/local/borg', '/opt/local', '/opt/pkg', ]
190 if os.environ.get('BORG_LIBB2_PREFIX'):
191 possible_libb2_prefixes.insert(0, os.environ.get('BORG_LIBB2_PREFIX'))
192 libb2_prefix = setup_b2.b2_system_prefix(possible_libb2_prefixes)
193 if prefer_system_libb2 and libb2_prefix:
194 print('Detected and preferring libb2 over bundled BLAKE2')
195 define_macros.append(('BORG_USE_LIBB2', 'YES'))
196 libb2_system = True
197 else:
198 libb2_system = False
199
200 possible_libzstd_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libzstd', '/usr/local/libzstd',
201 '/usr/local/borg', '/opt/local', '/opt/pkg', ]
202 if os.environ.get('BORG_LIBZSTD_PREFIX'):
203 possible_libzstd_prefixes.insert(0, os.environ.get('BORG_LIBZSTD_PREFIX'))
204 libzstd_prefix = setup_zstd.zstd_system_prefix(possible_libzstd_prefixes)
205 if prefer_system_libzstd and libzstd_prefix:
206 print('Detected and preferring libzstd over bundled ZSTD')
207 define_macros.append(('BORG_USE_LIBZSTD', 'YES'))
208 libzstd_system = True
209 else:
210 libzstd_system = False
211
212
213 with open('README.rst', 'r') as fd:
214 long_description = fd.read()
215 # remove badges
216 long_description = re.compile(r'^\.\. start-badges.*^\.\. end-badges', re.M | re.S).sub('', long_description)
217 # remove |substitutions|
218 long_description = re.compile(r'\|screencast\|').sub('', long_description)
219 # remove unknown directives
220 long_description = re.compile(r'^\.\. highlight:: \w+$', re.M).sub('', long_description)
221
222
223 def format_metavar(option):
224 if option.nargs in ('*', '...'):
225 return '[%s...]' % option.metavar
226 elif option.nargs == '?':
227 return '[%s]' % option.metavar
228 elif option.nargs is None:
229 return option.metavar
230 else:
231 raise ValueError('Can\'t format metavar %s, unknown nargs %s!' % (option.metavar, option.nargs))
232
233
234 class build_usage(Command):
235 description = "generate usage for each command"
236
237 user_options = [
238 ('output=', 'O', 'output directory'),
239 ]
240
241 def initialize_options(self):
242 pass
243
244 def finalize_options(self):
245 pass
246
247 def run(self):
248 print('generating usage docs')
249 import borg
250 borg.doc_mode = 'build_man'
251 if not os.path.exists('docs/usage'):
252 os.mkdir('docs/usage')
253 # allows us to build docs without the C modules fully loaded during help generation
254 from borg.archiver import Archiver
255 parser = Archiver(prog='borg').build_parser()
256 # borgfs has a separate man page to satisfy debian's "every program from a package
257 # must have a man page" requirement, but it doesn't need a separate HTML docs page
258 #borgfs_parser = Archiver(prog='borgfs').build_parser()
259
260 self.generate_level("", parser, Archiver)
261
262 def generate_level(self, prefix, parser, Archiver, extra_choices=None):
263 is_subcommand = False
264 choices = {}
265 for action in parser._actions:
266 if action.choices is not None and 'SubParsersAction' in str(action.__class__):
267 is_subcommand = True
268 for cmd, parser in action.choices.items():
269 choices[prefix + cmd] = parser
270 if extra_choices is not None:
271 choices.update(extra_choices)
272 if prefix and not choices:
273 return
274 print('found commands: %s' % list(choices.keys()))
275
276 for command, parser in sorted(choices.items()):
277 if command.startswith('debug'):
278 print('skipping', command)
279 continue
280 print('generating help for %s' % command)
281
282 if self.generate_level(command + " ", parser, Archiver):
283 continue
284
285 with open('docs/usage/%s.rst.inc' % command.replace(" ", "_"), 'w') as doc:
286 doc.write(".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\n\n")
287 if command == 'help':
288 for topic in Archiver.helptext:
289 params = {"topic": topic,
290 "underline": '~' * len('borg help ' + topic)}
291 doc.write(".. _borg_{topic}:\n\n".format(**params))
292 doc.write("borg help {topic}\n{underline}\n\n".format(**params))
293 doc.write(Archiver.helptext[topic])
294 else:
295 params = {"command": command,
296 "command_": command.replace(' ', '_'),
297 "underline": '-' * len('borg ' + command)}
298 doc.write(".. _borg_{command_}:\n\n".format(**params))
299 doc.write("borg {command}\n{underline}\n.. code-block:: none\n\n borg [common options] {command}".format(**params))
300 self.write_usage(parser, doc)
301 epilog = parser.epilog
302 parser.epilog = None
303 self.write_options(parser, doc)
304 doc.write("\n\nDescription\n~~~~~~~~~~~\n")
305 doc.write(epilog)
306
307 if 'create' in choices:
308 common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
309 with open('docs/usage/common-options.rst.inc', 'w') as doc:
310 self.write_options_group(common_options, doc, False, base_indent=0)
311
312 return is_subcommand
313
314 def write_usage(self, parser, fp):
315 if any(len(o.option_strings) for o in parser._actions):
316 fp.write(' [options]')
317 for option in parser._actions:
318 if option.option_strings:
319 continue
320 fp.write(' ' + format_metavar(option))
321 fp.write('\n\n')
322
323 def write_options(self, parser, fp):
324 def is_positional_group(group):
325 return any(not o.option_strings for o in group._group_actions)
326
327 # HTML output:
328 # A table using some column-spans
329
330 def html_write(s):
331 for line in s.splitlines():
332 fp.write(' ' + line + '\n')
333
334 rows = []
335 for group in parser._action_groups:
336 if group.title == 'Common options':
337 # (no of columns used, columns, ...)
338 rows.append((1, '.. class:: borg-common-opt-ref\n\n:ref:`common_options`'))
339 else:
340 if not group._group_actions:
341 continue
342 group_header = '**%s**' % group.title
343 if group.description:
344 group_header += ' — ' + group.description
345 rows.append((1, group_header))
346 if is_positional_group(group):
347 for option in group._group_actions:
348 rows.append((3, '', '``%s``' % option.metavar, option.help or ''))
349 else:
350 for option in group._group_actions:
351 if option.metavar:
352 option_fmt = '``%s ' + option.metavar + '``'
353 else:
354 option_fmt = '``%s``'
355 option_str = ', '.join(option_fmt % s for s in option.option_strings)
356 option_desc = textwrap.dedent((option.help or '') % option.__dict__)
357 rows.append((3, '', option_str, option_desc))
358
359 fp.write('.. only:: html\n\n')
360 table = io.StringIO()
361 table.write('.. class:: borg-options-table\n\n')
362 self.rows_to_table(rows, table.write)
363 fp.write(textwrap.indent(table.getvalue(), ' ' * 4))
364
365 # LaTeX output:
366 # Regular rST option lists (irregular column widths)
367 latex_options = io.StringIO()
368 for group in parser._action_groups:
369 if group.title == 'Common options':
370 latex_options.write('\n\n:ref:`common_options`\n')
371 latex_options.write(' |')
372 else:
373 self.write_options_group(group, latex_options)
374 fp.write('\n.. only:: latex\n\n')
375 fp.write(textwrap.indent(latex_options.getvalue(), ' ' * 4))
376
377 def rows_to_table(self, rows, write):
378 def write_row_separator():
379 write('+')
380 for column_width in column_widths:
381 write('-' * (column_width + 1))
382 write('+')
383 write('\n')
384
385 # Find column count and width
386 column_count = max(columns for columns, *_ in rows)
387 column_widths = [0] * column_count
388 for columns, *cells in rows:
389 for i in range(columns):
390 # "+ 1" because we want a space between the cell contents and the delimiting "|" in the output
391 column_widths[i] = max(column_widths[i], len(cells[i]) + 1)
392
393 for columns, *original_cells in rows:
394 write_row_separator()
395 # If a cell contains newlines, then the row must be split up in individual rows
396 # where each cell contains no newline.
397 rowspanning_cells = []
398 original_cells = list(original_cells)
399 while any('\n' in cell for cell in original_cells):
400 cell_bloc = []
401 for i, cell in enumerate(original_cells):
402 pre, _, original_cells[i] = cell.partition('\n')
403 cell_bloc.append(pre)
404 rowspanning_cells.append(cell_bloc)
405 rowspanning_cells.append(original_cells)
406 for cells in rowspanning_cells:
407 for i, column_width in enumerate(column_widths):
408 if i < columns:
409 write('| ')
410 write(cells[i].ljust(column_width))
411 else:
412 write(' ')
413 write(''.ljust(column_width))
414 write('|\n')
415
416 write_row_separator()
417 # This bit of JavaScript kills the <colgroup> that is invariably inserted by docutils,
418 # but does absolutely no good here. It sets bogus column widths which cannot be overridden
419 # with CSS alone.
420 # Since this is HTML-only output, it would be possible to just generate a <table> directly,
421 # but then we'd lose rST formatting.
422 write(textwrap.dedent("""
423 .. raw:: html
424
425 <script type='text/javascript'>
426 $(document).ready(function () {
427 $('.borg-options-table colgroup').remove();
428 })
429 </script>
430 """))
431
432 def write_options_group(self, group, fp, with_title=True, base_indent=4):
433 def is_positional_group(group):
434 return any(not o.option_strings for o in group._group_actions)
435
436 indent = ' ' * base_indent
437
438 if is_positional_group(group):
439 for option in group._group_actions:
440 fp.write(option.metavar + '\n')
441 fp.write(textwrap.indent(option.help or '', ' ' * base_indent) + '\n')
442 return
443
444 if not group._group_actions:
445 return
446
447 if with_title:
448 fp.write('\n\n')
449 fp.write(group.title + '\n')
450
451 opts = OrderedDict()
452
453 for option in group._group_actions:
454 if option.metavar:
455 option_fmt = '%s ' + option.metavar
456 else:
457 option_fmt = '%s'
458 option_str = ', '.join(option_fmt % s for s in option.option_strings)
459 option_desc = textwrap.dedent((option.help or '') % option.__dict__)
460 opts[option_str] = textwrap.indent(option_desc, ' ' * 4)
461
462 padding = len(max(opts)) + 1
463
464 for option, desc in opts.items():
465 fp.write(indent + option.ljust(padding) + desc + '\n')
466
467
468 class build_man(Command):
469 description = 'build man pages'
470
471 user_options = []
472
473 see_also = {
474 'create': ('delete', 'prune', 'check', 'patterns', 'placeholders', 'compression'),
475 'recreate': ('patterns', 'placeholders', 'compression'),
476 'list': ('info', 'diff', 'prune', 'patterns'),
477 'info': ('list', 'diff'),
478 'init': ('create', 'delete', 'check', 'list', 'key-import', 'key-export', 'key-change-passphrase'),
479 'key-import': ('key-export', ),
480 'key-export': ('key-import', ),
481 'mount': ('umount', 'extract'), # Would be cooler if these two were on the same page
482 'umount': ('mount', ),
483 'extract': ('mount', ),
484 }
485
486 rst_prelude = textwrap.dedent("""
487 .. role:: ref(title)
488
489 .. |project_name| replace:: Borg
490
491 """)
492
493 usage_group = {
494 'break-lock': 'lock',
495 'with-lock': 'lock',
496
497 'change-passphrase': 'key',
498 'key_change-passphrase': 'key',
499 'key_export': 'key',
500 'key_import': 'key',
501 'key_migrate-to-repokey': 'key',
502
503 'export-tar': 'tar',
504
505 'benchmark_crud': 'benchmark',
506
507 'umount': 'mount',
508 }
509
510 def initialize_options(self):
511 pass
512
513 def finalize_options(self):
514 pass
515
516 def run(self):
517 print('building man pages (in docs/man)', file=sys.stderr)
518 import borg
519 borg.doc_mode = 'build_man'
520 os.makedirs('docs/man', exist_ok=True)
521 # allows us to build docs without the C modules fully loaded during help generation
522 from borg.archiver import Archiver
523 parser = Archiver(prog='borg').build_parser()
524 borgfs_parser = Archiver(prog='borgfs').build_parser()
525
526 self.generate_level('', parser, Archiver, {'borgfs': borgfs_parser})
527 self.build_topic_pages(Archiver)
528 self.build_intro_page()
529
530 def generate_level(self, prefix, parser, Archiver, extra_choices=None):
531 is_subcommand = False
532 choices = {}
533 for action in parser._actions:
534 if action.choices is not None and 'SubParsersAction' in str(action.__class__):
535 is_subcommand = True
536 for cmd, parser in action.choices.items():
537 choices[prefix + cmd] = parser
538 if extra_choices is not None:
539 choices.update(extra_choices)
540 if prefix and not choices:
541 return
542
543 for command, parser in sorted(choices.items()):
544 if command.startswith('debug') or command == 'help':
545 continue
546
547 if command == "borgfs":
548 man_title = command
549 else:
550 man_title = 'borg-' + command.replace(' ', '-')
551 print('building man page', man_title + '(1)', file=sys.stderr)
552
553 is_intermediary = self.generate_level(command + ' ', parser, Archiver)
554
555 doc, write = self.new_doc()
556 self.write_man_header(write, man_title, parser.description)
557
558 self.write_heading(write, 'SYNOPSIS')
559 if is_intermediary:
560 subparsers = [action for action in parser._actions if 'SubParsersAction' in str(action.__class__)][0]
561 for subcommand in subparsers.choices:
562 write('| borg', '[common options]', command, subcommand, '...')
563 self.see_also.setdefault(command, []).append('%s-%s' % (command, subcommand))
564 else:
565 if command == "borgfs":
566 write(command, end='')
567 else:
568 write('borg', '[common options]', command, end='')
569 self.write_usage(write, parser)
570 write('\n')
571
572 description, _, notes = parser.epilog.partition('\n.. man NOTES')
573
574 if description:
575 self.write_heading(write, 'DESCRIPTION')
576 write(description)
577
578 if not is_intermediary:
579 self.write_heading(write, 'OPTIONS')
580 write('See `borg-common(1)` for common options of Borg commands.')
581 write()
582 self.write_options(write, parser)
583
584 self.write_examples(write, command)
585
586 if notes:
587 self.write_heading(write, 'NOTES')
588 write(notes)
589
590 self.write_see_also(write, man_title)
591
592 self.gen_man_page(man_title, doc.getvalue())
593
594 # Generate the borg-common(1) man page with the common options.
595 if 'create' in choices:
596 doc, write = self.new_doc()
597 man_title = 'borg-common'
598 self.write_man_header(write, man_title, 'Common options of Borg commands')
599
600 common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
601
602 self.write_heading(write, 'SYNOPSIS')
603 self.write_options_group(write, common_options)
604 self.write_see_also(write, man_title)
605 self.gen_man_page(man_title, doc.getvalue())
606
607 return is_subcommand
608
609 def build_topic_pages(self, Archiver):
610 for topic, text in Archiver.helptext.items():
611 doc, write = self.new_doc()
612 man_title = 'borg-' + topic
613 print('building man page', man_title + '(1)', file=sys.stderr)
614
615 self.write_man_header(write, man_title, 'Details regarding ' + topic)
616 self.write_heading(write, 'DESCRIPTION')
617 write(text)
618 self.gen_man_page(man_title, doc.getvalue())
619
620 def build_intro_page(self):
621 print('building man page borg(1)', file=sys.stderr)
622 with open('docs/man_intro.rst') as fd:
623 man_intro = fd.read()
624 self.gen_man_page('borg', self.rst_prelude + man_intro)
625
626 def new_doc(self):
627 doc = io.StringIO(self.rst_prelude)
628 doc.read()
629 write = self.printer(doc)
630 return doc, write
631
632 def printer(self, fd):
633 def write(*args, **kwargs):
634 print(*args, file=fd, **kwargs)
635 return write
636
637 def write_heading(self, write, header, char='-', double_sided=False):
638 write()
639 if double_sided:
640 write(char * len(header))
641 write(header)
642 write(char * len(header))
643 write()
644
645 def write_man_header(self, write, title, description):
646 self.write_heading(write, title, '=', double_sided=True)
647 self.write_heading(write, description, double_sided=True)
648 # man page metadata
649 write(':Author: The Borg Collective')
650 write(':Date:', datetime.utcnow().date().isoformat())
651 write(':Manual section: 1')
652 write(':Manual group: borg backup tool')
653 write()
654
655 def write_examples(self, write, command):
656 command = command.replace(' ', '_')
657 with open('docs/usage/%s.rst' % self.usage_group.get(command, command)) as fd:
658 usage = fd.read()
659 usage_include = '.. include:: %s.rst.inc' % command
660 begin = usage.find(usage_include)
661 end = usage.find('.. include', begin + 1)
662 # If a command has a dedicated anchor, it will occur before the command's include.
663 if 0 < usage.find('.. _', begin + 1) < end:
664 end = usage.find('.. _', begin + 1)
665 examples = usage[begin:end]
666 examples = examples.replace(usage_include, '')
667 examples = examples.replace('Examples\n~~~~~~~~', '')
668 examples = examples.replace('Miscellaneous Help\n------------------', '')
669 examples = examples.replace('``docs/misc/prune-example.txt``:', '``docs/misc/prune-example.txt``.')
670 examples = examples.replace('.. highlight:: none\n', '') # we don't support highlight
671 examples = re.sub('^(~+)$', lambda matches: '+' * len(matches.group(0)), examples, flags=re.MULTILINE)
672 examples = examples.strip()
673 if examples:
674 self.write_heading(write, 'EXAMPLES', '-')
675 write(examples)
676
677 def write_see_also(self, write, man_title):
678 see_also = self.see_also.get(man_title.replace('borg-', ''), ())
679 see_also = ['`borg-%s(1)`' % s for s in see_also]
680 see_also.insert(0, '`borg-common(1)`')
681 self.write_heading(write, 'SEE ALSO')
682 write(', '.join(see_also))
683
684 def gen_man_page(self, name, rst):
685 from docutils.writers import manpage
686 from docutils.core import publish_string
687 from docutils.nodes import inline
688 from docutils.parsers.rst import roles
689
690 def issue(name, rawtext, text, lineno, inliner, options={}, content=[]):
691 return [inline(rawtext, '#' + text)], []
692
693 roles.register_local_role('issue', issue)
694 # We give the source_path so that docutils can find relative includes
695 # as-if the document where located in the docs/ directory.
696 man_page = publish_string(source=rst, source_path='docs/%s.rst' % name, writer=manpage.Writer())
697 with open('docs/man/%s.1' % name, 'wb') as fd:
698 fd.write(man_page)
699
700 def write_usage(self, write, parser):
701 if any(len(o.option_strings) for o in parser._actions):
702 write(' [options] ', end='')
703 for option in parser._actions:
704 if option.option_strings:
705 continue
706 write(format_metavar(option), end=' ')
707
708 def write_options(self, write, parser):
709 for group in parser._action_groups:
710 if group.title == 'Common options' or not group._group_actions:
711 continue
712 title = 'arguments' if group.title == 'positional arguments' else group.title
713 self.write_heading(write, title, '+')
714 self.write_options_group(write, group)
715
716 def write_options_group(self, write, group):
717 def is_positional_group(group):
718 return any(not o.option_strings for o in group._group_actions)
719
720 if is_positional_group(group):
721 for option in group._group_actions:
722 write(option.metavar)
723 write(textwrap.indent(option.help or '', ' ' * 4))
724 return
725
726 opts = OrderedDict()
727
728 for option in group._group_actions:
729 if option.metavar:
730 option_fmt = '%s ' + option.metavar
731 else:
732 option_fmt = '%s'
733 option_str = ', '.join(option_fmt % s for s in option.option_strings)
734 option_desc = textwrap.dedent((option.help or '') % option.__dict__)
735 opts[option_str] = textwrap.indent(option_desc, ' ' * 4)
736
737 padding = len(max(opts)) + 1
738
739 for option, desc in opts.items():
740 write(option.ljust(padding), desc)
741
742
743 def rm(file):
744 try:
745 os.unlink(file)
746 print('rm', file)
747 except FileNotFoundError:
748 pass
749
750
751 class Clean(clean):
752 def run(self):
753 super().run()
754 for source in cython_sources:
755 genc = source.replace('.pyx', '.c')
756 rm(genc)
757 compiled_glob = source.replace('.pyx', '.cpython*')
758 for compiled in sorted(glob(compiled_glob)):
759 rm(compiled)
760
761 cmdclass = {
762 'build_ext': build_ext,
763 'build_usage': build_usage,
764 'build_man': build_man,
765 'sdist': Sdist,
766 'clean': Clean,
767 }
768
769 ext_modules = []
770 if not on_rtd:
771 compress_ext_kwargs = dict(sources=[compress_source], include_dirs=include_dirs, library_dirs=library_dirs,
772 define_macros=define_macros)
773 compress_ext_kwargs = setup_lz4.lz4_ext_kwargs(bundled_path='src/borg/algorithms/lz4',
774 system_prefix=liblz4_prefix, system=liblz4_system,
775 **compress_ext_kwargs)
776 compress_ext_kwargs = setup_zstd.zstd_ext_kwargs(bundled_path='src/borg/algorithms/zstd',
777 system_prefix=libzstd_prefix, system=libzstd_system,
778 multithreaded=False, legacy=False, **compress_ext_kwargs)
779 crypto_ext_kwargs = dict(sources=[crypto_ll_source], libraries=['crypto'],
780 include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros)
781 crypto_ext_kwargs = setup_b2.b2_ext_kwargs(bundled_path='src/borg/algorithms/blake2',
782 system_prefix=libb2_prefix, system=libb2_system,
783 **crypto_ext_kwargs)
784 ext_modules += [
785 Extension('borg.compress', **compress_ext_kwargs),
786 Extension('borg.crypto.low_level', **crypto_ext_kwargs),
787 Extension('borg.hashindex', [hashindex_source]),
788 Extension('borg.item', [item_source]),
789 Extension('borg.chunker', [chunker_source]),
790 Extension('borg.algorithms.checksums', [checksums_source]),
791 ]
792 if not sys.platform.startswith(('win32', )):
793 ext_modules.append(Extension('borg.platform.posix', [platform_posix_source]))
794 if sys.platform == 'linux':
795 ext_modules.append(Extension('borg.platform.linux', [platform_linux_source], libraries=['acl']))
796 elif sys.platform.startswith('freebsd'):
797 ext_modules.append(Extension('borg.platform.freebsd', [platform_freebsd_source]))
798 elif sys.platform == 'darwin':
799 ext_modules.append(Extension('borg.platform.darwin', [platform_darwin_source]))
800
801 setup(
802 name='borgbackup',
803 use_scm_version={
804 'write_to': 'src/borg/_version.py',
805 },
806 author='The Borg Collective (see AUTHORS file)',
807 author_email='[email protected]',
808 url='https://borgbackup.readthedocs.io/',
809 description='Deduplicated, encrypted, authenticated and compressed backups',
810 long_description=long_description,
811 license='BSD',
812 platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],
813 classifiers=[
814 'Development Status :: 4 - Beta',
815 'Environment :: Console',
816 'Intended Audience :: System Administrators',
817 'License :: OSI Approved :: BSD License',
818 'Operating System :: POSIX :: BSD :: FreeBSD',
819 'Operating System :: POSIX :: BSD :: OpenBSD',
820 'Operating System :: POSIX :: BSD :: NetBSD',
821 'Operating System :: MacOS :: MacOS X',
822 'Operating System :: POSIX :: Linux',
823 'Programming Language :: Python',
824 'Programming Language :: Python :: 3',
825 'Programming Language :: Python :: 3.4',
826 'Programming Language :: Python :: 3.5',
827 'Programming Language :: Python :: 3.6',
828 'Topic :: Security :: Cryptography',
829 'Topic :: System :: Archiving :: Backup',
830 ],
831 packages=find_packages('src'),
832 package_dir={'': 'src'},
833 zip_safe=False,
834 entry_points={
835 'console_scripts': [
836 'borg = borg.archiver:main',
837 'borgfs = borg.archiver:main',
838 ]
839 },
840 # See also the MANIFEST.in file.
841 # We want to install all the files in the package directories...
842 include_package_data=True,
843 # ...except the source files which have been compiled (C extensions):
844 exclude_package_data={
845 '': ['*.c', '*.h', '*.pyx', ],
846 },
847 cmdclass=cmdclass,
848 ext_modules=ext_modules,
849 setup_requires=['setuptools_scm>=1.7'],
850 install_requires=install_requires,
851 extras_require=extras_require,
852 )
853
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,9 @@
# llfuse 0.41.1, 1.1 are ok
 extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]
 
+if my_python >= (3, 7):
+ extras_require['fuse'][0] += ', >=1.3.4'
+
from setuptools import setup, find_packages, Extension
from setuptools.command.sdist import sdist
from distutils.command.clean import clean
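
For context on the golden diff above, the sketch below is illustrative only (not part of the dataset row): it re-creates the patched `fuse` extra logic in isolation, using the same default and FreeBSD override as the setup.py listing; the printed result is an assumption derived from that listing, not output captured from borg's build.

```python
# Illustrative sketch: the 'fuse' extra before and after the added Python 3.7 guard.
import sys

my_python = sys.version_info

# Default and FreeBSD override, copied from the setup.py listing above.
extras_require = {'fuse': ['llfuse<2.0']}
if sys.platform.startswith('freebsd'):
    extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0']

# The hunk added by the golden diff: the issue asks for a hint that
# Python 3.7 requires llfuse >= 1.3.4, so the constraint is appended here.
if my_python >= (3, 7):
    extras_require['fuse'][0] += ', >=1.3.4'

print(extras_require['fuse'])
# e.g. ['llfuse<2.0, >=1.3.4'] on Python 3.7+ (non-FreeBSD) -- assumed output
```

If the extra is then installed (e.g. `pip install 'borgbackup[fuse]'`), pip resolves the combined specifier, so on Python 3.7+ only llfuse releases in the `>=1.3.4,<2.0` range satisfy it.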
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,6 +69,9 @@\n # llfuse 0.41.1, 1.1 are ok\n extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]\n \n+if my_python >= (3, 7):\n+ extras_require['fuse'][0] += ', >=1.3.4'\n+\n from setuptools import setup, find_packages, Extension\n from setuptools.command.sdist import sdist\n from distutils.command.clean import clean\n", "issue": "add a hint to setup.py that py37 requires llfuse >= 1.3.4\n\n", "before_files": [{"content": "# -*- encoding: utf-8 *-*\nimport os\nimport io\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom glob import glob\n\nfrom distutils.command.build import build\nfrom distutils.core import Command\n\nimport textwrap\n\nimport setup_lz4\nimport setup_zstd\nimport setup_b2\n\n# True: use the shared liblz4 (>= 1.7.0 / r129) from the system, False: use the bundled lz4 code\nprefer_system_liblz4 = True\n\n# True: use the shared libzstd (>= 1.3.0) from the system, False: use the bundled zstd code\nprefer_system_libzstd = False\n\n# True: use the shared libb2 from the system, False: use the bundled blake2 code\nprefer_system_libb2 = True\n\nmin_python = (3, 4)\nmy_python = sys.version_info\n\nif my_python < min_python:\n print(\"Borg requires Python %d.%d or later\" % min_python)\n sys.exit(1)\n\n# Are we building on ReadTheDocs?\non_rtd = os.environ.get('READTHEDOCS')\n\ninstall_requires = [\n # we are rather picky about msgpack versions, because a good working msgpack is\n # very important for borg, see https://github.com/borgbackup/borg/issues/3753\n # best versions seem to be 0.4.6, 0.4.7, 0.4.8 and 0.5.6:\n 'msgpack-python >=0.4.6, <=0.5.6, !=0.5.0, !=0.5.1, !=0.5.2, !=0.5.3, !=0.5.4, !=0.5.5',\n # if you can't satisfy the above requirement, these are versions that might\n # also work ok, IF you make sure to use the COMPILED version of msgpack-python,\n # NOT the PURE PYTHON fallback implementation: ==0.5.1, ==0.5.4\n # using any other version is not supported by borg development, feel free to\n # do it on your own risk (and after own testing).\n]\n\n# note for package maintainers: if you package borgbackup for distribution,\n# please add llfuse as a *requirement* on all platforms that have a working\n# llfuse package. 
\"borg mount\" needs llfuse to work.\n# if you do not have llfuse, do not require it, most of borgbackup will work.\nextras_require = {\n # llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0\n # llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.1.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.2 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.3 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 2.0 will break API\n 'fuse': ['llfuse<2.0', ],\n}\n\nif sys.platform.startswith('freebsd'):\n # llfuse was frequently broken / did not build on freebsd\n # llfuse 0.41.1, 1.1 are ok\n extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]\n\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.sdist import sdist\nfrom distutils.command.clean import clean\n\ncompress_source = 'src/borg/compress.pyx'\ncrypto_ll_source = 'src/borg/crypto/low_level.pyx'\nchunker_source = 'src/borg/chunker.pyx'\nhashindex_source = 'src/borg/hashindex.pyx'\nitem_source = 'src/borg/item.pyx'\nchecksums_source = 'src/borg/algorithms/checksums.pyx'\nplatform_posix_source = 'src/borg/platform/posix.pyx'\nplatform_linux_source = 'src/borg/platform/linux.pyx'\nplatform_darwin_source = 'src/borg/platform/darwin.pyx'\nplatform_freebsd_source = 'src/borg/platform/freebsd.pyx'\n\ncython_sources = [\n compress_source,\n crypto_ll_source,\n chunker_source,\n hashindex_source,\n item_source,\n checksums_source,\n\n platform_posix_source,\n platform_linux_source,\n platform_freebsd_source,\n platform_darwin_source,\n]\n\ntry:\n from Cython.Distutils import build_ext\n import Cython.Compiler.Main as cython_compiler\n\n class Sdist(sdist):\n def __init__(self, *args, **kwargs):\n for src in cython_sources:\n cython_compiler.compile(src, cython_compiler.default_options)\n super().__init__(*args, **kwargs)\n\n def make_distribution(self):\n self.filelist.extend([\n 'src/borg/compress.c',\n 'src/borg/crypto/low_level.c',\n 'src/borg/chunker.c', 'src/borg/_chunker.c',\n 'src/borg/hashindex.c', 'src/borg/_hashindex.c',\n 'src/borg/cache_sync/cache_sync.c', 'src/borg/cache_sync/sysdep.h', 'src/borg/cache_sync/unpack.h',\n 'src/borg/cache_sync/unpack_define.h', 'src/borg/cache_sync/unpack_template.h',\n 'src/borg/item.c',\n 'src/borg/algorithms/checksums.c',\n 'src/borg/algorithms/crc32_dispatch.c', 'src/borg/algorithms/crc32_clmul.c', 'src/borg/algorithms/crc32_slice_by_8.c',\n 'src/borg/algorithms/xxh64/xxhash.h', 'src/borg/algorithms/xxh64/xxhash.c',\n 'src/borg/platform/posix.c',\n 'src/borg/platform/linux.c',\n 'src/borg/platform/freebsd.c',\n 'src/borg/platform/darwin.c',\n ])\n super().make_distribution()\n\nexcept ImportError:\n class Sdist(sdist):\n def __init__(self, *args, **kwargs):\n raise Exception('Cython is required to run sdist')\n\n compress_source = compress_source.replace('.pyx', '.c')\n crypto_ll_source = crypto_ll_source.replace('.pyx', '.c')\n chunker_source = chunker_source.replace('.pyx', '.c')\n hashindex_source = hashindex_source.replace('.pyx', '.c')\n item_source = item_source.replace('.pyx', '.c')\n checksums_source = checksums_source.replace('.pyx', '.c')\n platform_posix_source = platform_posix_source.replace('.pyx', '.c')\n platform_linux_source = 
platform_linux_source.replace('.pyx', '.c')\n platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')\n platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')\n from distutils.command.build_ext import build_ext\n if not on_rtd and not all(os.path.exists(path) for path in [\n compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source,\n platform_posix_source, platform_linux_source, platform_freebsd_source, platform_darwin_source]):\n raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')\n\n\ndef detect_openssl(prefixes):\n for prefix in prefixes:\n filename = os.path.join(prefix, 'include', 'openssl', 'evp.h')\n if os.path.exists(filename):\n with open(filename, 'rb') as fd:\n if b'PKCS5_PBKDF2_HMAC(' in fd.read():\n return prefix\n\n\ninclude_dirs = []\nlibrary_dirs = []\ndefine_macros = []\n\npossible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_OPENSSL_PREFIX'):\n possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX'))\nssl_prefix = detect_openssl(possible_openssl_prefixes)\nif not ssl_prefix:\n raise Exception('Unable to find OpenSSL >= 1.0 headers. (Looked here: {})'.format(', '.join(possible_openssl_prefixes)))\ninclude_dirs.append(os.path.join(ssl_prefix, 'include'))\nlibrary_dirs.append(os.path.join(ssl_prefix, 'lib'))\n\n\npossible_liblz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_LIBLZ4_PREFIX'):\n possible_liblz4_prefixes.insert(0, os.environ.get('BORG_LIBLZ4_PREFIX'))\nliblz4_prefix = setup_lz4.lz4_system_prefix(possible_liblz4_prefixes)\nif prefer_system_liblz4 and liblz4_prefix:\n print('Detected and preferring liblz4 over bundled LZ4')\n define_macros.append(('BORG_USE_LIBLZ4', 'YES'))\n liblz4_system = True\nelse:\n liblz4_system = False\n\npossible_libb2_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libb2', '/usr/local/libb2',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_LIBB2_PREFIX'):\n possible_libb2_prefixes.insert(0, os.environ.get('BORG_LIBB2_PREFIX'))\nlibb2_prefix = setup_b2.b2_system_prefix(possible_libb2_prefixes)\nif prefer_system_libb2 and libb2_prefix:\n print('Detected and preferring libb2 over bundled BLAKE2')\n define_macros.append(('BORG_USE_LIBB2', 'YES'))\n libb2_system = True\nelse:\n libb2_system = False\n\npossible_libzstd_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libzstd', '/usr/local/libzstd',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_LIBZSTD_PREFIX'):\n possible_libzstd_prefixes.insert(0, os.environ.get('BORG_LIBZSTD_PREFIX'))\nlibzstd_prefix = setup_zstd.zstd_system_prefix(possible_libzstd_prefixes)\nif prefer_system_libzstd and libzstd_prefix:\n print('Detected and preferring libzstd over bundled ZSTD')\n define_macros.append(('BORG_USE_LIBZSTD', 'YES'))\n libzstd_system = True\nelse:\n libzstd_system = False\n\n\nwith open('README.rst', 'r') as fd:\n long_description = fd.read()\n # remove badges\n long_description = re.compile(r'^\\.\\. start-badges.*^\\.\\. end-badges', re.M | re.S).sub('', long_description)\n # remove |substitutions|\n long_description = re.compile(r'\\|screencast\\|').sub('', long_description)\n # remove unknown directives\n long_description = re.compile(r'^\\.\\. 
highlight:: \\w+$', re.M).sub('', long_description)\n\n\ndef format_metavar(option):\n if option.nargs in ('*', '...'):\n return '[%s...]' % option.metavar\n elif option.nargs == '?':\n return '[%s]' % option.metavar\n elif option.nargs is None:\n return option.metavar\n else:\n raise ValueError('Can\\'t format metavar %s, unknown nargs %s!' % (option.metavar, option.nargs))\n\n\nclass build_usage(Command):\n description = \"generate usage for each command\"\n\n user_options = [\n ('output=', 'O', 'output directory'),\n ]\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print('generating usage docs')\n import borg\n borg.doc_mode = 'build_man'\n if not os.path.exists('docs/usage'):\n os.mkdir('docs/usage')\n # allows us to build docs without the C modules fully loaded during help generation\n from borg.archiver import Archiver\n parser = Archiver(prog='borg').build_parser()\n # borgfs has a separate man page to satisfy debian's \"every program from a package\n # must have a man page\" requirement, but it doesn't need a separate HTML docs page\n #borgfs_parser = Archiver(prog='borgfs').build_parser()\n\n self.generate_level(\"\", parser, Archiver)\n\n def generate_level(self, prefix, parser, Archiver, extra_choices=None):\n is_subcommand = False\n choices = {}\n for action in parser._actions:\n if action.choices is not None and 'SubParsersAction' in str(action.__class__):\n is_subcommand = True\n for cmd, parser in action.choices.items():\n choices[prefix + cmd] = parser\n if extra_choices is not None:\n choices.update(extra_choices)\n if prefix and not choices:\n return\n print('found commands: %s' % list(choices.keys()))\n\n for command, parser in sorted(choices.items()):\n if command.startswith('debug'):\n print('skipping', command)\n continue\n print('generating help for %s' % command)\n\n if self.generate_level(command + \" \", parser, Archiver):\n continue\n\n with open('docs/usage/%s.rst.inc' % command.replace(\" \", \"_\"), 'w') as doc:\n doc.write(\".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\\n\\n\")\n if command == 'help':\n for topic in Archiver.helptext:\n params = {\"topic\": topic,\n \"underline\": '~' * len('borg help ' + topic)}\n doc.write(\".. _borg_{topic}:\\n\\n\".format(**params))\n doc.write(\"borg help {topic}\\n{underline}\\n\\n\".format(**params))\n doc.write(Archiver.helptext[topic])\n else:\n params = {\"command\": command,\n \"command_\": command.replace(' ', '_'),\n \"underline\": '-' * len('borg ' + command)}\n doc.write(\".. _borg_{command_}:\\n\\n\".format(**params))\n doc.write(\"borg {command}\\n{underline}\\n.. 
code-block:: none\\n\\n borg [common options] {command}\".format(**params))\n self.write_usage(parser, doc)\n epilog = parser.epilog\n parser.epilog = None\n self.write_options(parser, doc)\n doc.write(\"\\n\\nDescription\\n~~~~~~~~~~~\\n\")\n doc.write(epilog)\n\n if 'create' in choices:\n common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]\n with open('docs/usage/common-options.rst.inc', 'w') as doc:\n self.write_options_group(common_options, doc, False, base_indent=0)\n\n return is_subcommand\n\n def write_usage(self, parser, fp):\n if any(len(o.option_strings) for o in parser._actions):\n fp.write(' [options]')\n for option in parser._actions:\n if option.option_strings:\n continue\n fp.write(' ' + format_metavar(option))\n fp.write('\\n\\n')\n\n def write_options(self, parser, fp):\n def is_positional_group(group):\n return any(not o.option_strings for o in group._group_actions)\n\n # HTML output:\n # A table using some column-spans\n\n def html_write(s):\n for line in s.splitlines():\n fp.write(' ' + line + '\\n')\n\n rows = []\n for group in parser._action_groups:\n if group.title == 'Common options':\n # (no of columns used, columns, ...)\n rows.append((1, '.. class:: borg-common-opt-ref\\n\\n:ref:`common_options`'))\n else:\n if not group._group_actions:\n continue\n group_header = '**%s**' % group.title\n if group.description:\n group_header += ' \u2014 ' + group.description\n rows.append((1, group_header))\n if is_positional_group(group):\n for option in group._group_actions:\n rows.append((3, '', '``%s``' % option.metavar, option.help or ''))\n else:\n for option in group._group_actions:\n if option.metavar:\n option_fmt = '``%s ' + option.metavar + '``'\n else:\n option_fmt = '``%s``'\n option_str = ', '.join(option_fmt % s for s in option.option_strings)\n option_desc = textwrap.dedent((option.help or '') % option.__dict__)\n rows.append((3, '', option_str, option_desc))\n\n fp.write('.. only:: html\\n\\n')\n table = io.StringIO()\n table.write('.. class:: borg-options-table\\n\\n')\n self.rows_to_table(rows, table.write)\n fp.write(textwrap.indent(table.getvalue(), ' ' * 4))\n\n # LaTeX output:\n # Regular rST option lists (irregular column widths)\n latex_options = io.StringIO()\n for group in parser._action_groups:\n if group.title == 'Common options':\n latex_options.write('\\n\\n:ref:`common_options`\\n')\n latex_options.write(' |')\n else:\n self.write_options_group(group, latex_options)\n fp.write('\\n.. 
only:: latex\\n\\n')\n fp.write(textwrap.indent(latex_options.getvalue(), ' ' * 4))\n\n def rows_to_table(self, rows, write):\n def write_row_separator():\n write('+')\n for column_width in column_widths:\n write('-' * (column_width + 1))\n write('+')\n write('\\n')\n\n # Find column count and width\n column_count = max(columns for columns, *_ in rows)\n column_widths = [0] * column_count\n for columns, *cells in rows:\n for i in range(columns):\n # \"+ 1\" because we want a space between the cell contents and the delimiting \"|\" in the output\n column_widths[i] = max(column_widths[i], len(cells[i]) + 1)\n\n for columns, *original_cells in rows:\n write_row_separator()\n # If a cell contains newlines, then the row must be split up in individual rows\n # where each cell contains no newline.\n rowspanning_cells = []\n original_cells = list(original_cells)\n while any('\\n' in cell for cell in original_cells):\n cell_bloc = []\n for i, cell in enumerate(original_cells):\n pre, _, original_cells[i] = cell.partition('\\n')\n cell_bloc.append(pre)\n rowspanning_cells.append(cell_bloc)\n rowspanning_cells.append(original_cells)\n for cells in rowspanning_cells:\n for i, column_width in enumerate(column_widths):\n if i < columns:\n write('| ')\n write(cells[i].ljust(column_width))\n else:\n write(' ')\n write(''.ljust(column_width))\n write('|\\n')\n\n write_row_separator()\n # This bit of JavaScript kills the <colgroup> that is invariably inserted by docutils,\n # but does absolutely no good here. It sets bogus column widths which cannot be overridden\n # with CSS alone.\n # Since this is HTML-only output, it would be possible to just generate a <table> directly,\n # but then we'd lose rST formatting.\n write(textwrap.dedent(\"\"\"\n .. raw:: html\n\n <script type='text/javascript'>\n $(document).ready(function () {\n $('.borg-options-table colgroup').remove();\n })\n </script>\n \"\"\"))\n\n def write_options_group(self, group, fp, with_title=True, base_indent=4):\n def is_positional_group(group):\n return any(not o.option_strings for o in group._group_actions)\n\n indent = ' ' * base_indent\n\n if is_positional_group(group):\n for option in group._group_actions:\n fp.write(option.metavar + '\\n')\n fp.write(textwrap.indent(option.help or '', ' ' * base_indent) + '\\n')\n return\n\n if not group._group_actions:\n return\n\n if with_title:\n fp.write('\\n\\n')\n fp.write(group.title + '\\n')\n\n opts = OrderedDict()\n\n for option in group._group_actions:\n if option.metavar:\n option_fmt = '%s ' + option.metavar\n else:\n option_fmt = '%s'\n option_str = ', '.join(option_fmt % s for s in option.option_strings)\n option_desc = textwrap.dedent((option.help or '') % option.__dict__)\n opts[option_str] = textwrap.indent(option_desc, ' ' * 4)\n\n padding = len(max(opts)) + 1\n\n for option, desc in opts.items():\n fp.write(indent + option.ljust(padding) + desc + '\\n')\n\n\nclass build_man(Command):\n description = 'build man pages'\n\n user_options = []\n\n see_also = {\n 'create': ('delete', 'prune', 'check', 'patterns', 'placeholders', 'compression'),\n 'recreate': ('patterns', 'placeholders', 'compression'),\n 'list': ('info', 'diff', 'prune', 'patterns'),\n 'info': ('list', 'diff'),\n 'init': ('create', 'delete', 'check', 'list', 'key-import', 'key-export', 'key-change-passphrase'),\n 'key-import': ('key-export', ),\n 'key-export': ('key-import', ),\n 'mount': ('umount', 'extract'), # Would be cooler if these two were on the same page\n 'umount': ('mount', ),\n 'extract': ('mount', ),\n }\n\n 
rst_prelude = textwrap.dedent(\"\"\"\n .. role:: ref(title)\n\n .. |project_name| replace:: Borg\n\n \"\"\")\n\n usage_group = {\n 'break-lock': 'lock',\n 'with-lock': 'lock',\n\n 'change-passphrase': 'key',\n 'key_change-passphrase': 'key',\n 'key_export': 'key',\n 'key_import': 'key',\n 'key_migrate-to-repokey': 'key',\n\n 'export-tar': 'tar',\n\n 'benchmark_crud': 'benchmark',\n\n 'umount': 'mount',\n }\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print('building man pages (in docs/man)', file=sys.stderr)\n import borg\n borg.doc_mode = 'build_man'\n os.makedirs('docs/man', exist_ok=True)\n # allows us to build docs without the C modules fully loaded during help generation\n from borg.archiver import Archiver\n parser = Archiver(prog='borg').build_parser()\n borgfs_parser = Archiver(prog='borgfs').build_parser()\n\n self.generate_level('', parser, Archiver, {'borgfs': borgfs_parser})\n self.build_topic_pages(Archiver)\n self.build_intro_page()\n\n def generate_level(self, prefix, parser, Archiver, extra_choices=None):\n is_subcommand = False\n choices = {}\n for action in parser._actions:\n if action.choices is not None and 'SubParsersAction' in str(action.__class__):\n is_subcommand = True\n for cmd, parser in action.choices.items():\n choices[prefix + cmd] = parser\n if extra_choices is not None:\n choices.update(extra_choices)\n if prefix and not choices:\n return\n\n for command, parser in sorted(choices.items()):\n if command.startswith('debug') or command == 'help':\n continue\n\n if command == \"borgfs\":\n man_title = command\n else:\n man_title = 'borg-' + command.replace(' ', '-')\n print('building man page', man_title + '(1)', file=sys.stderr)\n\n is_intermediary = self.generate_level(command + ' ', parser, Archiver)\n\n doc, write = self.new_doc()\n self.write_man_header(write, man_title, parser.description)\n\n self.write_heading(write, 'SYNOPSIS')\n if is_intermediary:\n subparsers = [action for action in parser._actions if 'SubParsersAction' in str(action.__class__)][0]\n for subcommand in subparsers.choices:\n write('| borg', '[common options]', command, subcommand, '...')\n self.see_also.setdefault(command, []).append('%s-%s' % (command, subcommand))\n else:\n if command == \"borgfs\":\n write(command, end='')\n else:\n write('borg', '[common options]', command, end='')\n self.write_usage(write, parser)\n write('\\n')\n\n description, _, notes = parser.epilog.partition('\\n.. 
man NOTES')\n\n if description:\n self.write_heading(write, 'DESCRIPTION')\n write(description)\n\n if not is_intermediary:\n self.write_heading(write, 'OPTIONS')\n write('See `borg-common(1)` for common options of Borg commands.')\n write()\n self.write_options(write, parser)\n\n self.write_examples(write, command)\n\n if notes:\n self.write_heading(write, 'NOTES')\n write(notes)\n\n self.write_see_also(write, man_title)\n\n self.gen_man_page(man_title, doc.getvalue())\n\n # Generate the borg-common(1) man page with the common options.\n if 'create' in choices:\n doc, write = self.new_doc()\n man_title = 'borg-common'\n self.write_man_header(write, man_title, 'Common options of Borg commands')\n\n common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]\n\n self.write_heading(write, 'SYNOPSIS')\n self.write_options_group(write, common_options)\n self.write_see_also(write, man_title)\n self.gen_man_page(man_title, doc.getvalue())\n\n return is_subcommand\n\n def build_topic_pages(self, Archiver):\n for topic, text in Archiver.helptext.items():\n doc, write = self.new_doc()\n man_title = 'borg-' + topic\n print('building man page', man_title + '(1)', file=sys.stderr)\n\n self.write_man_header(write, man_title, 'Details regarding ' + topic)\n self.write_heading(write, 'DESCRIPTION')\n write(text)\n self.gen_man_page(man_title, doc.getvalue())\n\n def build_intro_page(self):\n print('building man page borg(1)', file=sys.stderr)\n with open('docs/man_intro.rst') as fd:\n man_intro = fd.read()\n self.gen_man_page('borg', self.rst_prelude + man_intro)\n\n def new_doc(self):\n doc = io.StringIO(self.rst_prelude)\n doc.read()\n write = self.printer(doc)\n return doc, write\n\n def printer(self, fd):\n def write(*args, **kwargs):\n print(*args, file=fd, **kwargs)\n return write\n\n def write_heading(self, write, header, char='-', double_sided=False):\n write()\n if double_sided:\n write(char * len(header))\n write(header)\n write(char * len(header))\n write()\n\n def write_man_header(self, write, title, description):\n self.write_heading(write, title, '=', double_sided=True)\n self.write_heading(write, description, double_sided=True)\n # man page metadata\n write(':Author: The Borg Collective')\n write(':Date:', datetime.utcnow().date().isoformat())\n write(':Manual section: 1')\n write(':Manual group: borg backup tool')\n write()\n\n def write_examples(self, write, command):\n command = command.replace(' ', '_')\n with open('docs/usage/%s.rst' % self.usage_group.get(command, command)) as fd:\n usage = fd.read()\n usage_include = '.. include:: %s.rst.inc' % command\n begin = usage.find(usage_include)\n end = usage.find('.. include', begin + 1)\n # If a command has a dedicated anchor, it will occur before the command's include.\n if 0 < usage.find('.. _', begin + 1) < end:\n end = usage.find('.. _', begin + 1)\n examples = usage[begin:end]\n examples = examples.replace(usage_include, '')\n examples = examples.replace('Examples\\n~~~~~~~~', '')\n examples = examples.replace('Miscellaneous Help\\n------------------', '')\n examples = examples.replace('``docs/misc/prune-example.txt``:', '``docs/misc/prune-example.txt``.')\n examples = examples.replace('.. 
highlight:: none\\n', '') # we don't support highlight\n examples = re.sub('^(~+)$', lambda matches: '+' * len(matches.group(0)), examples, flags=re.MULTILINE)\n examples = examples.strip()\n if examples:\n self.write_heading(write, 'EXAMPLES', '-')\n write(examples)\n\n def write_see_also(self, write, man_title):\n see_also = self.see_also.get(man_title.replace('borg-', ''), ())\n see_also = ['`borg-%s(1)`' % s for s in see_also]\n see_also.insert(0, '`borg-common(1)`')\n self.write_heading(write, 'SEE ALSO')\n write(', '.join(see_also))\n\n def gen_man_page(self, name, rst):\n from docutils.writers import manpage\n from docutils.core import publish_string\n from docutils.nodes import inline\n from docutils.parsers.rst import roles\n\n def issue(name, rawtext, text, lineno, inliner, options={}, content=[]):\n return [inline(rawtext, '#' + text)], []\n\n roles.register_local_role('issue', issue)\n # We give the source_path so that docutils can find relative includes\n # as-if the document where located in the docs/ directory.\n man_page = publish_string(source=rst, source_path='docs/%s.rst' % name, writer=manpage.Writer())\n with open('docs/man/%s.1' % name, 'wb') as fd:\n fd.write(man_page)\n\n def write_usage(self, write, parser):\n if any(len(o.option_strings) for o in parser._actions):\n write(' [options] ', end='')\n for option in parser._actions:\n if option.option_strings:\n continue\n write(format_metavar(option), end=' ')\n\n def write_options(self, write, parser):\n for group in parser._action_groups:\n if group.title == 'Common options' or not group._group_actions:\n continue\n title = 'arguments' if group.title == 'positional arguments' else group.title\n self.write_heading(write, title, '+')\n self.write_options_group(write, group)\n\n def write_options_group(self, write, group):\n def is_positional_group(group):\n return any(not o.option_strings for o in group._group_actions)\n\n if is_positional_group(group):\n for option in group._group_actions:\n write(option.metavar)\n write(textwrap.indent(option.help or '', ' ' * 4))\n return\n\n opts = OrderedDict()\n\n for option in group._group_actions:\n if option.metavar:\n option_fmt = '%s ' + option.metavar\n else:\n option_fmt = '%s'\n option_str = ', '.join(option_fmt % s for s in option.option_strings)\n option_desc = textwrap.dedent((option.help or '') % option.__dict__)\n opts[option_str] = textwrap.indent(option_desc, ' ' * 4)\n\n padding = len(max(opts)) + 1\n\n for option, desc in opts.items():\n write(option.ljust(padding), desc)\n\n\ndef rm(file):\n try:\n os.unlink(file)\n print('rm', file)\n except FileNotFoundError:\n pass\n\n\nclass Clean(clean):\n def run(self):\n super().run()\n for source in cython_sources:\n genc = source.replace('.pyx', '.c')\n rm(genc)\n compiled_glob = source.replace('.pyx', '.cpython*')\n for compiled in sorted(glob(compiled_glob)):\n rm(compiled)\n\ncmdclass = {\n 'build_ext': build_ext,\n 'build_usage': build_usage,\n 'build_man': build_man,\n 'sdist': Sdist,\n 'clean': Clean,\n}\n\next_modules = []\nif not on_rtd:\n compress_ext_kwargs = dict(sources=[compress_source], include_dirs=include_dirs, library_dirs=library_dirs,\n define_macros=define_macros)\n compress_ext_kwargs = setup_lz4.lz4_ext_kwargs(bundled_path='src/borg/algorithms/lz4',\n system_prefix=liblz4_prefix, system=liblz4_system,\n **compress_ext_kwargs)\n compress_ext_kwargs = setup_zstd.zstd_ext_kwargs(bundled_path='src/borg/algorithms/zstd',\n system_prefix=libzstd_prefix, system=libzstd_system,\n multithreaded=False, 
legacy=False, **compress_ext_kwargs)\n crypto_ext_kwargs = dict(sources=[crypto_ll_source], libraries=['crypto'],\n include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros)\n crypto_ext_kwargs = setup_b2.b2_ext_kwargs(bundled_path='src/borg/algorithms/blake2',\n system_prefix=libb2_prefix, system=libb2_system,\n **crypto_ext_kwargs)\n ext_modules += [\n Extension('borg.compress', **compress_ext_kwargs),\n Extension('borg.crypto.low_level', **crypto_ext_kwargs),\n Extension('borg.hashindex', [hashindex_source]),\n Extension('borg.item', [item_source]),\n Extension('borg.chunker', [chunker_source]),\n Extension('borg.algorithms.checksums', [checksums_source]),\n ]\n if not sys.platform.startswith(('win32', )):\n ext_modules.append(Extension('borg.platform.posix', [platform_posix_source]))\n if sys.platform == 'linux':\n ext_modules.append(Extension('borg.platform.linux', [platform_linux_source], libraries=['acl']))\n elif sys.platform.startswith('freebsd'):\n ext_modules.append(Extension('borg.platform.freebsd', [platform_freebsd_source]))\n elif sys.platform == 'darwin':\n ext_modules.append(Extension('borg.platform.darwin', [platform_darwin_source]))\n\nsetup(\n name='borgbackup',\n use_scm_version={\n 'write_to': 'src/borg/_version.py',\n },\n author='The Borg Collective (see AUTHORS file)',\n author_email='[email protected]',\n url='https://borgbackup.readthedocs.io/',\n description='Deduplicated, encrypted, authenticated and compressed backups',\n long_description=long_description,\n license='BSD',\n platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: BSD :: OpenBSD',\n 'Operating System :: POSIX :: BSD :: NetBSD',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Security :: Cryptography',\n 'Topic :: System :: Archiving :: Backup',\n ],\n packages=find_packages('src'),\n package_dir={'': 'src'},\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'borg = borg.archiver:main',\n 'borgfs = borg.archiver:main',\n ]\n },\n # See also the MANIFEST.in file.\n # We want to install all the files in the package directories...\n include_package_data=True,\n # ...except the source files which have been compiled (C extensions):\n exclude_package_data={\n '': ['*.c', '*.h', '*.pyx', ],\n },\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n setup_requires=['setuptools_scm>=1.7'],\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- encoding: utf-8 *-*\nimport os\nimport io\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom glob import glob\n\nfrom distutils.command.build import build\nfrom distutils.core import Command\n\nimport textwrap\n\nimport setup_lz4\nimport setup_zstd\nimport setup_b2\n\n# True: use the shared liblz4 (>= 1.7.0 / r129) from the system, False: use the bundled lz4 code\nprefer_system_liblz4 = True\n\n# True: use the shared libzstd (>= 1.3.0) from the system, False: use the bundled zstd code\nprefer_system_libzstd 
= False\n\n# True: use the shared libb2 from the system, False: use the bundled blake2 code\nprefer_system_libb2 = True\n\nmin_python = (3, 4)\nmy_python = sys.version_info\n\nif my_python < min_python:\n print(\"Borg requires Python %d.%d or later\" % min_python)\n sys.exit(1)\n\n# Are we building on ReadTheDocs?\non_rtd = os.environ.get('READTHEDOCS')\n\ninstall_requires = [\n # we are rather picky about msgpack versions, because a good working msgpack is\n # very important for borg, see https://github.com/borgbackup/borg/issues/3753\n # best versions seem to be 0.4.6, 0.4.7, 0.4.8 and 0.5.6:\n 'msgpack-python >=0.4.6, <=0.5.6, !=0.5.0, !=0.5.1, !=0.5.2, !=0.5.3, !=0.5.4, !=0.5.5',\n # if you can't satisfy the above requirement, these are versions that might\n # also work ok, IF you make sure to use the COMPILED version of msgpack-python,\n # NOT the PURE PYTHON fallback implementation: ==0.5.1, ==0.5.4\n # using any other version is not supported by borg development, feel free to\n # do it on your own risk (and after own testing).\n]\n\n# note for package maintainers: if you package borgbackup for distribution,\n# please add llfuse as a *requirement* on all platforms that have a working\n# llfuse package. \"borg mount\" needs llfuse to work.\n# if you do not have llfuse, do not require it, most of borgbackup will work.\nextras_require = {\n # llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0\n # llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.1.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.2 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 1.3 (tested shortly, looks ok), needs FUSE version >= 2.8.0\n # llfuse 2.0 will break API\n 'fuse': ['llfuse<2.0', ],\n}\n\nif sys.platform.startswith('freebsd'):\n # llfuse was frequently broken / did not build on freebsd\n # llfuse 0.41.1, 1.1 are ok\n extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]\n\nif my_python >= (3, 7):\n extras_require['fuse'][0] += ', >=1.3.4'\n\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.sdist import sdist\nfrom distutils.command.clean import clean\n\ncompress_source = 'src/borg/compress.pyx'\ncrypto_ll_source = 'src/borg/crypto/low_level.pyx'\nchunker_source = 'src/borg/chunker.pyx'\nhashindex_source = 'src/borg/hashindex.pyx'\nitem_source = 'src/borg/item.pyx'\nchecksums_source = 'src/borg/algorithms/checksums.pyx'\nplatform_posix_source = 'src/borg/platform/posix.pyx'\nplatform_linux_source = 'src/borg/platform/linux.pyx'\nplatform_darwin_source = 'src/borg/platform/darwin.pyx'\nplatform_freebsd_source = 'src/borg/platform/freebsd.pyx'\n\ncython_sources = [\n compress_source,\n crypto_ll_source,\n chunker_source,\n hashindex_source,\n item_source,\n checksums_source,\n\n platform_posix_source,\n platform_linux_source,\n platform_freebsd_source,\n platform_darwin_source,\n]\n\ntry:\n from Cython.Distutils import build_ext\n import Cython.Compiler.Main as cython_compiler\n\n class Sdist(sdist):\n def __init__(self, *args, **kwargs):\n for src in cython_sources:\n cython_compiler.compile(src, cython_compiler.default_options)\n super().__init__(*args, **kwargs)\n\n def make_distribution(self):\n self.filelist.extend([\n 'src/borg/compress.c',\n 
'src/borg/crypto/low_level.c',\n 'src/borg/chunker.c', 'src/borg/_chunker.c',\n 'src/borg/hashindex.c', 'src/borg/_hashindex.c',\n 'src/borg/cache_sync/cache_sync.c', 'src/borg/cache_sync/sysdep.h', 'src/borg/cache_sync/unpack.h',\n 'src/borg/cache_sync/unpack_define.h', 'src/borg/cache_sync/unpack_template.h',\n 'src/borg/item.c',\n 'src/borg/algorithms/checksums.c',\n 'src/borg/algorithms/crc32_dispatch.c', 'src/borg/algorithms/crc32_clmul.c', 'src/borg/algorithms/crc32_slice_by_8.c',\n 'src/borg/algorithms/xxh64/xxhash.h', 'src/borg/algorithms/xxh64/xxhash.c',\n 'src/borg/platform/posix.c',\n 'src/borg/platform/linux.c',\n 'src/borg/platform/freebsd.c',\n 'src/borg/platform/darwin.c',\n ])\n super().make_distribution()\n\nexcept ImportError:\n class Sdist(sdist):\n def __init__(self, *args, **kwargs):\n raise Exception('Cython is required to run sdist')\n\n compress_source = compress_source.replace('.pyx', '.c')\n crypto_ll_source = crypto_ll_source.replace('.pyx', '.c')\n chunker_source = chunker_source.replace('.pyx', '.c')\n hashindex_source = hashindex_source.replace('.pyx', '.c')\n item_source = item_source.replace('.pyx', '.c')\n checksums_source = checksums_source.replace('.pyx', '.c')\n platform_posix_source = platform_posix_source.replace('.pyx', '.c')\n platform_linux_source = platform_linux_source.replace('.pyx', '.c')\n platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')\n platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')\n from distutils.command.build_ext import build_ext\n if not on_rtd and not all(os.path.exists(path) for path in [\n compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source,\n platform_posix_source, platform_linux_source, platform_freebsd_source, platform_darwin_source]):\n raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')\n\n\ndef detect_openssl(prefixes):\n for prefix in prefixes:\n filename = os.path.join(prefix, 'include', 'openssl', 'evp.h')\n if os.path.exists(filename):\n with open(filename, 'rb') as fd:\n if b'PKCS5_PBKDF2_HMAC(' in fd.read():\n return prefix\n\n\ninclude_dirs = []\nlibrary_dirs = []\ndefine_macros = []\n\npossible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_OPENSSL_PREFIX'):\n possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX'))\nssl_prefix = detect_openssl(possible_openssl_prefixes)\nif not ssl_prefix:\n raise Exception('Unable to find OpenSSL >= 1.0 headers. 
(Looked here: {})'.format(', '.join(possible_openssl_prefixes)))\ninclude_dirs.append(os.path.join(ssl_prefix, 'include'))\nlibrary_dirs.append(os.path.join(ssl_prefix, 'lib'))\n\n\npossible_liblz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_LIBLZ4_PREFIX'):\n possible_liblz4_prefixes.insert(0, os.environ.get('BORG_LIBLZ4_PREFIX'))\nliblz4_prefix = setup_lz4.lz4_system_prefix(possible_liblz4_prefixes)\nif prefer_system_liblz4 and liblz4_prefix:\n print('Detected and preferring liblz4 over bundled LZ4')\n define_macros.append(('BORG_USE_LIBLZ4', 'YES'))\n liblz4_system = True\nelse:\n liblz4_system = False\n\npossible_libb2_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libb2', '/usr/local/libb2',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_LIBB2_PREFIX'):\n possible_libb2_prefixes.insert(0, os.environ.get('BORG_LIBB2_PREFIX'))\nlibb2_prefix = setup_b2.b2_system_prefix(possible_libb2_prefixes)\nif prefer_system_libb2 and libb2_prefix:\n print('Detected and preferring libb2 over bundled BLAKE2')\n define_macros.append(('BORG_USE_LIBB2', 'YES'))\n libb2_system = True\nelse:\n libb2_system = False\n\npossible_libzstd_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libzstd', '/usr/local/libzstd',\n '/usr/local/borg', '/opt/local', '/opt/pkg', ]\nif os.environ.get('BORG_LIBZSTD_PREFIX'):\n possible_libzstd_prefixes.insert(0, os.environ.get('BORG_LIBZSTD_PREFIX'))\nlibzstd_prefix = setup_zstd.zstd_system_prefix(possible_libzstd_prefixes)\nif prefer_system_libzstd and libzstd_prefix:\n print('Detected and preferring libzstd over bundled ZSTD')\n define_macros.append(('BORG_USE_LIBZSTD', 'YES'))\n libzstd_system = True\nelse:\n libzstd_system = False\n\n\nwith open('README.rst', 'r') as fd:\n long_description = fd.read()\n # remove badges\n long_description = re.compile(r'^\\.\\. start-badges.*^\\.\\. end-badges', re.M | re.S).sub('', long_description)\n # remove |substitutions|\n long_description = re.compile(r'\\|screencast\\|').sub('', long_description)\n # remove unknown directives\n long_description = re.compile(r'^\\.\\. highlight:: \\w+$', re.M).sub('', long_description)\n\n\ndef format_metavar(option):\n if option.nargs in ('*', '...'):\n return '[%s...]' % option.metavar\n elif option.nargs == '?':\n return '[%s]' % option.metavar\n elif option.nargs is None:\n return option.metavar\n else:\n raise ValueError('Can\\'t format metavar %s, unknown nargs %s!' 
% (option.metavar, option.nargs))\n\n\nclass build_usage(Command):\n description = \"generate usage for each command\"\n\n user_options = [\n ('output=', 'O', 'output directory'),\n ]\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print('generating usage docs')\n import borg\n borg.doc_mode = 'build_man'\n if not os.path.exists('docs/usage'):\n os.mkdir('docs/usage')\n # allows us to build docs without the C modules fully loaded during help generation\n from borg.archiver import Archiver\n parser = Archiver(prog='borg').build_parser()\n # borgfs has a separate man page to satisfy debian's \"every program from a package\n # must have a man page\" requirement, but it doesn't need a separate HTML docs page\n #borgfs_parser = Archiver(prog='borgfs').build_parser()\n\n self.generate_level(\"\", parser, Archiver)\n\n def generate_level(self, prefix, parser, Archiver, extra_choices=None):\n is_subcommand = False\n choices = {}\n for action in parser._actions:\n if action.choices is not None and 'SubParsersAction' in str(action.__class__):\n is_subcommand = True\n for cmd, parser in action.choices.items():\n choices[prefix + cmd] = parser\n if extra_choices is not None:\n choices.update(extra_choices)\n if prefix and not choices:\n return\n print('found commands: %s' % list(choices.keys()))\n\n for command, parser in sorted(choices.items()):\n if command.startswith('debug'):\n print('skipping', command)\n continue\n print('generating help for %s' % command)\n\n if self.generate_level(command + \" \", parser, Archiver):\n continue\n\n with open('docs/usage/%s.rst.inc' % command.replace(\" \", \"_\"), 'w') as doc:\n doc.write(\".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\\n\\n\")\n if command == 'help':\n for topic in Archiver.helptext:\n params = {\"topic\": topic,\n \"underline\": '~' * len('borg help ' + topic)}\n doc.write(\".. _borg_{topic}:\\n\\n\".format(**params))\n doc.write(\"borg help {topic}\\n{underline}\\n\\n\".format(**params))\n doc.write(Archiver.helptext[topic])\n else:\n params = {\"command\": command,\n \"command_\": command.replace(' ', '_'),\n \"underline\": '-' * len('borg ' + command)}\n doc.write(\".. _borg_{command_}:\\n\\n\".format(**params))\n doc.write(\"borg {command}\\n{underline}\\n.. 
code-block:: none\\n\\n borg [common options] {command}\".format(**params))\n self.write_usage(parser, doc)\n epilog = parser.epilog\n parser.epilog = None\n self.write_options(parser, doc)\n doc.write(\"\\n\\nDescription\\n~~~~~~~~~~~\\n\")\n doc.write(epilog)\n\n if 'create' in choices:\n common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]\n with open('docs/usage/common-options.rst.inc', 'w') as doc:\n self.write_options_group(common_options, doc, False, base_indent=0)\n\n return is_subcommand\n\n def write_usage(self, parser, fp):\n if any(len(o.option_strings) for o in parser._actions):\n fp.write(' [options]')\n for option in parser._actions:\n if option.option_strings:\n continue\n fp.write(' ' + format_metavar(option))\n fp.write('\\n\\n')\n\n def write_options(self, parser, fp):\n def is_positional_group(group):\n return any(not o.option_strings for o in group._group_actions)\n\n # HTML output:\n # A table using some column-spans\n\n def html_write(s):\n for line in s.splitlines():\n fp.write(' ' + line + '\\n')\n\n rows = []\n for group in parser._action_groups:\n if group.title == 'Common options':\n # (no of columns used, columns, ...)\n rows.append((1, '.. class:: borg-common-opt-ref\\n\\n:ref:`common_options`'))\n else:\n if not group._group_actions:\n continue\n group_header = '**%s**' % group.title\n if group.description:\n group_header += ' \u2014 ' + group.description\n rows.append((1, group_header))\n if is_positional_group(group):\n for option in group._group_actions:\n rows.append((3, '', '``%s``' % option.metavar, option.help or ''))\n else:\n for option in group._group_actions:\n if option.metavar:\n option_fmt = '``%s ' + option.metavar + '``'\n else:\n option_fmt = '``%s``'\n option_str = ', '.join(option_fmt % s for s in option.option_strings)\n option_desc = textwrap.dedent((option.help or '') % option.__dict__)\n rows.append((3, '', option_str, option_desc))\n\n fp.write('.. only:: html\\n\\n')\n table = io.StringIO()\n table.write('.. class:: borg-options-table\\n\\n')\n self.rows_to_table(rows, table.write)\n fp.write(textwrap.indent(table.getvalue(), ' ' * 4))\n\n # LaTeX output:\n # Regular rST option lists (irregular column widths)\n latex_options = io.StringIO()\n for group in parser._action_groups:\n if group.title == 'Common options':\n latex_options.write('\\n\\n:ref:`common_options`\\n')\n latex_options.write(' |')\n else:\n self.write_options_group(group, latex_options)\n fp.write('\\n.. 
only:: latex\\n\\n')\n fp.write(textwrap.indent(latex_options.getvalue(), ' ' * 4))\n\n def rows_to_table(self, rows, write):\n def write_row_separator():\n write('+')\n for column_width in column_widths:\n write('-' * (column_width + 1))\n write('+')\n write('\\n')\n\n # Find column count and width\n column_count = max(columns for columns, *_ in rows)\n column_widths = [0] * column_count\n for columns, *cells in rows:\n for i in range(columns):\n # \"+ 1\" because we want a space between the cell contents and the delimiting \"|\" in the output\n column_widths[i] = max(column_widths[i], len(cells[i]) + 1)\n\n for columns, *original_cells in rows:\n write_row_separator()\n # If a cell contains newlines, then the row must be split up in individual rows\n # where each cell contains no newline.\n rowspanning_cells = []\n original_cells = list(original_cells)\n while any('\\n' in cell for cell in original_cells):\n cell_bloc = []\n for i, cell in enumerate(original_cells):\n pre, _, original_cells[i] = cell.partition('\\n')\n cell_bloc.append(pre)\n rowspanning_cells.append(cell_bloc)\n rowspanning_cells.append(original_cells)\n for cells in rowspanning_cells:\n for i, column_width in enumerate(column_widths):\n if i < columns:\n write('| ')\n write(cells[i].ljust(column_width))\n else:\n write(' ')\n write(''.ljust(column_width))\n write('|\\n')\n\n write_row_separator()\n # This bit of JavaScript kills the <colgroup> that is invariably inserted by docutils,\n # but does absolutely no good here. It sets bogus column widths which cannot be overridden\n # with CSS alone.\n # Since this is HTML-only output, it would be possible to just generate a <table> directly,\n # but then we'd lose rST formatting.\n write(textwrap.dedent(\"\"\"\n .. raw:: html\n\n <script type='text/javascript'>\n $(document).ready(function () {\n $('.borg-options-table colgroup').remove();\n })\n </script>\n \"\"\"))\n\n def write_options_group(self, group, fp, with_title=True, base_indent=4):\n def is_positional_group(group):\n return any(not o.option_strings for o in group._group_actions)\n\n indent = ' ' * base_indent\n\n if is_positional_group(group):\n for option in group._group_actions:\n fp.write(option.metavar + '\\n')\n fp.write(textwrap.indent(option.help or '', ' ' * base_indent) + '\\n')\n return\n\n if not group._group_actions:\n return\n\n if with_title:\n fp.write('\\n\\n')\n fp.write(group.title + '\\n')\n\n opts = OrderedDict()\n\n for option in group._group_actions:\n if option.metavar:\n option_fmt = '%s ' + option.metavar\n else:\n option_fmt = '%s'\n option_str = ', '.join(option_fmt % s for s in option.option_strings)\n option_desc = textwrap.dedent((option.help or '') % option.__dict__)\n opts[option_str] = textwrap.indent(option_desc, ' ' * 4)\n\n padding = len(max(opts)) + 1\n\n for option, desc in opts.items():\n fp.write(indent + option.ljust(padding) + desc + '\\n')\n\n\nclass build_man(Command):\n description = 'build man pages'\n\n user_options = []\n\n see_also = {\n 'create': ('delete', 'prune', 'check', 'patterns', 'placeholders', 'compression'),\n 'recreate': ('patterns', 'placeholders', 'compression'),\n 'list': ('info', 'diff', 'prune', 'patterns'),\n 'info': ('list', 'diff'),\n 'init': ('create', 'delete', 'check', 'list', 'key-import', 'key-export', 'key-change-passphrase'),\n 'key-import': ('key-export', ),\n 'key-export': ('key-import', ),\n 'mount': ('umount', 'extract'), # Would be cooler if these two were on the same page\n 'umount': ('mount', ),\n 'extract': ('mount', ),\n }\n\n 
rst_prelude = textwrap.dedent(\"\"\"\n .. role:: ref(title)\n\n .. |project_name| replace:: Borg\n\n \"\"\")\n\n usage_group = {\n 'break-lock': 'lock',\n 'with-lock': 'lock',\n\n 'change-passphrase': 'key',\n 'key_change-passphrase': 'key',\n 'key_export': 'key',\n 'key_import': 'key',\n 'key_migrate-to-repokey': 'key',\n\n 'export-tar': 'tar',\n\n 'benchmark_crud': 'benchmark',\n\n 'umount': 'mount',\n }\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print('building man pages (in docs/man)', file=sys.stderr)\n import borg\n borg.doc_mode = 'build_man'\n os.makedirs('docs/man', exist_ok=True)\n # allows us to build docs without the C modules fully loaded during help generation\n from borg.archiver import Archiver\n parser = Archiver(prog='borg').build_parser()\n borgfs_parser = Archiver(prog='borgfs').build_parser()\n\n self.generate_level('', parser, Archiver, {'borgfs': borgfs_parser})\n self.build_topic_pages(Archiver)\n self.build_intro_page()\n\n def generate_level(self, prefix, parser, Archiver, extra_choices=None):\n is_subcommand = False\n choices = {}\n for action in parser._actions:\n if action.choices is not None and 'SubParsersAction' in str(action.__class__):\n is_subcommand = True\n for cmd, parser in action.choices.items():\n choices[prefix + cmd] = parser\n if extra_choices is not None:\n choices.update(extra_choices)\n if prefix and not choices:\n return\n\n for command, parser in sorted(choices.items()):\n if command.startswith('debug') or command == 'help':\n continue\n\n if command == \"borgfs\":\n man_title = command\n else:\n man_title = 'borg-' + command.replace(' ', '-')\n print('building man page', man_title + '(1)', file=sys.stderr)\n\n is_intermediary = self.generate_level(command + ' ', parser, Archiver)\n\n doc, write = self.new_doc()\n self.write_man_header(write, man_title, parser.description)\n\n self.write_heading(write, 'SYNOPSIS')\n if is_intermediary:\n subparsers = [action for action in parser._actions if 'SubParsersAction' in str(action.__class__)][0]\n for subcommand in subparsers.choices:\n write('| borg', '[common options]', command, subcommand, '...')\n self.see_also.setdefault(command, []).append('%s-%s' % (command, subcommand))\n else:\n if command == \"borgfs\":\n write(command, end='')\n else:\n write('borg', '[common options]', command, end='')\n self.write_usage(write, parser)\n write('\\n')\n\n description, _, notes = parser.epilog.partition('\\n.. 
man NOTES')\n\n if description:\n self.write_heading(write, 'DESCRIPTION')\n write(description)\n\n if not is_intermediary:\n self.write_heading(write, 'OPTIONS')\n write('See `borg-common(1)` for common options of Borg commands.')\n write()\n self.write_options(write, parser)\n\n self.write_examples(write, command)\n\n if notes:\n self.write_heading(write, 'NOTES')\n write(notes)\n\n self.write_see_also(write, man_title)\n\n self.gen_man_page(man_title, doc.getvalue())\n\n # Generate the borg-common(1) man page with the common options.\n if 'create' in choices:\n doc, write = self.new_doc()\n man_title = 'borg-common'\n self.write_man_header(write, man_title, 'Common options of Borg commands')\n\n common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]\n\n self.write_heading(write, 'SYNOPSIS')\n self.write_options_group(write, common_options)\n self.write_see_also(write, man_title)\n self.gen_man_page(man_title, doc.getvalue())\n\n return is_subcommand\n\n def build_topic_pages(self, Archiver):\n for topic, text in Archiver.helptext.items():\n doc, write = self.new_doc()\n man_title = 'borg-' + topic\n print('building man page', man_title + '(1)', file=sys.stderr)\n\n self.write_man_header(write, man_title, 'Details regarding ' + topic)\n self.write_heading(write, 'DESCRIPTION')\n write(text)\n self.gen_man_page(man_title, doc.getvalue())\n\n def build_intro_page(self):\n print('building man page borg(1)', file=sys.stderr)\n with open('docs/man_intro.rst') as fd:\n man_intro = fd.read()\n self.gen_man_page('borg', self.rst_prelude + man_intro)\n\n def new_doc(self):\n doc = io.StringIO(self.rst_prelude)\n doc.read()\n write = self.printer(doc)\n return doc, write\n\n def printer(self, fd):\n def write(*args, **kwargs):\n print(*args, file=fd, **kwargs)\n return write\n\n def write_heading(self, write, header, char='-', double_sided=False):\n write()\n if double_sided:\n write(char * len(header))\n write(header)\n write(char * len(header))\n write()\n\n def write_man_header(self, write, title, description):\n self.write_heading(write, title, '=', double_sided=True)\n self.write_heading(write, description, double_sided=True)\n # man page metadata\n write(':Author: The Borg Collective')\n write(':Date:', datetime.utcnow().date().isoformat())\n write(':Manual section: 1')\n write(':Manual group: borg backup tool')\n write()\n\n def write_examples(self, write, command):\n command = command.replace(' ', '_')\n with open('docs/usage/%s.rst' % self.usage_group.get(command, command)) as fd:\n usage = fd.read()\n usage_include = '.. include:: %s.rst.inc' % command\n begin = usage.find(usage_include)\n end = usage.find('.. include', begin + 1)\n # If a command has a dedicated anchor, it will occur before the command's include.\n if 0 < usage.find('.. _', begin + 1) < end:\n end = usage.find('.. _', begin + 1)\n examples = usage[begin:end]\n examples = examples.replace(usage_include, '')\n examples = examples.replace('Examples\\n~~~~~~~~', '')\n examples = examples.replace('Miscellaneous Help\\n------------------', '')\n examples = examples.replace('``docs/misc/prune-example.txt``:', '``docs/misc/prune-example.txt``.')\n examples = examples.replace('.. 
highlight:: none\\n', '') # we don't support highlight\n examples = re.sub('^(~+)$', lambda matches: '+' * len(matches.group(0)), examples, flags=re.MULTILINE)\n examples = examples.strip()\n if examples:\n self.write_heading(write, 'EXAMPLES', '-')\n write(examples)\n\n def write_see_also(self, write, man_title):\n see_also = self.see_also.get(man_title.replace('borg-', ''), ())\n see_also = ['`borg-%s(1)`' % s for s in see_also]\n see_also.insert(0, '`borg-common(1)`')\n self.write_heading(write, 'SEE ALSO')\n write(', '.join(see_also))\n\n def gen_man_page(self, name, rst):\n from docutils.writers import manpage\n from docutils.core import publish_string\n from docutils.nodes import inline\n from docutils.parsers.rst import roles\n\n def issue(name, rawtext, text, lineno, inliner, options={}, content=[]):\n return [inline(rawtext, '#' + text)], []\n\n roles.register_local_role('issue', issue)\n # We give the source_path so that docutils can find relative includes\n # as-if the document where located in the docs/ directory.\n man_page = publish_string(source=rst, source_path='docs/%s.rst' % name, writer=manpage.Writer())\n with open('docs/man/%s.1' % name, 'wb') as fd:\n fd.write(man_page)\n\n def write_usage(self, write, parser):\n if any(len(o.option_strings) for o in parser._actions):\n write(' [options] ', end='')\n for option in parser._actions:\n if option.option_strings:\n continue\n write(format_metavar(option), end=' ')\n\n def write_options(self, write, parser):\n for group in parser._action_groups:\n if group.title == 'Common options' or not group._group_actions:\n continue\n title = 'arguments' if group.title == 'positional arguments' else group.title\n self.write_heading(write, title, '+')\n self.write_options_group(write, group)\n\n def write_options_group(self, write, group):\n def is_positional_group(group):\n return any(not o.option_strings for o in group._group_actions)\n\n if is_positional_group(group):\n for option in group._group_actions:\n write(option.metavar)\n write(textwrap.indent(option.help or '', ' ' * 4))\n return\n\n opts = OrderedDict()\n\n for option in group._group_actions:\n if option.metavar:\n option_fmt = '%s ' + option.metavar\n else:\n option_fmt = '%s'\n option_str = ', '.join(option_fmt % s for s in option.option_strings)\n option_desc = textwrap.dedent((option.help or '') % option.__dict__)\n opts[option_str] = textwrap.indent(option_desc, ' ' * 4)\n\n padding = len(max(opts)) + 1\n\n for option, desc in opts.items():\n write(option.ljust(padding), desc)\n\n\ndef rm(file):\n try:\n os.unlink(file)\n print('rm', file)\n except FileNotFoundError:\n pass\n\n\nclass Clean(clean):\n def run(self):\n super().run()\n for source in cython_sources:\n genc = source.replace('.pyx', '.c')\n rm(genc)\n compiled_glob = source.replace('.pyx', '.cpython*')\n for compiled in sorted(glob(compiled_glob)):\n rm(compiled)\n\ncmdclass = {\n 'build_ext': build_ext,\n 'build_usage': build_usage,\n 'build_man': build_man,\n 'sdist': Sdist,\n 'clean': Clean,\n}\n\next_modules = []\nif not on_rtd:\n compress_ext_kwargs = dict(sources=[compress_source], include_dirs=include_dirs, library_dirs=library_dirs,\n define_macros=define_macros)\n compress_ext_kwargs = setup_lz4.lz4_ext_kwargs(bundled_path='src/borg/algorithms/lz4',\n system_prefix=liblz4_prefix, system=liblz4_system,\n **compress_ext_kwargs)\n compress_ext_kwargs = setup_zstd.zstd_ext_kwargs(bundled_path='src/borg/algorithms/zstd',\n system_prefix=libzstd_prefix, system=libzstd_system,\n multithreaded=False, 
legacy=False, **compress_ext_kwargs)\n crypto_ext_kwargs = dict(sources=[crypto_ll_source], libraries=['crypto'],\n include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros)\n crypto_ext_kwargs = setup_b2.b2_ext_kwargs(bundled_path='src/borg/algorithms/blake2',\n system_prefix=libb2_prefix, system=libb2_system,\n **crypto_ext_kwargs)\n ext_modules += [\n Extension('borg.compress', **compress_ext_kwargs),\n Extension('borg.crypto.low_level', **crypto_ext_kwargs),\n Extension('borg.hashindex', [hashindex_source]),\n Extension('borg.item', [item_source]),\n Extension('borg.chunker', [chunker_source]),\n Extension('borg.algorithms.checksums', [checksums_source]),\n ]\n if not sys.platform.startswith(('win32', )):\n ext_modules.append(Extension('borg.platform.posix', [platform_posix_source]))\n if sys.platform == 'linux':\n ext_modules.append(Extension('borg.platform.linux', [platform_linux_source], libraries=['acl']))\n elif sys.platform.startswith('freebsd'):\n ext_modules.append(Extension('borg.platform.freebsd', [platform_freebsd_source]))\n elif sys.platform == 'darwin':\n ext_modules.append(Extension('borg.platform.darwin', [platform_darwin_source]))\n\nsetup(\n name='borgbackup',\n use_scm_version={\n 'write_to': 'src/borg/_version.py',\n },\n author='The Borg Collective (see AUTHORS file)',\n author_email='[email protected]',\n url='https://borgbackup.readthedocs.io/',\n description='Deduplicated, encrypted, authenticated and compressed backups',\n long_description=long_description,\n license='BSD',\n platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: BSD :: OpenBSD',\n 'Operating System :: POSIX :: BSD :: NetBSD',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Security :: Cryptography',\n 'Topic :: System :: Archiving :: Backup',\n ],\n packages=find_packages('src'),\n package_dir={'': 'src'},\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'borg = borg.archiver:main',\n 'borgfs = borg.archiver:main',\n ]\n },\n # See also the MANIFEST.in file.\n # We want to install all the files in the package directories...\n include_package_data=True,\n # ...except the source files which have been compiled (C extensions):\n exclude_package_data={\n '': ['*.c', '*.h', '*.pyx', ],\n },\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n setup_requires=['setuptools_scm>=1.7'],\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1347 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show simplified domain statuses to the registrant
### Story
As a domain manager
I want to know the status of my domain in simple language
so that I know if any action is needed or if any functions are limited
### Acceptance Criteria
- [x] Domains table on the dashboard shows a user-friendly domain status
- [ ] Show the domain status on the "Domain Overview" page
- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)
### Additional Context
**BACKGROUND**
In general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing "user-friendly" versions of the domain status.
User-friendly statuses include:
- _Note:_ "Unknown" _Domain status shows as_ "DNS needed"
- DNS needed
- Ready
- On hold
- Deleted
Refer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)
**DOMAINS TABLE**
Currently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)
**DOMAIN OVERVIEW**
Currently, we do not show the domain status when viewing the "Manage Domains" pages. The "Manage Domains" pages can be accessed by clicking the "Manage" button next to an approved domain.
The first page is the "Domain Overview." Add stylized message to the top of that page to indicate the user-friendly domain status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)
### Issue Links
Depends on: Domain Status presence for testing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/views/index.py`
Content:
```
1 from django.db.models import F
2 from django.shortcuts import render
3
4 from registrar.models import DomainApplication
5
6
7 def index(request):
8 """This page is available to anyone without logging in."""
9 context = {}
10 if request.user.is_authenticated:
11 applications = DomainApplication.objects.filter(creator=request.user)
12 # Let's exclude the approved applications since our
13 # domain_applications context will be used to populate
14 # the active applications table
15 context["domain_applications"] = applications.exclude(status="approved")
16
17 domains = request.user.permissions.values(
18 "role",
19 pk=F("domain__id"),
20 name=F("domain__name"),
21 created_time=F("domain__created_at"),
22 application_status=F("domain__domain_application__status"),
23 )
24 context["domains"] = domains
25 return render(request, "home.html", context)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py
--- a/src/registrar/views/index.py
+++ b/src/registrar/views/index.py
@@ -19,7 +19,7 @@
pk=F("domain__id"),
name=F("domain__name"),
created_time=F("domain__created_at"),
- application_status=F("domain__domain_application__status"),
+ state=F("domain__state"),
)
context["domains"] = domains
return render(request, "home.html", context)
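With the view now exposing the raw `state`, the user-friendly labels described in the issue still need a mapping somewhere (a template tag, model property, or context helper). A minimal standalone sketch of such a mapping — the helper name and the state keys are assumptions for illustration, not code from the registrar repository; only the labels come from the issue:

```python
# Hypothetical helper, not part of cisagov/manage.get.gov: maps raw domain
# states to the user-facing labels listed in the issue.
FRIENDLY_STATE_LABELS = {
    "unknown": "DNS needed",   # the issue notes "Unknown" should display as "DNS needed"
    "dns needed": "DNS needed",
    "ready": "Ready",
    "onhold": "On hold",
    "deleted": "Deleted",
}


def friendly_state(state):
    """Return the user-facing label for a raw domain state string."""
    return FRIENDLY_STATE_LABELS.get((state or "").strip().lower(), "DNS needed")


print(friendly_state("ready"))  # Ready
print(friendly_state(None))     # DNS needed (unknown states fall back per the issue)
```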
| {"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -19,7 +19,7 @@\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n- application_status=F(\"domain__domain_application__status\"),\n+ state=F(\"domain__state\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "issue": "Show simplified domain statuses to the registrant\n### Story\r\n\r\nAs a domain manager\r\nI want to know the status of my domain in simple language\r\nso that I know if any action is needed or if any functions are limited\r\n\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] Domains table on the dashboard shows a user-friendly domain status\r\n- [ ] Show the domain status on the \"Domain Overview\" page\r\n- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)\r\n\r\n\r\n### Additional Context\r\n**BACKGROUND**\r\nIn general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing \"user-friendly\" versions of the domain status. \r\n\r\nUser-friendly statuses include: \r\n- _Note:_ \"Unknown\" _Domain status shows as_ \"DNS needed\"\r\n- DNS needed \r\n- Ready\r\n- On hold\r\n- Deleted\r\n\r\nRefer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)\r\n\r\n**DOMAINS TABLE**\r\nCurrently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status. \r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n**DOMAIN OVERVIEW**\r\nCurrently, we do not show the domain status when viewing the \"Manage Domains\" pages. The \"Manage Domains\" pages can be accessed by clicking the \"Manage\" button next to an approved domain. 
\r\n\r\nThe first page is the \"Domain Overview.\" Add stylized message to the top of that page to indicate the user-friendly domain status.\r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n### Issue Links\r\n\r\nDepends on: Domain Status presence for testing\n", "before_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n application_status=F(\"domain__domain_application__status\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}], "after_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n state=F(\"domain__state\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}]} |
gh_patches_debug_1348 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-11429 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document serve failed today
## Details
* Read the Docs project URL: https://readthedocs.org/projects/{your_project_slug}/
* Build URL (if applicable): https://readthedocs.org/projects/{your_project_slug}/builds/{build_id}/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/HydrogenSulfate/
## Expected Result
*A description of what you wanted to happen*
Today our documentation cannot be accessed and returns a 404: <https://paddlescience-docs.readthedocs.io/zh/latest/>. Everything was fine yesterday. Can anyone help?
## Actual Result
*A description of what actually happened*
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/projects/constants.py`
Content:
```
1 """
2 Project constants.
3
4 Default values and other various configuration for projects, including available
5 theme names and repository types.
6 """
7
8 import os
9 import re
10
11 from django.utils.translation import gettext_lazy as _
12
13 SPHINX = "sphinx"
14 MKDOCS = "mkdocs"
15 SPHINX_HTMLDIR = "sphinx_htmldir"
16 SPHINX_SINGLEHTML = "sphinx_singlehtml"
17 # This type is defined by the users in their mkdocs.yml file.
18 MKDOCS_HTML = "mkdocs_html"
19 GENERIC = "generic"
20 DOCUMENTATION_CHOICES = (
21 (SPHINX, _("Sphinx Html")),
22 (MKDOCS, _("Mkdocs")),
23 (SPHINX_HTMLDIR, _("Sphinx HtmlDir")),
24 (SPHINX_SINGLEHTML, _("Sphinx Single Page HTML")),
25 )
26 DOCTYPE_CHOICES = DOCUMENTATION_CHOICES + (
27 (MKDOCS_HTML, _("Mkdocs Html Pages")),
28 (GENERIC, _("Generic")),
29 )
30
31
32 MEDIA_TYPE_HTML = "html"
33 MEDIA_TYPE_PDF = "pdf"
34 MEDIA_TYPE_EPUB = "epub"
35 MEDIA_TYPE_HTMLZIP = "htmlzip"
36 MEDIA_TYPE_JSON = "json"
37 DOWNLOADABLE_MEDIA_TYPES = (
38 MEDIA_TYPE_PDF,
39 MEDIA_TYPE_EPUB,
40 MEDIA_TYPE_HTMLZIP,
41 )
42 MEDIA_TYPES = (
43 MEDIA_TYPE_HTML,
44 MEDIA_TYPE_PDF,
45 MEDIA_TYPE_EPUB,
46 MEDIA_TYPE_HTMLZIP,
47 MEDIA_TYPE_JSON,
48 )
49
50 BUILD_COMMANDS_OUTPUT_PATH = "_readthedocs/"
51 BUILD_COMMANDS_OUTPUT_PATH_HTML = os.path.join(BUILD_COMMANDS_OUTPUT_PATH, "html")
52
53 SAMPLE_FILES = (
54 ("Installation", "projects/samples/installation.rst.html"),
55 ("Getting started", "projects/samples/getting_started.rst.html"),
56 )
57
58 SCRAPE_CONF_SETTINGS = [
59 "copyright",
60 "project",
61 "version",
62 "release",
63 "source_suffix",
64 "html_theme",
65 "extensions",
66 ]
67
68 HEADING_MARKUP = (
69 (1, "="),
70 (2, "-"),
71 (3, "^"),
72 (4, '"'),
73 )
74
75 LIVE_STATUS = 1
76 DELETED_STATUS = 99
77
78 STATUS_CHOICES = (
79 (LIVE_STATUS, _("Live")),
80 (DELETED_STATUS, _("Deleted")),
81 )
82
83 REPO_TYPE_GIT = "git"
84
85 # TODO: Remove this since we only have 1 type.
86 REPO_CHOICES = ((REPO_TYPE_GIT, _("Git")),)
87
88 PUBLIC = "public"
89 PRIVATE = "private"
90
91 PRIVACY_CHOICES = (
92 (PUBLIC, _("Public")),
93 (PRIVATE, _("Private")),
94 )
95
96 IMPORTANT_VERSION_FILTERS = {
97 "slug": "important",
98 }
99
100 # in the future this constant can be replaced with a implementation that
101 # detect all available Python interpreters in the fly (Maybe using
102 # update-alternatives linux tool family?).
103 PYTHON_CHOICES = (
104 ("python", _("CPython 2.x")),
105 ("python3", _("CPython 3.x")),
106 )
107
108 # Via http://sphinx-doc.org/latest/config.html#confval-language
109 # Languages supported for the lang_slug in the URL
110 # Translations for builtin Sphinx messages only available for a subset of these
111 LANGUAGES = (
112 ("aa", "Afar"),
113 ("ab", "Abkhaz"),
114 ("acr", "Achi"),
115 ("af", "Afrikaans"),
116 ("agu", "Awakateko"),
117 ("am", "Amharic"),
118 ("ar", "Arabic"),
119 ("as", "Assamese"),
120 ("ay", "Aymara"),
121 ("az", "Azerbaijani"),
122 ("ba", "Bashkir"),
123 ("be", "Belarusian"),
124 ("bg", "Bulgarian"),
125 ("bh", "Bihari"),
126 ("bi", "Bislama"),
127 ("bn", "Bengali"),
128 ("bo", "Tibetan"),
129 ("br", "Breton"),
130 ("ca", "Catalan"),
131 ("caa", "Ch'orti'"),
132 ("cac", "Chuj"),
133 ("cab", "Garífuna"),
134 ("cak", "Kaqchikel"),
135 ("co", "Corsican"),
136 ("cs", "Czech"),
137 ("cy", "Welsh"),
138 ("da", "Danish"),
139 ("de", "German"),
140 ("dz", "Dzongkha"),
141 ("el", "Greek"),
142 ("en", "English"),
143 ("eo", "Esperanto"),
144 ("es", "Spanish"),
145 ("et", "Estonian"),
146 ("eu", "Basque"),
147 ("fa", "Iranian"),
148 ("fi", "Finnish"),
149 ("fj", "Fijian"),
150 ("fo", "Faroese"),
151 ("fr", "French"),
152 ("fy", "Western Frisian"),
153 ("ga", "Irish"),
154 ("gd", "Scottish Gaelic"),
155 ("gl", "Galician"),
156 ("gn", "Guarani"),
157 ("gu", "Gujarati"),
158 ("ha", "Hausa"),
159 ("hi", "Hindi"),
160 ("he", "Hebrew"),
161 ("hr", "Croatian"),
162 ("hu", "Hungarian"),
163 ("hy", "Armenian"),
164 ("ia", "Interlingua"),
165 ("id", "Indonesian"),
166 ("ie", "Interlingue"),
167 ("ik", "Inupiaq"),
168 ("is", "Icelandic"),
169 ("it", "Italian"),
170 ("itz", "Itza'"),
171 ("iu", "Inuktitut"),
172 ("ixl", "Ixil"),
173 ("ja", "Japanese"),
174 ("jac", "Popti'"),
175 ("jv", "Javanese"),
176 ("ka", "Georgian"),
177 ("kjb", "Q'anjob'al"),
178 ("kek", "Q'eqchi'"),
179 ("kk", "Kazakh"),
180 ("kl", "Kalaallisut"),
181 ("km", "Khmer"),
182 ("kn", "Kannada"),
183 ("knj", "Akateko"),
184 ("ko", "Korean"),
185 ("ks", "Kashmiri"),
186 ("ku", "Kurdish"),
187 ("ky", "Kyrgyz"),
188 ("la", "Latin"),
189 ("ln", "Lingala"),
190 ("lo", "Lao"),
191 ("lt", "Lithuanian"),
192 ("lv", "Latvian"),
193 ("mam", "Mam"),
194 ("mg", "Malagasy"),
195 ("mi", "Maori"),
196 ("mk", "Macedonian"),
197 ("ml", "Malayalam"),
198 ("mn", "Mongolian"),
199 ("mop", "Mopan"),
200 ("mr", "Marathi"),
201 ("ms", "Malay"),
202 ("mt", "Maltese"),
203 ("my", "Burmese"),
204 ("na", "Nauru"),
205 ("ne", "Nepali"),
206 ("nl", "Dutch"),
207 ("no", "Norwegian"),
208 ("oc", "Occitan"),
209 ("om", "Oromo"),
210 ("or", "Oriya"),
211 ("pa", "Panjabi"),
212 ("pl", "Polish"),
213 ("pnb", "Western Punjabi"),
214 ("poc", "Poqomam"),
215 ("poh", "Poqomchi"),
216 ("ps", "Pashto"),
217 ("pt", "Portuguese"),
218 ("qu", "Quechua"),
219 ("quc", "K'iche'"),
220 ("qum", "Sipakapense"),
221 ("quv", "Sakapulteko"),
222 ("rm", "Romansh"),
223 ("rn", "Kirundi"),
224 ("ro", "Romanian"),
225 ("ru", "Russian"),
226 ("rw", "Kinyarwanda"),
227 ("sa", "Sanskrit"),
228 ("sd", "Sindhi"),
229 ("sg", "Sango"),
230 ("si", "Sinhala"),
231 ("sk", "Slovak"),
232 ("skr", "Saraiki"),
233 ("sl", "Slovenian"),
234 ("sm", "Samoan"),
235 ("sn", "Shona"),
236 ("so", "Somali"),
237 ("sq", "Albanian"),
238 ("sr", "Serbian"),
239 ("ss", "Swati"),
240 ("st", "Southern Sotho"),
241 ("su", "Sudanese"),
242 ("sv", "Swedish"),
243 ("sw", "Swahili"),
244 ("ta", "Tamil"),
245 ("te", "Telugu"),
246 ("tg", "Tajik"),
247 ("th", "Thai"),
248 ("ti", "Tigrinya"),
249 ("tk", "Turkmen"),
250 ("tl", "Tagalog"),
251 ("tn", "Tswana"),
252 ("to", "Tonga"),
253 ("tr", "Turkish"),
254 ("ts", "Tsonga"),
255 ("tt", "Tatar"),
256 ("ttc", "Tektiteko"),
257 ("tzj", "Tz'utujil"),
258 ("tw", "Twi"),
259 ("ug", "Uyghur"),
260 ("uk", "Ukrainian"),
261 ("ur", "Urdu"),
262 ("usp", "Uspanteko"),
263 ("uz", "Uzbek"),
264 ("vi", "Vietnamese"),
265 ("vo", "Volapuk"),
266 ("wo", "Wolof"),
267 ("xh", "Xhosa"),
268 ("xin", "Xinka"),
269 ("yi", "Yiddish"),
270 ("yo", "Yoruba"),
271 ("za", "Zhuang"),
272 # TODO: migrate those projects that are currently using "zh" as language.
273 # This is an invalid language code, so the first step is remove it from the
274 # list of possible languages.
275 # https://github.com/readthedocs/readthedocs.org/issues/11387
276 #
277 # In [1]: Project.objects.filter(language='zh').count()
278 # Out[1]: 1485
279 #
280 # ("zh", "Chinese"),
281 ("zu", "Zulu"),
282 # Try these to test our non-2 letter language support
283 ("nb-no", "Norwegian Bokmal"),
284 ("pt-br", "Brazilian Portuguese"),
285 ("es-mx", "Mexican Spanish"),
286 ("uk-ua", "Ukrainian"),
287 ("zh-cn", "Simplified Chinese"),
288 ("zh-tw", "Traditional Chinese"),
289 )
290 LANGUAGE_CODES = [code for code, *_ in LANGUAGES]
291
292 # Normalize the language codes to lowercase with dashes,
293 # we use them to match the language codes in the URL.
294 # The old language codes were uppercase with underscores,
295 # and are deprecated, but we still need to support them.
296 old_language_codes = [
297 "nb_NO",
298 "pt_BR",
299 "es_MX",
300 "uk_UA",
301 "zh_CN",
302 "zh_TW",
303 ]
304 OLD_LANGUAGES_CODE_MAPPING = {
305 code.lower().replace("_", "-"): code for code in old_language_codes
306 }
307
308 LANGUAGES_REGEX = "|".join(
309 [
310 re.escape(code)
311 for code in LANGUAGE_CODES + list(OLD_LANGUAGES_CODE_MAPPING.values())
312 ]
313 )
314
315 PROGRAMMING_LANGUAGES = (
316 ("words", "Only Words"),
317 ("py", "Python"),
318 ("js", "JavaScript"),
319 ("php", "PHP"),
320 ("ruby", "Ruby"),
321 ("perl", "Perl"),
322 ("java", "Java"),
323 ("go", "Go"),
324 ("julia", "Julia"),
325 ("c", "C"),
326 ("csharp", "C#"),
327 ("cpp", "C++"),
328 ("objc", "Objective-C"),
329 ("css", "CSS"),
330 ("ts", "TypeScript"),
331 ("swift", "Swift"),
332 ("vb", "Visual Basic"),
333 ("r", "R"),
334 ("scala", "Scala"),
335 ("groovy", "Groovy"),
336 ("coffee", "CoffeeScript"),
337 ("lua", "Lua"),
338 ("haskell", "Haskell"),
339 ("other", "Other"),
340 )
341
342 PROJECT_PK_REGEX = r"(?:[-\w]+)"
343 PROJECT_SLUG_REGEX = r"(?:[-\w]+)"
344
345 GITHUB_REGEXS = [
346 re.compile(r"github.com/(.+)/(.+)(?:\.git){1}$"),
347 # This must come before the one without a / to make sure we don't capture the /
348 re.compile(r"github.com/(.+)/(.+)/"),
349 re.compile(r"github.com/(.+)/(.+)"),
350 re.compile(r"github.com:(.+)/(.+)\.git$"),
351 ]
352 BITBUCKET_REGEXS = [
353 re.compile(r"bitbucket.org/(.+)/(.+)\.git$"),
354 re.compile(r"@bitbucket.org/(.+)/(.+)\.git$"),
355 # This must come before the one without a / to make sure we don't capture the /
356 re.compile(r"bitbucket.org/(.+)/(.+)/"),
357 re.compile(r"bitbucket.org/(.+)/(.+)"),
358 re.compile(r"bitbucket.org:(.+)/(.+)\.git$"),
359 ]
360 GITLAB_REGEXS = [
361 re.compile(r"gitlab.com/(.+)/(.+)(?:\.git){1}$"),
362 # This must come before the one without a / to make sure we don't capture the /
363 re.compile(r"gitlab.com/(.+)/(.+)/"),
364 re.compile(r"gitlab.com/(.+)/(.+)"),
365 re.compile(r"gitlab.com:(.+)/(.+)\.git$"),
366 ]
367 GITHUB_URL = (
368 "https://github.com/{user}/{repo}/"
369 "{action}/{version}{docroot}{path}{source_suffix}"
370 )
371 GITHUB_COMMIT_URL = "https://github.com/{user}/{repo}/commit/{commit}"
372 GITHUB_PULL_REQUEST_URL = "https://github.com/{user}/{repo}/pull/{number}"
373 GITHUB_PULL_REQUEST_COMMIT_URL = (
374 "https://github.com/{user}/{repo}/pull/{number}/commits/{commit}"
375 )
376 BITBUCKET_URL = (
377 "https://bitbucket.org/{user}/{repo}/src/{version}{docroot}{path}{source_suffix}"
378 )
379 BITBUCKET_COMMIT_URL = "https://bitbucket.org/{user}/{repo}/commits/{commit}"
380 GITLAB_URL = (
381 "https://gitlab.com/{user}/{repo}/"
382 "{action}/{version}{docroot}{path}{source_suffix}"
383 )
384 GITLAB_COMMIT_URL = "https://gitlab.com/{user}/{repo}/commit/{commit}"
385 GITLAB_MERGE_REQUEST_COMMIT_URL = (
386 "https://gitlab.com/{user}/{repo}/commit/{commit}?merge_request_iid={number}"
387 )
388 GITLAB_MERGE_REQUEST_URL = "https://gitlab.com/{user}/{repo}/merge_requests/{number}"
389
390 # Patterns to pull merge/pull request from providers
391 GITHUB_PR_PULL_PATTERN = "pull/{id}/head:external-{id}"
392 GITLAB_MR_PULL_PATTERN = "merge-requests/{id}/head:external-{id}"
393
394 # Git provider names
395 GITHUB_BRAND = "GitHub"
396 GITLAB_BRAND = "GitLab"
397
398 # SSL statuses
399 SSL_STATUS_VALID = "valid"
400 SSL_STATUS_INVALID = "invalid"
401 SSL_STATUS_PENDING = "pending"
402 SSL_STATUS_UNKNOWN = "unknown"
403 SSL_STATUS_CHOICES = (
404 (SSL_STATUS_VALID, _("Valid and active")),
405 (SSL_STATUS_INVALID, _("Invalid")),
406 (SSL_STATUS_PENDING, _("Pending")),
407 (SSL_STATUS_UNKNOWN, _("Unknown")),
408 )
409
410 MULTIPLE_VERSIONS_WITH_TRANSLATIONS = "multiple_versions_with_translations"
411 MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS = "multiple_versions_without_translations"
412 SINGLE_VERSION_WITHOUT_TRANSLATIONS = "single_version_without_translations"
413 VERSIONING_SCHEME_CHOICES = (
414 (
415 MULTIPLE_VERSIONS_WITH_TRANSLATIONS,
416 _("Multiple versions with translations (/<language>/<version>/<filename>)"),
417 ),
418 (
419 MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS,
420 _("Multiple versions without translations (/<version>/<filename>)"),
421 ),
422 (
423 SINGLE_VERSION_WITHOUT_TRANSLATIONS,
424 _("Single version without translations (/<filename>)"),
425 ),
426 )
427
428
429 ADDONS_FLYOUT_SORTING_ALPHABETICALLY = "alphabetically"
430 # Compatibility to keep the behavior of the old flyout.
431 # This isn't a good algorithm, but it's a way to keep the old behavior in case we need it.
432 ADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE = "semver-readthedocs-compatible"
433 # https://pypi.org/project/packaging/
434 ADDONS_FLYOUT_SORTING_PYTHON_PACKAGING = "python-packaging"
435 ADDONS_FLYOUT_SORTING_CALVER = "calver"
436 # Let the user to define a custom pattern and use BumpVer to parse and sort the versions.
437 # https://github.com/mbarkhau/bumpver#pattern-examples
438 ADDONS_FLYOUT_SORTING_CUSTOM_PATTERN = "custom-pattern"
439
440 ADDONS_FLYOUT_SORTING_CHOICES = (
441 (ADDONS_FLYOUT_SORTING_ALPHABETICALLY, _("Alphabetically")),
442 (ADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE, _("SemVer (Read the Docs)")),
443 (
444 ADDONS_FLYOUT_SORTING_PYTHON_PACKAGING,
445 _("Python Packaging (PEP 440 and PEP 425)"),
446 ),
447 (ADDONS_FLYOUT_SORTING_CALVER, _("CalVer (YYYY.0M.0M)")),
448 (ADDONS_FLYOUT_SORTING_CUSTOM_PATTERN, _("Define your own pattern")),
449 )
450
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/projects/constants.py b/readthedocs/projects/constants.py
--- a/readthedocs/projects/constants.py
+++ b/readthedocs/projects/constants.py
@@ -310,6 +310,11 @@
re.escape(code)
for code in LANGUAGE_CODES + list(OLD_LANGUAGES_CODE_MAPPING.values())
]
+ # Add "zh" here to be able to keep serving projects with this old invalid language code.
+ # We don't allow new projects to select this language code anymore.
+ #
+ # https://github.com/readthedocs/readthedocs.org/issues/11428
+ + ["zh"]
)
PROGRAMMING_LANGUAGES = (
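A standalone sanity check of the idea behind the patch (this sketch does not import the Read the Docs code, and the code list is trimmed for brevity): appending a bare "zh" to the escaped-code alternation lets existing /zh/<version>/ URLs keep resolving while "zh" stays out of the selectable languages.

```python
import re

# Trimmed stand-in for LANGUAGE_CODES; the real list lives in
# readthedocs/projects/constants.py.
language_codes = ["en", "pt-br", "zh-cn", "zh-tw"]
languages_regex = "|".join([re.escape(code) for code in language_codes] + ["zh"])

url = re.compile(rf"^/(?:{languages_regex})/latest/")
print(bool(url.match("/zh/latest/")))     # True  -> old projects keep serving
print(bool(url.match("/zh-cn/latest/")))  # True  -> normal codes unaffected
```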
| {"golden_diff": "diff --git a/readthedocs/projects/constants.py b/readthedocs/projects/constants.py\n--- a/readthedocs/projects/constants.py\n+++ b/readthedocs/projects/constants.py\n@@ -310,6 +310,11 @@\n re.escape(code)\n for code in LANGUAGE_CODES + list(OLD_LANGUAGES_CODE_MAPPING.values())\n ]\n+ # Add \"zh\" here to be able to keep serving projects with this old invalid language code.\n+ # We don't allow new projects to select this language code anymore.\n+ #\n+ # https://github.com/readthedocs/readthedocs.org/issues/11428\n+ + [\"zh\"]\n )\n \n PROGRAMMING_LANGUAGES = (\n", "issue": "Document serve failed today\n## Details\r\n\r\n* Read the Docs project URL: https://readthedocs.org/projects/{your_project_slug}/\r\n* Build URL (if applicable): https://readthedocs.org/projects/{your_project_slug}/builds/{build_id}/\r\n* Read the Docs username (if applicable): https://readthedocs.org/profiles/HydrogenSulfate/\r\n\r\n## Expected Result\r\n\r\n*A description of what you wanted to happen*\r\nToday our document can not be accessed and report 404: <https://paddlescience-docs.readthedocs.io/zh/latest/>. But all things right yesterday. Can any one help?\r\n## Actual Result\r\n\r\n*A description of what actually happened*\r\n\n", "before_files": [{"content": "\"\"\"\nProject constants.\n\nDefault values and other various configuration for projects, including available\ntheme names and repository types.\n\"\"\"\n\nimport os\nimport re\n\nfrom django.utils.translation import gettext_lazy as _\n\nSPHINX = \"sphinx\"\nMKDOCS = \"mkdocs\"\nSPHINX_HTMLDIR = \"sphinx_htmldir\"\nSPHINX_SINGLEHTML = \"sphinx_singlehtml\"\n# This type is defined by the users in their mkdocs.yml file.\nMKDOCS_HTML = \"mkdocs_html\"\nGENERIC = \"generic\"\nDOCUMENTATION_CHOICES = (\n (SPHINX, _(\"Sphinx Html\")),\n (MKDOCS, _(\"Mkdocs\")),\n (SPHINX_HTMLDIR, _(\"Sphinx HtmlDir\")),\n (SPHINX_SINGLEHTML, _(\"Sphinx Single Page HTML\")),\n)\nDOCTYPE_CHOICES = DOCUMENTATION_CHOICES + (\n (MKDOCS_HTML, _(\"Mkdocs Html Pages\")),\n (GENERIC, _(\"Generic\")),\n)\n\n\nMEDIA_TYPE_HTML = \"html\"\nMEDIA_TYPE_PDF = \"pdf\"\nMEDIA_TYPE_EPUB = \"epub\"\nMEDIA_TYPE_HTMLZIP = \"htmlzip\"\nMEDIA_TYPE_JSON = \"json\"\nDOWNLOADABLE_MEDIA_TYPES = (\n MEDIA_TYPE_PDF,\n MEDIA_TYPE_EPUB,\n MEDIA_TYPE_HTMLZIP,\n)\nMEDIA_TYPES = (\n MEDIA_TYPE_HTML,\n MEDIA_TYPE_PDF,\n MEDIA_TYPE_EPUB,\n MEDIA_TYPE_HTMLZIP,\n MEDIA_TYPE_JSON,\n)\n\nBUILD_COMMANDS_OUTPUT_PATH = \"_readthedocs/\"\nBUILD_COMMANDS_OUTPUT_PATH_HTML = os.path.join(BUILD_COMMANDS_OUTPUT_PATH, \"html\")\n\nSAMPLE_FILES = (\n (\"Installation\", \"projects/samples/installation.rst.html\"),\n (\"Getting started\", \"projects/samples/getting_started.rst.html\"),\n)\n\nSCRAPE_CONF_SETTINGS = [\n \"copyright\",\n \"project\",\n \"version\",\n \"release\",\n \"source_suffix\",\n \"html_theme\",\n \"extensions\",\n]\n\nHEADING_MARKUP = (\n (1, \"=\"),\n (2, \"-\"),\n (3, \"^\"),\n (4, '\"'),\n)\n\nLIVE_STATUS = 1\nDELETED_STATUS = 99\n\nSTATUS_CHOICES = (\n (LIVE_STATUS, _(\"Live\")),\n (DELETED_STATUS, _(\"Deleted\")),\n)\n\nREPO_TYPE_GIT = \"git\"\n\n# TODO: Remove this since we only have 1 type.\nREPO_CHOICES = ((REPO_TYPE_GIT, _(\"Git\")),)\n\nPUBLIC = \"public\"\nPRIVATE = \"private\"\n\nPRIVACY_CHOICES = (\n (PUBLIC, _(\"Public\")),\n (PRIVATE, _(\"Private\")),\n)\n\nIMPORTANT_VERSION_FILTERS = {\n \"slug\": \"important\",\n}\n\n# in the future this constant can be replaced with a implementation that\n# detect all available Python interpreters in the fly (Maybe using\n# 
update-alternatives linux tool family?).\nPYTHON_CHOICES = (\n (\"python\", _(\"CPython 2.x\")),\n (\"python3\", _(\"CPython 3.x\")),\n)\n\n# Via http://sphinx-doc.org/latest/config.html#confval-language\n# Languages supported for the lang_slug in the URL\n# Translations for builtin Sphinx messages only available for a subset of these\nLANGUAGES = (\n (\"aa\", \"Afar\"),\n (\"ab\", \"Abkhaz\"),\n (\"acr\", \"Achi\"),\n (\"af\", \"Afrikaans\"),\n (\"agu\", \"Awakateko\"),\n (\"am\", \"Amharic\"),\n (\"ar\", \"Arabic\"),\n (\"as\", \"Assamese\"),\n (\"ay\", \"Aymara\"),\n (\"az\", \"Azerbaijani\"),\n (\"ba\", \"Bashkir\"),\n (\"be\", \"Belarusian\"),\n (\"bg\", \"Bulgarian\"),\n (\"bh\", \"Bihari\"),\n (\"bi\", \"Bislama\"),\n (\"bn\", \"Bengali\"),\n (\"bo\", \"Tibetan\"),\n (\"br\", \"Breton\"),\n (\"ca\", \"Catalan\"),\n (\"caa\", \"Ch'orti'\"),\n (\"cac\", \"Chuj\"),\n (\"cab\", \"Gar\u00edfuna\"),\n (\"cak\", \"Kaqchikel\"),\n (\"co\", \"Corsican\"),\n (\"cs\", \"Czech\"),\n (\"cy\", \"Welsh\"),\n (\"da\", \"Danish\"),\n (\"de\", \"German\"),\n (\"dz\", \"Dzongkha\"),\n (\"el\", \"Greek\"),\n (\"en\", \"English\"),\n (\"eo\", \"Esperanto\"),\n (\"es\", \"Spanish\"),\n (\"et\", \"Estonian\"),\n (\"eu\", \"Basque\"),\n (\"fa\", \"Iranian\"),\n (\"fi\", \"Finnish\"),\n (\"fj\", \"Fijian\"),\n (\"fo\", \"Faroese\"),\n (\"fr\", \"French\"),\n (\"fy\", \"Western Frisian\"),\n (\"ga\", \"Irish\"),\n (\"gd\", \"Scottish Gaelic\"),\n (\"gl\", \"Galician\"),\n (\"gn\", \"Guarani\"),\n (\"gu\", \"Gujarati\"),\n (\"ha\", \"Hausa\"),\n (\"hi\", \"Hindi\"),\n (\"he\", \"Hebrew\"),\n (\"hr\", \"Croatian\"),\n (\"hu\", \"Hungarian\"),\n (\"hy\", \"Armenian\"),\n (\"ia\", \"Interlingua\"),\n (\"id\", \"Indonesian\"),\n (\"ie\", \"Interlingue\"),\n (\"ik\", \"Inupiaq\"),\n (\"is\", \"Icelandic\"),\n (\"it\", \"Italian\"),\n (\"itz\", \"Itza'\"),\n (\"iu\", \"Inuktitut\"),\n (\"ixl\", \"Ixil\"),\n (\"ja\", \"Japanese\"),\n (\"jac\", \"Popti'\"),\n (\"jv\", \"Javanese\"),\n (\"ka\", \"Georgian\"),\n (\"kjb\", \"Q'anjob'al\"),\n (\"kek\", \"Q'eqchi'\"),\n (\"kk\", \"Kazakh\"),\n (\"kl\", \"Kalaallisut\"),\n (\"km\", \"Khmer\"),\n (\"kn\", \"Kannada\"),\n (\"knj\", \"Akateko\"),\n (\"ko\", \"Korean\"),\n (\"ks\", \"Kashmiri\"),\n (\"ku\", \"Kurdish\"),\n (\"ky\", \"Kyrgyz\"),\n (\"la\", \"Latin\"),\n (\"ln\", \"Lingala\"),\n (\"lo\", \"Lao\"),\n (\"lt\", \"Lithuanian\"),\n (\"lv\", \"Latvian\"),\n (\"mam\", \"Mam\"),\n (\"mg\", \"Malagasy\"),\n (\"mi\", \"Maori\"),\n (\"mk\", \"Macedonian\"),\n (\"ml\", \"Malayalam\"),\n (\"mn\", \"Mongolian\"),\n (\"mop\", \"Mopan\"),\n (\"mr\", \"Marathi\"),\n (\"ms\", \"Malay\"),\n (\"mt\", \"Maltese\"),\n (\"my\", \"Burmese\"),\n (\"na\", \"Nauru\"),\n (\"ne\", \"Nepali\"),\n (\"nl\", \"Dutch\"),\n (\"no\", \"Norwegian\"),\n (\"oc\", \"Occitan\"),\n (\"om\", \"Oromo\"),\n (\"or\", \"Oriya\"),\n (\"pa\", \"Panjabi\"),\n (\"pl\", \"Polish\"),\n (\"pnb\", \"Western Punjabi\"),\n (\"poc\", \"Poqomam\"),\n (\"poh\", \"Poqomchi\"),\n (\"ps\", \"Pashto\"),\n (\"pt\", \"Portuguese\"),\n (\"qu\", \"Quechua\"),\n (\"quc\", \"K'iche'\"),\n (\"qum\", \"Sipakapense\"),\n (\"quv\", \"Sakapulteko\"),\n (\"rm\", \"Romansh\"),\n (\"rn\", \"Kirundi\"),\n (\"ro\", \"Romanian\"),\n (\"ru\", \"Russian\"),\n (\"rw\", \"Kinyarwanda\"),\n (\"sa\", \"Sanskrit\"),\n (\"sd\", \"Sindhi\"),\n (\"sg\", \"Sango\"),\n (\"si\", \"Sinhala\"),\n (\"sk\", \"Slovak\"),\n (\"skr\", \"Saraiki\"),\n (\"sl\", \"Slovenian\"),\n (\"sm\", \"Samoan\"),\n (\"sn\", \"Shona\"),\n (\"so\", \"Somali\"),\n (\"sq\", 
\"Albanian\"),\n (\"sr\", \"Serbian\"),\n (\"ss\", \"Swati\"),\n (\"st\", \"Southern Sotho\"),\n (\"su\", \"Sudanese\"),\n (\"sv\", \"Swedish\"),\n (\"sw\", \"Swahili\"),\n (\"ta\", \"Tamil\"),\n (\"te\", \"Telugu\"),\n (\"tg\", \"Tajik\"),\n (\"th\", \"Thai\"),\n (\"ti\", \"Tigrinya\"),\n (\"tk\", \"Turkmen\"),\n (\"tl\", \"Tagalog\"),\n (\"tn\", \"Tswana\"),\n (\"to\", \"Tonga\"),\n (\"tr\", \"Turkish\"),\n (\"ts\", \"Tsonga\"),\n (\"tt\", \"Tatar\"),\n (\"ttc\", \"Tektiteko\"),\n (\"tzj\", \"Tz'utujil\"),\n (\"tw\", \"Twi\"),\n (\"ug\", \"Uyghur\"),\n (\"uk\", \"Ukrainian\"),\n (\"ur\", \"Urdu\"),\n (\"usp\", \"Uspanteko\"),\n (\"uz\", \"Uzbek\"),\n (\"vi\", \"Vietnamese\"),\n (\"vo\", \"Volapuk\"),\n (\"wo\", \"Wolof\"),\n (\"xh\", \"Xhosa\"),\n (\"xin\", \"Xinka\"),\n (\"yi\", \"Yiddish\"),\n (\"yo\", \"Yoruba\"),\n (\"za\", \"Zhuang\"),\n # TODO: migrate those projects that are currently using \"zh\" as language.\n # This is an invalid language code, so the first step is remove it from the\n # list of possible languages.\n # https://github.com/readthedocs/readthedocs.org/issues/11387\n #\n # In [1]: Project.objects.filter(language='zh').count()\n # Out[1]: 1485\n #\n # (\"zh\", \"Chinese\"),\n (\"zu\", \"Zulu\"),\n # Try these to test our non-2 letter language support\n (\"nb-no\", \"Norwegian Bokmal\"),\n (\"pt-br\", \"Brazilian Portuguese\"),\n (\"es-mx\", \"Mexican Spanish\"),\n (\"uk-ua\", \"Ukrainian\"),\n (\"zh-cn\", \"Simplified Chinese\"),\n (\"zh-tw\", \"Traditional Chinese\"),\n)\nLANGUAGE_CODES = [code for code, *_ in LANGUAGES]\n\n# Normalize the language codes to lowercase with dashes,\n# we use them to match the language codes in the URL.\n# The old language codes were uppercase with underscores,\n# and are deprecated, but we still need to support them.\nold_language_codes = [\n \"nb_NO\",\n \"pt_BR\",\n \"es_MX\",\n \"uk_UA\",\n \"zh_CN\",\n \"zh_TW\",\n]\nOLD_LANGUAGES_CODE_MAPPING = {\n code.lower().replace(\"_\", \"-\"): code for code in old_language_codes\n}\n\nLANGUAGES_REGEX = \"|\".join(\n [\n re.escape(code)\n for code in LANGUAGE_CODES + list(OLD_LANGUAGES_CODE_MAPPING.values())\n ]\n)\n\nPROGRAMMING_LANGUAGES = (\n (\"words\", \"Only Words\"),\n (\"py\", \"Python\"),\n (\"js\", \"JavaScript\"),\n (\"php\", \"PHP\"),\n (\"ruby\", \"Ruby\"),\n (\"perl\", \"Perl\"),\n (\"java\", \"Java\"),\n (\"go\", \"Go\"),\n (\"julia\", \"Julia\"),\n (\"c\", \"C\"),\n (\"csharp\", \"C#\"),\n (\"cpp\", \"C++\"),\n (\"objc\", \"Objective-C\"),\n (\"css\", \"CSS\"),\n (\"ts\", \"TypeScript\"),\n (\"swift\", \"Swift\"),\n (\"vb\", \"Visual Basic\"),\n (\"r\", \"R\"),\n (\"scala\", \"Scala\"),\n (\"groovy\", \"Groovy\"),\n (\"coffee\", \"CoffeeScript\"),\n (\"lua\", \"Lua\"),\n (\"haskell\", \"Haskell\"),\n (\"other\", \"Other\"),\n)\n\nPROJECT_PK_REGEX = r\"(?:[-\\w]+)\"\nPROJECT_SLUG_REGEX = r\"(?:[-\\w]+)\"\n\nGITHUB_REGEXS = [\n re.compile(r\"github.com/(.+)/(.+)(?:\\.git){1}$\"),\n # This must come before the one without a / to make sure we don't capture the /\n re.compile(r\"github.com/(.+)/(.+)/\"),\n re.compile(r\"github.com/(.+)/(.+)\"),\n re.compile(r\"github.com:(.+)/(.+)\\.git$\"),\n]\nBITBUCKET_REGEXS = [\n re.compile(r\"bitbucket.org/(.+)/(.+)\\.git$\"),\n re.compile(r\"@bitbucket.org/(.+)/(.+)\\.git$\"),\n # This must come before the one without a / to make sure we don't capture the /\n re.compile(r\"bitbucket.org/(.+)/(.+)/\"),\n re.compile(r\"bitbucket.org/(.+)/(.+)\"),\n re.compile(r\"bitbucket.org:(.+)/(.+)\\.git$\"),\n]\nGITLAB_REGEXS = [\n 
re.compile(r\"gitlab.com/(.+)/(.+)(?:\\.git){1}$\"),\n # This must come before the one without a / to make sure we don't capture the /\n re.compile(r\"gitlab.com/(.+)/(.+)/\"),\n re.compile(r\"gitlab.com/(.+)/(.+)\"),\n re.compile(r\"gitlab.com:(.+)/(.+)\\.git$\"),\n]\nGITHUB_URL = (\n \"https://github.com/{user}/{repo}/\"\n \"{action}/{version}{docroot}{path}{source_suffix}\"\n)\nGITHUB_COMMIT_URL = \"https://github.com/{user}/{repo}/commit/{commit}\"\nGITHUB_PULL_REQUEST_URL = \"https://github.com/{user}/{repo}/pull/{number}\"\nGITHUB_PULL_REQUEST_COMMIT_URL = (\n \"https://github.com/{user}/{repo}/pull/{number}/commits/{commit}\"\n)\nBITBUCKET_URL = (\n \"https://bitbucket.org/{user}/{repo}/src/{version}{docroot}{path}{source_suffix}\"\n)\nBITBUCKET_COMMIT_URL = \"https://bitbucket.org/{user}/{repo}/commits/{commit}\"\nGITLAB_URL = (\n \"https://gitlab.com/{user}/{repo}/\"\n \"{action}/{version}{docroot}{path}{source_suffix}\"\n)\nGITLAB_COMMIT_URL = \"https://gitlab.com/{user}/{repo}/commit/{commit}\"\nGITLAB_MERGE_REQUEST_COMMIT_URL = (\n \"https://gitlab.com/{user}/{repo}/commit/{commit}?merge_request_iid={number}\"\n)\nGITLAB_MERGE_REQUEST_URL = \"https://gitlab.com/{user}/{repo}/merge_requests/{number}\"\n\n# Patterns to pull merge/pull request from providers\nGITHUB_PR_PULL_PATTERN = \"pull/{id}/head:external-{id}\"\nGITLAB_MR_PULL_PATTERN = \"merge-requests/{id}/head:external-{id}\"\n\n# Git provider names\nGITHUB_BRAND = \"GitHub\"\nGITLAB_BRAND = \"GitLab\"\n\n# SSL statuses\nSSL_STATUS_VALID = \"valid\"\nSSL_STATUS_INVALID = \"invalid\"\nSSL_STATUS_PENDING = \"pending\"\nSSL_STATUS_UNKNOWN = \"unknown\"\nSSL_STATUS_CHOICES = (\n (SSL_STATUS_VALID, _(\"Valid and active\")),\n (SSL_STATUS_INVALID, _(\"Invalid\")),\n (SSL_STATUS_PENDING, _(\"Pending\")),\n (SSL_STATUS_UNKNOWN, _(\"Unknown\")),\n)\n\nMULTIPLE_VERSIONS_WITH_TRANSLATIONS = \"multiple_versions_with_translations\"\nMULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS = \"multiple_versions_without_translations\"\nSINGLE_VERSION_WITHOUT_TRANSLATIONS = \"single_version_without_translations\"\nVERSIONING_SCHEME_CHOICES = (\n (\n MULTIPLE_VERSIONS_WITH_TRANSLATIONS,\n _(\"Multiple versions with translations (/<language>/<version>/<filename>)\"),\n ),\n (\n MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS,\n _(\"Multiple versions without translations (/<version>/<filename>)\"),\n ),\n (\n SINGLE_VERSION_WITHOUT_TRANSLATIONS,\n _(\"Single version without translations (/<filename>)\"),\n ),\n)\n\n\nADDONS_FLYOUT_SORTING_ALPHABETICALLY = \"alphabetically\"\n# Compatibility to keep the behavior of the old flyout.\n# This isn't a good algorithm, but it's a way to keep the old behavior in case we need it.\nADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE = \"semver-readthedocs-compatible\"\n# https://pypi.org/project/packaging/\nADDONS_FLYOUT_SORTING_PYTHON_PACKAGING = \"python-packaging\"\nADDONS_FLYOUT_SORTING_CALVER = \"calver\"\n# Let the user to define a custom pattern and use BumpVer to parse and sort the versions.\n# https://github.com/mbarkhau/bumpver#pattern-examples\nADDONS_FLYOUT_SORTING_CUSTOM_PATTERN = \"custom-pattern\"\n\nADDONS_FLYOUT_SORTING_CHOICES = (\n (ADDONS_FLYOUT_SORTING_ALPHABETICALLY, _(\"Alphabetically\")),\n (ADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE, _(\"SemVer (Read the Docs)\")),\n (\n ADDONS_FLYOUT_SORTING_PYTHON_PACKAGING,\n _(\"Python Packaging (PEP 440 and PEP 425)\"),\n ),\n (ADDONS_FLYOUT_SORTING_CALVER, _(\"CalVer (YYYY.0M.0M)\")),\n (ADDONS_FLYOUT_SORTING_CUSTOM_PATTERN, _(\"Define your own 
pattern\")),\n)\n", "path": "readthedocs/projects/constants.py"}], "after_files": [{"content": "\"\"\"\nProject constants.\n\nDefault values and other various configuration for projects, including available\ntheme names and repository types.\n\"\"\"\n\nimport os\nimport re\n\nfrom django.utils.translation import gettext_lazy as _\n\nSPHINX = \"sphinx\"\nMKDOCS = \"mkdocs\"\nSPHINX_HTMLDIR = \"sphinx_htmldir\"\nSPHINX_SINGLEHTML = \"sphinx_singlehtml\"\n# This type is defined by the users in their mkdocs.yml file.\nMKDOCS_HTML = \"mkdocs_html\"\nGENERIC = \"generic\"\nDOCUMENTATION_CHOICES = (\n (SPHINX, _(\"Sphinx Html\")),\n (MKDOCS, _(\"Mkdocs\")),\n (SPHINX_HTMLDIR, _(\"Sphinx HtmlDir\")),\n (SPHINX_SINGLEHTML, _(\"Sphinx Single Page HTML\")),\n)\nDOCTYPE_CHOICES = DOCUMENTATION_CHOICES + (\n (MKDOCS_HTML, _(\"Mkdocs Html Pages\")),\n (GENERIC, _(\"Generic\")),\n)\n\n\nMEDIA_TYPE_HTML = \"html\"\nMEDIA_TYPE_PDF = \"pdf\"\nMEDIA_TYPE_EPUB = \"epub\"\nMEDIA_TYPE_HTMLZIP = \"htmlzip\"\nMEDIA_TYPE_JSON = \"json\"\nDOWNLOADABLE_MEDIA_TYPES = (\n MEDIA_TYPE_PDF,\n MEDIA_TYPE_EPUB,\n MEDIA_TYPE_HTMLZIP,\n)\nMEDIA_TYPES = (\n MEDIA_TYPE_HTML,\n MEDIA_TYPE_PDF,\n MEDIA_TYPE_EPUB,\n MEDIA_TYPE_HTMLZIP,\n MEDIA_TYPE_JSON,\n)\n\nBUILD_COMMANDS_OUTPUT_PATH = \"_readthedocs/\"\nBUILD_COMMANDS_OUTPUT_PATH_HTML = os.path.join(BUILD_COMMANDS_OUTPUT_PATH, \"html\")\n\nSAMPLE_FILES = (\n (\"Installation\", \"projects/samples/installation.rst.html\"),\n (\"Getting started\", \"projects/samples/getting_started.rst.html\"),\n)\n\nSCRAPE_CONF_SETTINGS = [\n \"copyright\",\n \"project\",\n \"version\",\n \"release\",\n \"source_suffix\",\n \"html_theme\",\n \"extensions\",\n]\n\nHEADING_MARKUP = (\n (1, \"=\"),\n (2, \"-\"),\n (3, \"^\"),\n (4, '\"'),\n)\n\nLIVE_STATUS = 1\nDELETED_STATUS = 99\n\nSTATUS_CHOICES = (\n (LIVE_STATUS, _(\"Live\")),\n (DELETED_STATUS, _(\"Deleted\")),\n)\n\nREPO_TYPE_GIT = \"git\"\n\n# TODO: Remove this since we only have 1 type.\nREPO_CHOICES = ((REPO_TYPE_GIT, _(\"Git\")),)\n\nPUBLIC = \"public\"\nPRIVATE = \"private\"\n\nPRIVACY_CHOICES = (\n (PUBLIC, _(\"Public\")),\n (PRIVATE, _(\"Private\")),\n)\n\nIMPORTANT_VERSION_FILTERS = {\n \"slug\": \"important\",\n}\n\n# in the future this constant can be replaced with a implementation that\n# detect all available Python interpreters in the fly (Maybe using\n# update-alternatives linux tool family?).\nPYTHON_CHOICES = (\n (\"python\", _(\"CPython 2.x\")),\n (\"python3\", _(\"CPython 3.x\")),\n)\n\n# Via http://sphinx-doc.org/latest/config.html#confval-language\n# Languages supported for the lang_slug in the URL\n# Translations for builtin Sphinx messages only available for a subset of these\nLANGUAGES = (\n (\"aa\", \"Afar\"),\n (\"ab\", \"Abkhaz\"),\n (\"acr\", \"Achi\"),\n (\"af\", \"Afrikaans\"),\n (\"agu\", \"Awakateko\"),\n (\"am\", \"Amharic\"),\n (\"ar\", \"Arabic\"),\n (\"as\", \"Assamese\"),\n (\"ay\", \"Aymara\"),\n (\"az\", \"Azerbaijani\"),\n (\"ba\", \"Bashkir\"),\n (\"be\", \"Belarusian\"),\n (\"bg\", \"Bulgarian\"),\n (\"bh\", \"Bihari\"),\n (\"bi\", \"Bislama\"),\n (\"bn\", \"Bengali\"),\n (\"bo\", \"Tibetan\"),\n (\"br\", \"Breton\"),\n (\"ca\", \"Catalan\"),\n (\"caa\", \"Ch'orti'\"),\n (\"cac\", \"Chuj\"),\n (\"cab\", \"Gar\u00edfuna\"),\n (\"cak\", \"Kaqchikel\"),\n (\"co\", \"Corsican\"),\n (\"cs\", \"Czech\"),\n (\"cy\", \"Welsh\"),\n (\"da\", \"Danish\"),\n (\"de\", \"German\"),\n (\"dz\", \"Dzongkha\"),\n (\"el\", \"Greek\"),\n (\"en\", \"English\"),\n (\"eo\", \"Esperanto\"),\n (\"es\", \"Spanish\"),\n 
(\"et\", \"Estonian\"),\n (\"eu\", \"Basque\"),\n (\"fa\", \"Iranian\"),\n (\"fi\", \"Finnish\"),\n (\"fj\", \"Fijian\"),\n (\"fo\", \"Faroese\"),\n (\"fr\", \"French\"),\n (\"fy\", \"Western Frisian\"),\n (\"ga\", \"Irish\"),\n (\"gd\", \"Scottish Gaelic\"),\n (\"gl\", \"Galician\"),\n (\"gn\", \"Guarani\"),\n (\"gu\", \"Gujarati\"),\n (\"ha\", \"Hausa\"),\n (\"hi\", \"Hindi\"),\n (\"he\", \"Hebrew\"),\n (\"hr\", \"Croatian\"),\n (\"hu\", \"Hungarian\"),\n (\"hy\", \"Armenian\"),\n (\"ia\", \"Interlingua\"),\n (\"id\", \"Indonesian\"),\n (\"ie\", \"Interlingue\"),\n (\"ik\", \"Inupiaq\"),\n (\"is\", \"Icelandic\"),\n (\"it\", \"Italian\"),\n (\"itz\", \"Itza'\"),\n (\"iu\", \"Inuktitut\"),\n (\"ixl\", \"Ixil\"),\n (\"ja\", \"Japanese\"),\n (\"jac\", \"Popti'\"),\n (\"jv\", \"Javanese\"),\n (\"ka\", \"Georgian\"),\n (\"kjb\", \"Q'anjob'al\"),\n (\"kek\", \"Q'eqchi'\"),\n (\"kk\", \"Kazakh\"),\n (\"kl\", \"Kalaallisut\"),\n (\"km\", \"Khmer\"),\n (\"kn\", \"Kannada\"),\n (\"knj\", \"Akateko\"),\n (\"ko\", \"Korean\"),\n (\"ks\", \"Kashmiri\"),\n (\"ku\", \"Kurdish\"),\n (\"ky\", \"Kyrgyz\"),\n (\"la\", \"Latin\"),\n (\"ln\", \"Lingala\"),\n (\"lo\", \"Lao\"),\n (\"lt\", \"Lithuanian\"),\n (\"lv\", \"Latvian\"),\n (\"mam\", \"Mam\"),\n (\"mg\", \"Malagasy\"),\n (\"mi\", \"Maori\"),\n (\"mk\", \"Macedonian\"),\n (\"ml\", \"Malayalam\"),\n (\"mn\", \"Mongolian\"),\n (\"mop\", \"Mopan\"),\n (\"mr\", \"Marathi\"),\n (\"ms\", \"Malay\"),\n (\"mt\", \"Maltese\"),\n (\"my\", \"Burmese\"),\n (\"na\", \"Nauru\"),\n (\"ne\", \"Nepali\"),\n (\"nl\", \"Dutch\"),\n (\"no\", \"Norwegian\"),\n (\"oc\", \"Occitan\"),\n (\"om\", \"Oromo\"),\n (\"or\", \"Oriya\"),\n (\"pa\", \"Panjabi\"),\n (\"pl\", \"Polish\"),\n (\"pnb\", \"Western Punjabi\"),\n (\"poc\", \"Poqomam\"),\n (\"poh\", \"Poqomchi\"),\n (\"ps\", \"Pashto\"),\n (\"pt\", \"Portuguese\"),\n (\"qu\", \"Quechua\"),\n (\"quc\", \"K'iche'\"),\n (\"qum\", \"Sipakapense\"),\n (\"quv\", \"Sakapulteko\"),\n (\"rm\", \"Romansh\"),\n (\"rn\", \"Kirundi\"),\n (\"ro\", \"Romanian\"),\n (\"ru\", \"Russian\"),\n (\"rw\", \"Kinyarwanda\"),\n (\"sa\", \"Sanskrit\"),\n (\"sd\", \"Sindhi\"),\n (\"sg\", \"Sango\"),\n (\"si\", \"Sinhala\"),\n (\"sk\", \"Slovak\"),\n (\"skr\", \"Saraiki\"),\n (\"sl\", \"Slovenian\"),\n (\"sm\", \"Samoan\"),\n (\"sn\", \"Shona\"),\n (\"so\", \"Somali\"),\n (\"sq\", \"Albanian\"),\n (\"sr\", \"Serbian\"),\n (\"ss\", \"Swati\"),\n (\"st\", \"Southern Sotho\"),\n (\"su\", \"Sudanese\"),\n (\"sv\", \"Swedish\"),\n (\"sw\", \"Swahili\"),\n (\"ta\", \"Tamil\"),\n (\"te\", \"Telugu\"),\n (\"tg\", \"Tajik\"),\n (\"th\", \"Thai\"),\n (\"ti\", \"Tigrinya\"),\n (\"tk\", \"Turkmen\"),\n (\"tl\", \"Tagalog\"),\n (\"tn\", \"Tswana\"),\n (\"to\", \"Tonga\"),\n (\"tr\", \"Turkish\"),\n (\"ts\", \"Tsonga\"),\n (\"tt\", \"Tatar\"),\n (\"ttc\", \"Tektiteko\"),\n (\"tzj\", \"Tz'utujil\"),\n (\"tw\", \"Twi\"),\n (\"ug\", \"Uyghur\"),\n (\"uk\", \"Ukrainian\"),\n (\"ur\", \"Urdu\"),\n (\"usp\", \"Uspanteko\"),\n (\"uz\", \"Uzbek\"),\n (\"vi\", \"Vietnamese\"),\n (\"vo\", \"Volapuk\"),\n (\"wo\", \"Wolof\"),\n (\"xh\", \"Xhosa\"),\n (\"xin\", \"Xinka\"),\n (\"yi\", \"Yiddish\"),\n (\"yo\", \"Yoruba\"),\n (\"za\", \"Zhuang\"),\n # TODO: migrate those projects that are currently using \"zh\" as language.\n # This is an invalid language code, so the first step is remove it from the\n # list of possible languages.\n # https://github.com/readthedocs/readthedocs.org/issues/11387\n #\n # In [1]: Project.objects.filter(language='zh').count()\n # Out[1]: 1485\n #\n # 
(\"zh\", \"Chinese\"),\n (\"zu\", \"Zulu\"),\n # Try these to test our non-2 letter language support\n (\"nb-no\", \"Norwegian Bokmal\"),\n (\"pt-br\", \"Brazilian Portuguese\"),\n (\"es-mx\", \"Mexican Spanish\"),\n (\"uk-ua\", \"Ukrainian\"),\n (\"zh-cn\", \"Simplified Chinese\"),\n (\"zh-tw\", \"Traditional Chinese\"),\n)\nLANGUAGE_CODES = [code for code, *_ in LANGUAGES]\n\n# Normalize the language codes to lowercase with dashes,\n# we use them to match the language codes in the URL.\n# The old language codes were uppercase with underscores,\n# and are deprecated, but we still need to support them.\nold_language_codes = [\n \"nb_NO\",\n \"pt_BR\",\n \"es_MX\",\n \"uk_UA\",\n \"zh_CN\",\n \"zh_TW\",\n]\nOLD_LANGUAGES_CODE_MAPPING = {\n code.lower().replace(\"_\", \"-\"): code for code in old_language_codes\n}\n\nLANGUAGES_REGEX = \"|\".join(\n [\n re.escape(code)\n for code in LANGUAGE_CODES + list(OLD_LANGUAGES_CODE_MAPPING.values())\n ]\n # Add \"zh\" here to be able to keep serving projects with this old invalid language code.\n # We don't allow new projects to select this language code anymore.\n #\n # https://github.com/readthedocs/readthedocs.org/issues/11428\n + [\"zh\"]\n)\n\nPROGRAMMING_LANGUAGES = (\n (\"words\", \"Only Words\"),\n (\"py\", \"Python\"),\n (\"js\", \"JavaScript\"),\n (\"php\", \"PHP\"),\n (\"ruby\", \"Ruby\"),\n (\"perl\", \"Perl\"),\n (\"java\", \"Java\"),\n (\"go\", \"Go\"),\n (\"julia\", \"Julia\"),\n (\"c\", \"C\"),\n (\"csharp\", \"C#\"),\n (\"cpp\", \"C++\"),\n (\"objc\", \"Objective-C\"),\n (\"css\", \"CSS\"),\n (\"ts\", \"TypeScript\"),\n (\"swift\", \"Swift\"),\n (\"vb\", \"Visual Basic\"),\n (\"r\", \"R\"),\n (\"scala\", \"Scala\"),\n (\"groovy\", \"Groovy\"),\n (\"coffee\", \"CoffeeScript\"),\n (\"lua\", \"Lua\"),\n (\"haskell\", \"Haskell\"),\n (\"other\", \"Other\"),\n)\n\nPROJECT_PK_REGEX = r\"(?:[-\\w]+)\"\nPROJECT_SLUG_REGEX = r\"(?:[-\\w]+)\"\n\nGITHUB_REGEXS = [\n re.compile(r\"github.com/(.+)/(.+)(?:\\.git){1}$\"),\n # This must come before the one without a / to make sure we don't capture the /\n re.compile(r\"github.com/(.+)/(.+)/\"),\n re.compile(r\"github.com/(.+)/(.+)\"),\n re.compile(r\"github.com:(.+)/(.+)\\.git$\"),\n]\nBITBUCKET_REGEXS = [\n re.compile(r\"bitbucket.org/(.+)/(.+)\\.git$\"),\n re.compile(r\"@bitbucket.org/(.+)/(.+)\\.git$\"),\n # This must come before the one without a / to make sure we don't capture the /\n re.compile(r\"bitbucket.org/(.+)/(.+)/\"),\n re.compile(r\"bitbucket.org/(.+)/(.+)\"),\n re.compile(r\"bitbucket.org:(.+)/(.+)\\.git$\"),\n]\nGITLAB_REGEXS = [\n re.compile(r\"gitlab.com/(.+)/(.+)(?:\\.git){1}$\"),\n # This must come before the one without a / to make sure we don't capture the /\n re.compile(r\"gitlab.com/(.+)/(.+)/\"),\n re.compile(r\"gitlab.com/(.+)/(.+)\"),\n re.compile(r\"gitlab.com:(.+)/(.+)\\.git$\"),\n]\nGITHUB_URL = (\n \"https://github.com/{user}/{repo}/\"\n \"{action}/{version}{docroot}{path}{source_suffix}\"\n)\nGITHUB_COMMIT_URL = \"https://github.com/{user}/{repo}/commit/{commit}\"\nGITHUB_PULL_REQUEST_URL = \"https://github.com/{user}/{repo}/pull/{number}\"\nGITHUB_PULL_REQUEST_COMMIT_URL = (\n \"https://github.com/{user}/{repo}/pull/{number}/commits/{commit}\"\n)\nBITBUCKET_URL = (\n \"https://bitbucket.org/{user}/{repo}/src/{version}{docroot}{path}{source_suffix}\"\n)\nBITBUCKET_COMMIT_URL = \"https://bitbucket.org/{user}/{repo}/commits/{commit}\"\nGITLAB_URL = (\n \"https://gitlab.com/{user}/{repo}/\"\n \"{action}/{version}{docroot}{path}{source_suffix}\"\n)\nGITLAB_COMMIT_URL = 
\"https://gitlab.com/{user}/{repo}/commit/{commit}\"\nGITLAB_MERGE_REQUEST_COMMIT_URL = (\n \"https://gitlab.com/{user}/{repo}/commit/{commit}?merge_request_iid={number}\"\n)\nGITLAB_MERGE_REQUEST_URL = \"https://gitlab.com/{user}/{repo}/merge_requests/{number}\"\n\n# Patterns to pull merge/pull request from providers\nGITHUB_PR_PULL_PATTERN = \"pull/{id}/head:external-{id}\"\nGITLAB_MR_PULL_PATTERN = \"merge-requests/{id}/head:external-{id}\"\n\n# Git provider names\nGITHUB_BRAND = \"GitHub\"\nGITLAB_BRAND = \"GitLab\"\n\n# SSL statuses\nSSL_STATUS_VALID = \"valid\"\nSSL_STATUS_INVALID = \"invalid\"\nSSL_STATUS_PENDING = \"pending\"\nSSL_STATUS_UNKNOWN = \"unknown\"\nSSL_STATUS_CHOICES = (\n (SSL_STATUS_VALID, _(\"Valid and active\")),\n (SSL_STATUS_INVALID, _(\"Invalid\")),\n (SSL_STATUS_PENDING, _(\"Pending\")),\n (SSL_STATUS_UNKNOWN, _(\"Unknown\")),\n)\n\nMULTIPLE_VERSIONS_WITH_TRANSLATIONS = \"multiple_versions_with_translations\"\nMULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS = \"multiple_versions_without_translations\"\nSINGLE_VERSION_WITHOUT_TRANSLATIONS = \"single_version_without_translations\"\nVERSIONING_SCHEME_CHOICES = (\n (\n MULTIPLE_VERSIONS_WITH_TRANSLATIONS,\n _(\"Multiple versions with translations (/<language>/<version>/<filename>)\"),\n ),\n (\n MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS,\n _(\"Multiple versions without translations (/<version>/<filename>)\"),\n ),\n (\n SINGLE_VERSION_WITHOUT_TRANSLATIONS,\n _(\"Single version without translations (/<filename>)\"),\n ),\n)\n\n\nADDONS_FLYOUT_SORTING_ALPHABETICALLY = \"alphabetically\"\n# Compatibility to keep the behavior of the old flyout.\n# This isn't a good algorithm, but it's a way to keep the old behavior in case we need it.\nADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE = \"semver-readthedocs-compatible\"\n# https://pypi.org/project/packaging/\nADDONS_FLYOUT_SORTING_PYTHON_PACKAGING = \"python-packaging\"\nADDONS_FLYOUT_SORTING_CALVER = \"calver\"\n# Let the user to define a custom pattern and use BumpVer to parse and sort the versions.\n# https://github.com/mbarkhau/bumpver#pattern-examples\nADDONS_FLYOUT_SORTING_CUSTOM_PATTERN = \"custom-pattern\"\n\nADDONS_FLYOUT_SORTING_CHOICES = (\n (ADDONS_FLYOUT_SORTING_ALPHABETICALLY, _(\"Alphabetically\")),\n (ADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE, _(\"SemVer (Read the Docs)\")),\n (\n ADDONS_FLYOUT_SORTING_PYTHON_PACKAGING,\n _(\"Python Packaging (PEP 440 and PEP 425)\"),\n ),\n (ADDONS_FLYOUT_SORTING_CALVER, _(\"CalVer (YYYY.0M.0M)\")),\n (ADDONS_FLYOUT_SORTING_CUSTOM_PATTERN, _(\"Define your own pattern\")),\n)\n", "path": "readthedocs/projects/constants.py"}]} |
gh_patches_debug_1349 | rasdani/github-patches | git_diff | biopython__biopython-3922 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KEGG.Compound.parse not returning mass
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
- Biopython version: 1.78
- Python version: 3.9.12
- Operating system: Windows 10 Pro
### Expected behaviour
Calling KEGG.Compound.parse on a KEGG record should return a KEGG record object containing the mass. For example, compound C00120 should have a mass attribute containing 244.0882.
### Actual behaviour
However, no mass attribute is returned.
### Steps to reproduce
```
from Bio.KEGG.Compound import parse
from Bio.KEGG.REST import kegg_get
c00120 = next(parse(kegg_get('C00120')))
print(c00120.mass)
```
### Fix
This is because the KEGG record now uses separate EXACT_MASS and MOL_WEIGHT fields (as can be seen by running `kegg_get('C00120').read()`). This can be fixed by replacing line 156 of `Bio/KEGG/Compound/__init__.py` with:
`elif keyword == "EXACT_MASS ":`
--- END ISSUE ---
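As a quick illustration of the keyword handling discussed in the issue above, here is a minimal, self-contained sketch. It is not Biopython code: the toy record is hand-written to mimic the 12-character keyword column of the KEGG flat-file format, and the matching is simplified by stripping the keyword field rather than comparing padded strings.
```
# Illustrative only: TOY_RECORD is hand-written, not fetched from KEGG.
TOY_RECORD = (
    "ENTRY       C00120                      Compound\n"
    "NAME        Biotin\n"
    "FORMULA     C10H16N2O3S\n"
    "EXACT_MASS  244.0882\n"
    "MOL_WEIGHT  244.3106\n"
    "///\n"
)


def parse_mass(text):
    """Return the value found under either the MASS or EXACT_MASS keyword."""
    for line in text.splitlines():
        keyword = line[:12].strip()   # 12-character keyword column
        data = line[12:].strip()
        if keyword in ("MASS", "EXACT_MASS"):
            return data
    return ""


if __name__ == "__main__":
    print(parse_mass(TOY_RECORD))  # expected output: 244.0882
```
The real parser compares the raw 12-character slice against padded keyword strings, so a fix in the actual module would extend that comparison rather than strip the field as done in this sketch.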
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Bio/KEGG/Compound/__init__.py`
Content:
```
1 # Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
2 # Copyright 2007 by Michiel de Hoon. All rights reserved.
3 #
4 # This file is part of the Biopython distribution and governed by your
5 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
6 # Please see the LICENSE file that should have been included as part of this
7 # package.
8
9 """Code to work with the KEGG Ligand/Compound database.
10
11 Functions:
12 - parse - Returns an iterator giving Record objects.
13
14 Classes:
15 - Record - A representation of a KEGG Ligand/Compound.
16 """
17
18
19 from Bio.KEGG import _default_wrap, _struct_wrap, _wrap_kegg, _write_kegg
20
21
22 # Set up line wrapping rules (see Bio.KEGG._wrap_kegg)
23 name_wrap = [0, "", (" ", "$", 1, 1), ("-", "$", 1, 1)]
24 id_wrap = _default_wrap
25 struct_wrap = _struct_wrap
26
27
28 class Record:
29 """Holds info from a KEGG Ligand/Compound record.
30
31 Attributes:
32 - entry The entry identifier.
33 - name A list of the compound names.
34 - formula The chemical formula for the compound
35 - mass The molecular weight for the compound
36 - pathway A list of 3-tuples: ('PATH', pathway id, pathway)
37 - enzyme A list of the EC numbers.
38 - structures A list of 2-tuples: (database, list of struct ids)
39 - dblinks A list of 2-tuples: (database, list of link ids)
40
41 """
42
43 def __init__(self):
44 """Initialize as new record."""
45 self.entry = ""
46 self.name = []
47 self.formula = ""
48 self.mass = ""
49 self.pathway = []
50 self.enzyme = []
51 self.structures = []
52 self.dblinks = []
53
54 def __str__(self):
55 """Return a string representation of this Record."""
56 return (
57 self._entry()
58 + self._name()
59 + self._formula()
60 + self._mass()
61 + self._pathway()
62 + self._enzyme()
63 + self._structures()
64 + self._dblinks()
65 + "///"
66 )
67
68 def _entry(self):
69 return _write_kegg("ENTRY", [self.entry])
70
71 def _name(self):
72 return _write_kegg(
73 "NAME", [_wrap_kegg(l, wrap_rule=name_wrap) for l in self.name]
74 )
75
76 def _formula(self):
77 return _write_kegg("FORMULA", [self.formula])
78
79 def _mass(self):
80 return _write_kegg("MASS", [self.mass])
81
82 def _pathway(self):
83 s = []
84 for entry in self.pathway:
85 s.append(entry[0] + " " + entry[1])
86 return _write_kegg("PATHWAY", [_wrap_kegg(l, wrap_rule=id_wrap(16)) for l in s])
87
88 def _enzyme(self):
89 return _write_kegg(
90 "ENZYME", [_wrap_kegg(l, wrap_rule=name_wrap) for l in self.enzyme]
91 )
92
93 def _structures(self):
94 s = []
95 for entry in self.structures:
96 s.append(entry[0] + ": " + " ".join(entry[1]) + " ")
97 return _write_kegg(
98 "STRUCTURES", [_wrap_kegg(l, wrap_rule=struct_wrap(5)) for l in s]
99 )
100
101 def _dblinks(self):
102 s = []
103 for entry in self.dblinks:
104 s.append(entry[0] + ": " + " ".join(entry[1]))
105 return _write_kegg("DBLINKS", [_wrap_kegg(l, wrap_rule=id_wrap(9)) for l in s])
106
107
108 def parse(handle):
109 """Parse a KEGG Ligan/Compound file, returning Record objects.
110
111 This is an iterator function, typically used in a for loop. For
112 example, using one of the example KEGG files in the Biopython
113 test suite,
114
115 >>> with open("KEGG/compound.sample") as handle:
116 ... for record in parse(handle):
117 ... print("%s %s" % (record.entry, record.name[0]))
118 ...
119 C00023 Iron
120 C00017 Protein
121 C00099 beta-Alanine
122 C00294 Inosine
123 C00298 Trypsin
124 C00348 all-trans-Undecaprenyl phosphate
125 C00349 2-Methyl-3-oxopropanoate
126 C01386 NH2Mec
127
128 """
129 record = Record()
130 for line in handle:
131 if line[:3] == "///":
132 yield record
133 record = Record()
134 continue
135 if line[:12] != " ":
136 keyword = line[:12]
137 data = line[12:].strip()
138 if keyword == "ENTRY ":
139 words = data.split()
140 record.entry = words[0]
141 elif keyword == "NAME ":
142 data = data.strip(";")
143 record.name.append(data)
144 elif keyword == "ENZYME ":
145 while data:
146 column = data[:16]
147 data = data[16:]
148 enzyme = column.strip()
149 record.enzyme.append(enzyme)
150 elif keyword == "PATHWAY ":
151 map, name = data.split(" ")
152 pathway = ("PATH", map, name)
153 record.pathway.append(pathway)
154 elif keyword == "FORMULA ":
155 record.formula = data
156 elif keyword == "MASS ":
157 record.mass = data
158 elif keyword == "DBLINKS ":
159 if ":" in data:
160 key, values = data.split(":")
161 values = values.split()
162 row = (key, values)
163 record.dblinks.append(row)
164 else:
165 row = record.dblinks[-1]
166 key, values = row
167 values.extend(data.split())
168 row = key, values
169 record.dblinks[-1] = row
170
171
172 if __name__ == "__main__":
173 from Bio._utils import run_doctest
174
175 run_doctest()
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Bio/KEGG/Compound/__init__.py b/Bio/KEGG/Compound/__init__.py
--- a/Bio/KEGG/Compound/__init__.py
+++ b/Bio/KEGG/Compound/__init__.py
@@ -153,7 +153,7 @@
record.pathway.append(pathway)
elif keyword == "FORMULA ":
record.formula = data
- elif keyword == "MASS ":
+ elif keyword in ("MASS ", "EXACT_MASS "):
record.mass = data
elif keyword == "DBLINKS ":
if ":" in data:
| {"golden_diff": "diff --git a/Bio/KEGG/Compound/__init__.py b/Bio/KEGG/Compound/__init__.py\n--- a/Bio/KEGG/Compound/__init__.py\n+++ b/Bio/KEGG/Compound/__init__.py\n@@ -153,7 +153,7 @@\n record.pathway.append(pathway)\n elif keyword == \"FORMULA \":\n record.formula = data\n- elif keyword == \"MASS \":\n+ elif keyword in (\"MASS \", \"EXACT_MASS \"):\n record.mass = data\n elif keyword == \"DBLINKS \":\n if \":\" in data:\n", "issue": "KEGG.Compound.parse not returning mass \n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n1.78\r\n3.9.12\r\nWindows 10 Pro\r\n\r\n### Expected behaviour\r\n\r\nCalling KEGG.Compound.parse on a KEGG record should return a KEGG record object containing the mass. For example, compound C00120 should have a mass attribute containing 244.0882.\r\n\r\n### Actual behaviour\r\n\r\nHowever, no mass attribute is returned. \r\n\r\n### Steps to reproduce\r\n\r\n```\r\nfrom Bio.KEGG.Compound import parse\r\nfrom Bio.KEGG.REST import kegg_get\r\nc00120 = next(parse(kegg_get('C00120')))\r\nprint(c00120.mass)\r\n```\r\n### Fix\r\nThis is because the KEGG record now uses separate EXACT_MASS and MOL_WEIGHT fields (can be seen by running kegg_get('C00120').read()). Fixed by replacing line 156 in KEGG.Compound.__init__.py with:\r\n`elif keyword == \"EXACT_MASS \":`\r\n\r\n\n", "before_files": [{"content": "# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.\n# Copyright 2007 by Michiel de Hoon. All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"Code to work with the KEGG Ligand/Compound database.\n\nFunctions:\n - parse - Returns an iterator giving Record objects.\n\nClasses:\n - Record - A representation of a KEGG Ligand/Compound.\n\"\"\"\n\n\nfrom Bio.KEGG import _default_wrap, _struct_wrap, _wrap_kegg, _write_kegg\n\n\n# Set up line wrapping rules (see Bio.KEGG._wrap_kegg)\nname_wrap = [0, \"\", (\" \", \"$\", 1, 1), (\"-\", \"$\", 1, 1)]\nid_wrap = _default_wrap\nstruct_wrap = _struct_wrap\n\n\nclass Record:\n \"\"\"Holds info from a KEGG Ligand/Compound record.\n\n Attributes:\n - entry The entry identifier.\n - name A list of the compound names.\n - formula The chemical formula for the compound\n - mass The molecular weight for the compound\n - pathway A list of 3-tuples: ('PATH', pathway id, pathway)\n - enzyme A list of the EC numbers.\n - structures A list of 2-tuples: (database, list of struct ids)\n - dblinks A list of 2-tuples: (database, list of link ids)\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize as new record.\"\"\"\n self.entry = \"\"\n self.name = []\n self.formula = \"\"\n self.mass = \"\"\n self.pathway = []\n self.enzyme = []\n self.structures = []\n self.dblinks = []\n\n def __str__(self):\n \"\"\"Return a string representation of this Record.\"\"\"\n return (\n self._entry()\n + self._name()\n + self._formula()\n + self._mass()\n + self._pathway()\n + self._enzyme()\n + self._structures()\n + self._dblinks()\n + \"///\"\n )\n\n def _entry(self):\n return _write_kegg(\"ENTRY\", [self.entry])\n\n def _name(self):\n return _write_kegg(\n \"NAME\", [_wrap_kegg(l, wrap_rule=name_wrap) for l in self.name]\n )\n\n def _formula(self):\n return _write_kegg(\"FORMULA\", [self.formula])\n\n def _mass(self):\n return _write_kegg(\"MASS\", 
[self.mass])\n\n def _pathway(self):\n s = []\n for entry in self.pathway:\n s.append(entry[0] + \" \" + entry[1])\n return _write_kegg(\"PATHWAY\", [_wrap_kegg(l, wrap_rule=id_wrap(16)) for l in s])\n\n def _enzyme(self):\n return _write_kegg(\n \"ENZYME\", [_wrap_kegg(l, wrap_rule=name_wrap) for l in self.enzyme]\n )\n\n def _structures(self):\n s = []\n for entry in self.structures:\n s.append(entry[0] + \": \" + \" \".join(entry[1]) + \" \")\n return _write_kegg(\n \"STRUCTURES\", [_wrap_kegg(l, wrap_rule=struct_wrap(5)) for l in s]\n )\n\n def _dblinks(self):\n s = []\n for entry in self.dblinks:\n s.append(entry[0] + \": \" + \" \".join(entry[1]))\n return _write_kegg(\"DBLINKS\", [_wrap_kegg(l, wrap_rule=id_wrap(9)) for l in s])\n\n\ndef parse(handle):\n \"\"\"Parse a KEGG Ligan/Compound file, returning Record objects.\n\n This is an iterator function, typically used in a for loop. For\n example, using one of the example KEGG files in the Biopython\n test suite,\n\n >>> with open(\"KEGG/compound.sample\") as handle:\n ... for record in parse(handle):\n ... print(\"%s %s\" % (record.entry, record.name[0]))\n ...\n C00023 Iron\n C00017 Protein\n C00099 beta-Alanine\n C00294 Inosine\n C00298 Trypsin\n C00348 all-trans-Undecaprenyl phosphate\n C00349 2-Methyl-3-oxopropanoate\n C01386 NH2Mec\n\n \"\"\"\n record = Record()\n for line in handle:\n if line[:3] == \"///\":\n yield record\n record = Record()\n continue\n if line[:12] != \" \":\n keyword = line[:12]\n data = line[12:].strip()\n if keyword == \"ENTRY \":\n words = data.split()\n record.entry = words[0]\n elif keyword == \"NAME \":\n data = data.strip(\";\")\n record.name.append(data)\n elif keyword == \"ENZYME \":\n while data:\n column = data[:16]\n data = data[16:]\n enzyme = column.strip()\n record.enzyme.append(enzyme)\n elif keyword == \"PATHWAY \":\n map, name = data.split(\" \")\n pathway = (\"PATH\", map, name)\n record.pathway.append(pathway)\n elif keyword == \"FORMULA \":\n record.formula = data\n elif keyword == \"MASS \":\n record.mass = data\n elif keyword == \"DBLINKS \":\n if \":\" in data:\n key, values = data.split(\":\")\n values = values.split()\n row = (key, values)\n record.dblinks.append(row)\n else:\n row = record.dblinks[-1]\n key, values = row\n values.extend(data.split())\n row = key, values\n record.dblinks[-1] = row\n\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n\n run_doctest()\n", "path": "Bio/KEGG/Compound/__init__.py"}], "after_files": [{"content": "# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.\n# Copyright 2007 by Michiel de Hoon. 
All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"Code to work with the KEGG Ligand/Compound database.\n\nFunctions:\n - parse - Returns an iterator giving Record objects.\n\nClasses:\n - Record - A representation of a KEGG Ligand/Compound.\n\"\"\"\n\n\nfrom Bio.KEGG import _default_wrap, _struct_wrap, _wrap_kegg, _write_kegg\n\n\n# Set up line wrapping rules (see Bio.KEGG._wrap_kegg)\nname_wrap = [0, \"\", (\" \", \"$\", 1, 1), (\"-\", \"$\", 1, 1)]\nid_wrap = _default_wrap\nstruct_wrap = _struct_wrap\n\n\nclass Record:\n \"\"\"Holds info from a KEGG Ligand/Compound record.\n\n Attributes:\n - entry The entry identifier.\n - name A list of the compound names.\n - formula The chemical formula for the compound\n - mass The molecular weight for the compound\n - pathway A list of 3-tuples: ('PATH', pathway id, pathway)\n - enzyme A list of the EC numbers.\n - structures A list of 2-tuples: (database, list of struct ids)\n - dblinks A list of 2-tuples: (database, list of link ids)\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize as new record.\"\"\"\n self.entry = \"\"\n self.name = []\n self.formula = \"\"\n self.mass = \"\"\n self.pathway = []\n self.enzyme = []\n self.structures = []\n self.dblinks = []\n\n def __str__(self):\n \"\"\"Return a string representation of this Record.\"\"\"\n return (\n self._entry()\n + self._name()\n + self._formula()\n + self._mass()\n + self._pathway()\n + self._enzyme()\n + self._structures()\n + self._dblinks()\n + \"///\"\n )\n\n def _entry(self):\n return _write_kegg(\"ENTRY\", [self.entry])\n\n def _name(self):\n return _write_kegg(\n \"NAME\", [_wrap_kegg(l, wrap_rule=name_wrap) for l in self.name]\n )\n\n def _formula(self):\n return _write_kegg(\"FORMULA\", [self.formula])\n\n def _mass(self):\n return _write_kegg(\"MASS\", [self.mass])\n\n def _pathway(self):\n s = []\n for entry in self.pathway:\n s.append(entry[0] + \" \" + entry[1])\n return _write_kegg(\"PATHWAY\", [_wrap_kegg(l, wrap_rule=id_wrap(16)) for l in s])\n\n def _enzyme(self):\n return _write_kegg(\n \"ENZYME\", [_wrap_kegg(l, wrap_rule=name_wrap) for l in self.enzyme]\n )\n\n def _structures(self):\n s = []\n for entry in self.structures:\n s.append(entry[0] + \": \" + \" \".join(entry[1]) + \" \")\n return _write_kegg(\n \"STRUCTURES\", [_wrap_kegg(l, wrap_rule=struct_wrap(5)) for l in s]\n )\n\n def _dblinks(self):\n s = []\n for entry in self.dblinks:\n s.append(entry[0] + \": \" + \" \".join(entry[1]))\n return _write_kegg(\"DBLINKS\", [_wrap_kegg(l, wrap_rule=id_wrap(9)) for l in s])\n\n\ndef parse(handle):\n \"\"\"Parse a KEGG Ligan/Compound file, returning Record objects.\n\n This is an iterator function, typically used in a for loop. For\n example, using one of the example KEGG files in the Biopython\n test suite,\n\n >>> with open(\"KEGG/compound.sample\") as handle:\n ... for record in parse(handle):\n ... 
print(\"%s %s\" % (record.entry, record.name[0]))\n ...\n C00023 Iron\n C00017 Protein\n C00099 beta-Alanine\n C00294 Inosine\n C00298 Trypsin\n C00348 all-trans-Undecaprenyl phosphate\n C00349 2-Methyl-3-oxopropanoate\n C01386 NH2Mec\n\n \"\"\"\n record = Record()\n for line in handle:\n if line[:3] == \"///\":\n yield record\n record = Record()\n continue\n if line[:12] != \" \":\n keyword = line[:12]\n data = line[12:].strip()\n if keyword == \"ENTRY \":\n words = data.split()\n record.entry = words[0]\n elif keyword == \"NAME \":\n data = data.strip(\";\")\n record.name.append(data)\n elif keyword == \"ENZYME \":\n while data:\n column = data[:16]\n data = data[16:]\n enzyme = column.strip()\n record.enzyme.append(enzyme)\n elif keyword == \"PATHWAY \":\n map, name = data.split(\" \")\n pathway = (\"PATH\", map, name)\n record.pathway.append(pathway)\n elif keyword == \"FORMULA \":\n record.formula = data\n elif keyword in (\"MASS \", \"EXACT_MASS \"):\n record.mass = data\n elif keyword == \"DBLINKS \":\n if \":\" in data:\n key, values = data.split(\":\")\n values = values.split()\n row = (key, values)\n record.dblinks.append(row)\n else:\n row = record.dblinks[-1]\n key, values = row\n values.extend(data.split())\n row = key, values\n record.dblinks[-1] = row\n\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n\n run_doctest()\n", "path": "Bio/KEGG/Compound/__init__.py"}]} |
gh_patches_debug_1350 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-52 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug when using ProcessWriter with extensions.
## problem statement
I ran code that includes the fragment below, and it raised the error shown in the next section.
```
writer = writing.ProcessWriter(savefun=torch.save, out_dir=save_path)
manager.extend(extensions.snapshot(writer=writer), trigger=(1, 'iteration'))
manager.extend(extensions.snapshot(writer=writer, filename='gen_{.epoch}', target=generator.module), trigger=(10, 'iteration'))
manager.extend(extensions.snapshot(), trigger=(10, 'epoch'))
manager.extend(extensions.snapshot(filename='gen_{.epoch}', target=generator.module), trigger=(10, 'epoch'))
```
## error message
```
Traceback (most recent call last):
File "main_train.py", line 232, in <module>
train()
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "main_train.py", line 226, in train
Image.fromarray(x).save(f'{i}.png')
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/manager.py", line 390, in run_iteration
self.run_extensions()
File "/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/manager.py", line 272, in run_extensions
entry.extension(self)
File "/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/extensions/_snapshot.py", line 397, in __call__
self._make_snapshot(manager)
File "/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/extensions/_snapshot.py", line 422, in _make_snapshot
writer(filename, outdir, serialized_target, savefun=self._savefun)
File "/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/writing.py", line 308, in __call__
savefun, **self._kwds)
TypeError: create_worker() takes 4 positional arguments but 5 were given
```
The `create_worker` method in `StandardWriter` accepts five arguments including `self`.
However, `create_worker` in `ProcessWriter` and `ThreadWriter` accepts only four arguments.
https://github.com/pfnet/pytorch-pfn-extras/blob/8b16df9433a024c63d786c76f1e7ccbb88847283/pytorch_pfn_extras/writing.py#L307-L312
https://github.com/pfnet/pytorch-pfn-extras/blob/8b16df9433a024c63d786c76f1e7ccbb88847283/pytorch_pfn_extras/writing.py#L374-L378
--- END ISSUE ---
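The mismatch described in the issue can be reproduced without pytorch-pfn-extras at all. The classes below are stand-ins written for illustration (they are not the library's real implementation); they only copy the signatures quoted above to show why the positional hand-off of `savefun` fails.
```
# Stand-in classes mirroring the quoted signatures; illustration only.
class StandardWriterLike:
    def __call__(self, filename, out_dir, target, *, savefun=None):
        # Positional hand-off of savefun, as in writing.py before the fix.
        return self.create_worker(filename, out_dir, target, savefun)

    def create_worker(self, filename, out_dir, target, savefun, **kwds):
        raise NotImplementedError


class ProcessWriterLike(StandardWriterLike):
    # The subclass omits the explicit savefun parameter, like ProcessWriter.
    def create_worker(self, filename, out_dir, target, **kwds):
        return (filename, out_dir, target, kwds)


if __name__ == "__main__":
    writer = ProcessWriterLike()
    try:
        writer("snapshot_iter_1", "/tmp", {"state": 1}, savefun=print)
    except TypeError as exc:
        # e.g. "create_worker() takes 4 positional arguments but 5 were given"
        print(exc)
```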
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/writing.py`
Content:
```
1 import multiprocessing
2 import io
3 import os
4 import queue
5 import shutil
6 import sys
7 import threading
8
9 import torch
10
11
12 def open_wrapper(func):
13 def wrapper(self, file_path, mode='rb',
14 buffering=-1, encoding=None,
15 errors=None, newline=None,
16 closefd=True,
17 opener=None):
18 file_obj = func(self, file_path, mode, buffering, encoding,
19 errors, newline, closefd, opener)
20 return self._wrap_fileobject(
21 file_obj, file_path, mode, buffering, encoding,
22 errors, newline, closefd, opener)
23 return wrapper
24
25
26 class _PosixFileStat:
27 def __init__(self, _stat, filename):
28 self.filename = filename
29 self.last_modified = _stat.st_mtime
30 self.last_accessed = _stat.st_atime
31 self.created = _stat.st_ctime
32 self.mode = _stat.st_mode
33 self.size = _stat.st_size
34
35
36 class _PosixFileSystem(object):
37 """Class to abstract the calls to the FileSystem
38
39 This class obeys the same interface as PFIO's POSIX
40 Filesystems declarations. When using HDFS, PFIO
41 handler can be used instead (requires PFIO>1.0).
42
43 This class currently abstracts POSIX
44 """
45 def __init__(self):
46 pass
47
48 def get_actual_path(self, path):
49 return os.path.join(self.root, path)
50
51 def _wrap_fileobject(self, file_obj, file_path, *args, **kwargs):
52 return file_obj
53
54 @property
55 def root(self):
56 return self._root
57
58 @root.setter
59 def root(self, root):
60 self._root = root
61
62 @open_wrapper
63 def open(self, file_path, mode='r',
64 buffering=-1, encoding=None, errors=None,
65 newline=None, closefd=True, opener=None):
66
67 return io.open(file_path, mode,
68 buffering, encoding, errors,
69 newline, closefd, opener)
70
71 def list(self, path_or_prefix: str = None, recursive=False):
72 if recursive:
73 path_or_prefix = path_or_prefix.rstrip("/")
74 # plus 1 to include the trailing slash
75 prefix_end_index = len(path_or_prefix) + 1
76 yield from self._recursive_list(prefix_end_index, path_or_prefix)
77 else:
78 for file in os.scandir(path_or_prefix):
79 yield file.name
80
81 def _recursive_list(self, prefix_end_index: int, path: str):
82 for file in os.scandir(path):
83 yield file.path[prefix_end_index:]
84
85 if file.is_dir():
86 yield from self._recursive_list(prefix_end_index,
87 file.path)
88
89 def stat(self, path):
90 return _PosixFileStat(os.stat(path), path)
91
92 def close(self):
93 pass
94
95 def __enter__(self):
96 return self
97
98 def __exit__(self, exc_type, exc_value, traceback):
99 pass
100
101 def isdir(self, file_path):
102 return os.path.isdir(file_path)
103
104 def mkdir(self, file_path, mode=0o777, *args, dir_fd=None):
105 return os.mkdir(file_path, mode, *args, dir_fd=None)
106
107 def makedirs(self, file_path, mode=0o777, exist_ok=False):
108 return os.makedirs(file_path, mode, exist_ok)
109
110 def exists(self, file_path):
111 return os.path.exists(file_path)
112
113 def rename(self, src, dst):
114 try:
115 return os.replace(src, dst)
116 except OSError:
117 print('Destination {} is a directory '
118 'but source is not'.format(src),
119 file=sys.stderr)
120 raise
121
122 def remove(self, file_path, recursive=False):
123 if recursive:
124 return shutil.rmtree(file_path)
125 if os.path.isdir(file_path):
126 return os.rmdir(file_path)
127
128 return os.remove(file_path)
129
130
131 class Writer:
132
133 """Base class of snapshot writers.
134
135 :class:`~pytorch_pfn_extras.training.extensions.Snapshot`
136 invokes ``__call__`` of this class every time when taking a snapshot.
137 This class determines how the actual saving function will be invoked.
138
139 .. note::
140 This extension first writes the serialized object to a temporary file
141 and then rename it to the target file name. Thus, if the program stops
142 right before the renaming, the temporary file might be left in the
143 output directory.
144
145 .. seealso::
146
147 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
148 """
149
150 def __init__(self, fs=None, out_dir=None):
151 self._post_save_hooks = []
152 self.fs = fs
153 self.out_dir = out_dir
154 if fs is None:
155 self.fs = _PosixFileSystem()
156
157 self._initialized = False
158
159 def __call__(self, filename, out_dir, target):
160 """Invokes the actual snapshot function.
161
162 This method is invoked by a
163 :class:`~pytorch_pfn_extras.training.extensions.Snapshot` object
164 every time it takes a snapshot.
165
166 Args:
167 filename (str): Name of the file into which the serialized target
168 is saved. It is a concrete file name, i.e. not a pre-formatted
169 template string.
170 out_dir (str): Output directory. Corresponds to
171 :py:attr:`ExtensionsManager.out
172 <pytorch_pfn_extras.training.ExtensionsManager.out>`.
173 target (dict): Serialized object which will be saved.
174 """
175 raise NotImplementedError
176
177 def initialize(self, out_dir):
178 if not self.fs.exists(out_dir):
179 self.fs.makedirs(out_dir)
180 self._initialized = True
181
182 def __del__(self):
183 self.finalize()
184
185 def finalize(self):
186 """Finalizes the writer.
187
188 this method is invoked at the end of the training in
189 :class:`~pytorch_pfn_extras.training.ExtensionsManager`,
190
191 """
192 pass
193
194 def save(self, filename, out_dir, target, savefun, **kwds):
195 if self.out_dir is not None:
196 out_dir = self.out_dir
197 if not self._initialized:
198 self.initialize(out_dir)
199 # Some filesystems are not compatible with temp folders, etc
200 # so we rely on raw temp files
201 prefix = 'tmp_{}'.format(filename)
202 dest = os.path.join(out_dir, filename)
203 tmppath = os.path.join(out_dir, prefix)
204 make_backup = self.fs.exists(dest)
205 if make_backup:
206 bak = '{}.bak'.format(dest)
207 self.fs.rename(dest, bak)
208 with self.fs.open(tmppath, 'wb') as f:
209 # HDFS does not support overwrite
210 savefun(target, f)
211 self.fs.rename(tmppath, dest)
212 if make_backup:
213 self.fs.remove(bak)
214
215 self._post_save()
216
217 def _add_cleanup_hook(self, hook_fun):
218 """Adds cleanup hook function.
219
220 Technically, arbitrary user-defined hook can be called, but
221 this is intended for cleaning up stale snapshots.
222
223 Args:
224 hook_fun (callable): callable function to be called
225 right after save is done. It takes no arguments.
226
227 """
228 self._post_save_hooks.append(hook_fun)
229
230 def _post_save(self):
231 for hook in self._post_save_hooks:
232 hook()
233
234
235 class SimpleWriter(Writer):
236 """The most simple snapshot writer.
237
238 This class just passes the arguments to the actual saving function.
239
240 Args:
241 savefun: Callable object. It takes three arguments: the output file
242 path, the serialized dictionary object, and the optional keyword
243 arguments.
244 fs: FileSystem abstracting interface to implement all the operations.
245 optional, defaults to None
246 out_dir: str. Specifies the directory this writer will use.
247 It takes precedence over the one specified in `__call__`
248 optional, defaults to None
249 kwds: Keyword arguments for the ``savefun``.
250
251 .. seealso::
252
253 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
254 """
255
256 def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):
257 super().__init__(fs=fs, out_dir=out_dir)
258 self._savefun = savefun
259 self._kwds = kwds
260
261 def __call__(self, filename, out_dir, target, *, savefun=None):
262 if savefun is None:
263 savefun = self._savefun
264 self.save(filename, out_dir, target, savefun, **self._kwds)
265
266
267 class StandardWriter(Writer):
268 """Base class of snapshot writers which use thread or process.
269
270 This class creates a new thread or a process every time when ``__call__``
271 is invoked.
272
273 Args:
274 savefun: Callable object. It takes three arguments: the output file
275 path, the serialized dictionary object, and the optional keyword
276 arguments.
277 fs: FileSystem abstracting interface to implement all the operations.
278 optional, defaults to None
279 out_dir: str. Specifies the directory this writer will use.
280 It takes precedence over the one specified in `__call__`
281 optional, defaults to None
282 kwds: Keyword arguments for the ``savefun``.
283
284 .. seealso::
285
286 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
287 """
288
289 _started = False
290 _finalized = False
291 _worker = None
292
293 def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):
294 super().__init__(fs=fs, out_dir=out_dir)
295 self._savefun = savefun
296 self._kwds = kwds
297 self._started = False
298 self._finalized = False
299
300 def __call__(self, filename, out_dir, target, *, savefun=None):
301 if savefun is None:
302 savefun = self._savefun
303 if self._started:
304 self._worker.join()
305 self._started = False
306 self._filename = filename
307 self._worker = self.create_worker(filename, out_dir, target,
308 savefun, **self._kwds)
309 self._worker.start()
310 self._started = True
311
312 def create_worker(self, filename, out_dir, target, savefun, **kwds):
313 """Creates a worker for the snapshot.
314
315 This method creates a thread or a process to take a snapshot. The
316 created worker must have :meth:`start` and :meth:`join` methods.
317
318 Args:
319 filename (str): Name of the file into which the serialized target
320 is saved. It is already formated string.
321 out_dir (str): Output directory. Passed by `manager.out`.
322 target (dict): Serialized object which will be saved.
323 kwds: Keyword arguments for the ``savefun``.
324
325 """
326 raise NotImplementedError
327
328 def finalize(self):
329 if self._started:
330 if not self._finalized:
331 self._worker.join()
332 self._started = False
333 self._finalized = True
334
335
336 class ThreadWriter(StandardWriter):
337 """Snapshot writer that uses a separate thread.
338
339 This class creates a new thread that invokes the actual saving function.
340
341 .. seealso::
342
343 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
344 """
345
346 def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):
347 super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)
348
349 def create_worker(self, filename, out_dir, target, **kwds):
350 return threading.Thread(
351 target=self.save,
352 args=(filename, out_dir, target, self._savefun),
353 kwargs=self._kwds)
354
355
356 class ProcessWriter(StandardWriter):
357 """Snapshot writer that uses a separate process.
358
359 This class creates a new process that invokes the actual saving function.
360
361 .. note::
362 Forking a new process from a MPI process might be danger. Consider
363 using :class:`ThreadWriter` instead of ``ProcessWriter`` if you are
364 using MPI.
365
366 .. seealso::
367
368 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
369 """
370
371 def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):
372 super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)
373
374 def create_worker(self, filename, out_dir, target, **kwds):
375 return multiprocessing.Process(
376 target=self.save,
377 args=(filename, out_dir, target, self._savefun),
378 kwargs=self._kwds)
379
380
381 class QueueWriter(Writer):
382 """Base class of queue snapshot writers.
383
384 This class is a base class of snapshot writers that use a queue.
385 A Queue is created when this class is constructed, and every time when
386 ``__call__`` is invoked, a snapshot task is put into the queue.
387
388 Args:
389 savefun: Callable object which is passed to the :meth:`create_task`
390 if the task is ``None``. It takes three arguments: the output file
391 path, the serialized dictionary object, and the optional keyword
392 arguments.
393 fs: FileSystem abstracting interface to implement all the operations.
394 optional, defaults to None
395 out_dir: str. Specifies the directory this writer will use.
396 It takes precedence over the one specified in `__call__`
397 optional, defaults to None
398 task: Callable object. Its ``__call__`` must have a same interface to
399 ``Writer.__call__``. This object is directly put into the queue.
400
401 .. seealso::
402
403 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
404 """
405
406 _started = False
407 _finalized = False
408 _queue = None
409 _consumer = None
410
411 def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):
412 super().__init__(fs=fs, out_dir=out_dir)
413 if task is None:
414 self._task = self.create_task(savefun)
415 else:
416 self._task = task
417 self._queue = self.create_queue()
418 self._consumer = self.create_consumer(self._queue)
419 self._consumer.start()
420 self._started = True
421 self._finalized = False
422
423 def __call__(self, filename, out_dir, target, *, savefun=None):
424 self._queue.put([self._task, filename, out_dir, target, savefun])
425
426 def create_task(self, savefun):
427 return SimpleWriter(savefun=savefun)
428
429 def create_queue(self):
430 raise NotImplementedError
431
432 def create_consumer(self, q):
433 raise NotImplementedError
434
435 def consume(self, q):
436 while True:
437 task = q.get()
438 if task is None:
439 q.task_done()
440 return
441 else:
442 task[0](task[1], task[2], task[3], savefun=task[4])
443 q.task_done()
444
445 def finalize(self):
446 if self._started:
447 if not self._finalized:
448 self._queue.put(None)
449 self._queue.join()
450 self._consumer.join()
451 self._started = False
452 self._finalized = True
453
454
455 class ThreadQueueWriter(QueueWriter):
456 """Snapshot writer that uses a thread queue.
457
458 This class creates a thread and a queue by :mod:`threading` and
459 :mod:`queue` modules
460 respectively. The thread will be a consumer of the queue, and the main
461 thread will be a producer of the queue.
462
463 .. seealso::
464
465 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
466 """
467
468 def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):
469 super().__init__(savefun=savefun, fs=fs, task=task, out_dir=out_dir)
470
471 def create_queue(self):
472 return queue.Queue()
473
474 def create_consumer(self, q):
475 return threading.Thread(target=self.consume, args=(q,))
476
477
478 class ProcessQueueWriter(QueueWriter):
479 """Snapshot writer that uses process queue.
480
481 This class creates a process and a queue by :mod:`multiprocessing` module.
482 The process will be a consumer of this queue, and the main process will be
483 a producer of this queue.
484
485 .. note::
486 Forking a new process from MPI process might be danger. Consider using
487 :class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are
488 using MPI.
489
490 .. seealso::
491
492 - :meth:`pytorch_pfn_extras.training.extensions.snapshot`
493 """
494
495 def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):
496 super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, task=task)
497
498 def create_queue(self):
499 return multiprocessing.JoinableQueue()
500
501 def create_consumer(self, q):
502 return multiprocessing.Process(target=self.consume, args=(q,))
503
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_pfn_extras/writing.py b/pytorch_pfn_extras/writing.py
--- a/pytorch_pfn_extras/writing.py
+++ b/pytorch_pfn_extras/writing.py
@@ -305,7 +305,7 @@
self._started = False
self._filename = filename
self._worker = self.create_worker(filename, out_dir, target,
- savefun, **self._kwds)
+ savefun=savefun, **self._kwds)
self._worker.start()
self._started = True
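Continuing the stand-in sketch shown after the issue text, one reading of why the keyword-based hand-off in the diff above resolves the error: a subclass whose `create_worker` only declares `**kwds` can absorb `savefun` as a keyword argument, whereas it cannot accept it positionally. The class below is illustration only, not the library's implementation.
```
# Stand-in illustration only; not the library's real implementation.
class FixedWriterLike:
    def __call__(self, filename, out_dir, target, *, savefun=None):
        # Keyword hand-off, matching the patched line in the diff above.
        return self.create_worker(filename, out_dir, target, savefun=savefun)

    def create_worker(self, filename, out_dir, target, **kwds):
        # savefun now arrives inside kwds; a real writer would forward it to save().
        return kwds["savefun"]


if __name__ == "__main__":
    worker_savefun = FixedWriterLike()("snapshot_iter_1", "/tmp", {"state": 1}, savefun=print)
    print(worker_savefun is print)  # True
```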
| {"golden_diff": "diff --git a/pytorch_pfn_extras/writing.py b/pytorch_pfn_extras/writing.py\n--- a/pytorch_pfn_extras/writing.py\n+++ b/pytorch_pfn_extras/writing.py\n@@ -305,7 +305,7 @@\n self._started = False\n self._filename = filename\n self._worker = self.create_worker(filename, out_dir, target,\n- savefun, **self._kwds)\n+ savefun=savefun, **self._kwds)\n self._worker.start()\n self._started = True\n", "issue": "Bug in when we use ProcessWriter with extensions.\n## problem statement\r\n\r\n\r\nI run a code that includes the below fragment. Then, the prompt throws the below error. \r\n\r\n```\r\n writer = writing.ProcessWriter(savefun=torch.save, out_dir=save_path)\r\n manager.extend(extensions.snapshot(writer=writer), trigger=(1, 'iteration'))\r\n manager.extend(extensions.snapshot(writer=writer, filename='gen_{.epoch}', target=generator.module), trigger=(10, 'iteration'))\r\n manager.extend(extensions.snapshot(), trigger=(10, 'epoch'))\r\n manager.extend(extensions.snapshot(filename='gen_{.epoch}', target=generator.module), trigger=(10, 'epoch')) \r\n```\r\n\r\n## error message\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"main_train.py\", line 232, in <module>\r\n train()\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"main_train.py\", line 226, in train\r\n Image.fromarray(x).save(f'{i}.png')\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/contextlib.py\", line 119, in __exit__\r\n next(self.gen)\r\n File \"/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/manager.py\", line 390, in run_iteration\r\n self.run_extensions()\r\n File \"/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/manager.py\", line 272, in run_extensions\r\n entry.extension(self)\r\n File \"/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/extensions/_snapshot.py\", line 397, in __call__\r\n self._make_snapshot(manager)\r\n File \"/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/training/extensions/_snapshot.py\", line 422, in _make_snapshot\r\n writer(filename, outdir, serialized_target, savefun=self._savefun)\r\n File \"/usr/local/lib/python3.7/site-packages/pytorch_pfn_extras/writing.py\", line 308, in __call__\r\n savefun, **self._kwds)\r\nTypeError: create_worker() takes 4 positional arguments but 5 were given\r\n```\r\n\r\n\r\nThe `create_worker` in StandardWriter accepts five arguments including `self`.\r\nHowever, `create_worker` in ProccessWriter and ThreadWriter accept only four arguments. 
\r\n\r\n\r\nhttps://github.com/pfnet/pytorch-pfn-extras/blob/8b16df9433a024c63d786c76f1e7ccbb88847283/pytorch_pfn_extras/writing.py#L307-L312\r\n\r\nhttps://github.com/pfnet/pytorch-pfn-extras/blob/8b16df9433a024c63d786c76f1e7ccbb88847283/pytorch_pfn_extras/writing.py#L374-L378\n", "before_files": [{"content": "import multiprocessing\nimport io\nimport os\nimport queue\nimport shutil\nimport sys\nimport threading\n\nimport torch\n\n\ndef open_wrapper(func):\n def wrapper(self, file_path, mode='rb',\n buffering=-1, encoding=None,\n errors=None, newline=None,\n closefd=True,\n opener=None):\n file_obj = func(self, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return self._wrap_fileobject(\n file_obj, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return wrapper\n\n\nclass _PosixFileStat:\n def __init__(self, _stat, filename):\n self.filename = filename\n self.last_modified = _stat.st_mtime\n self.last_accessed = _stat.st_atime\n self.created = _stat.st_ctime\n self.mode = _stat.st_mode\n self.size = _stat.st_size\n\n\nclass _PosixFileSystem(object):\n \"\"\"Class to abstract the calls to the FileSystem\n\n This class obeys the same interface as PFIO's POSIX\n Filesystems declarations. When using HDFS, PFIO\n handler can be used instead (requires PFIO>1.0).\n\n This class currently abstracts POSIX\n \"\"\"\n def __init__(self):\n pass\n\n def get_actual_path(self, path):\n return os.path.join(self.root, path)\n\n def _wrap_fileobject(self, file_obj, file_path, *args, **kwargs):\n return file_obj\n\n @property\n def root(self):\n return self._root\n\n @root.setter\n def root(self, root):\n self._root = root\n\n @open_wrapper\n def open(self, file_path, mode='r',\n buffering=-1, encoding=None, errors=None,\n newline=None, closefd=True, opener=None):\n\n return io.open(file_path, mode,\n buffering, encoding, errors,\n newline, closefd, opener)\n\n def list(self, path_or_prefix: str = None, recursive=False):\n if recursive:\n path_or_prefix = path_or_prefix.rstrip(\"/\")\n # plus 1 to include the trailing slash\n prefix_end_index = len(path_or_prefix) + 1\n yield from self._recursive_list(prefix_end_index, path_or_prefix)\n else:\n for file in os.scandir(path_or_prefix):\n yield file.name\n\n def _recursive_list(self, prefix_end_index: int, path: str):\n for file in os.scandir(path):\n yield file.path[prefix_end_index:]\n\n if file.is_dir():\n yield from self._recursive_list(prefix_end_index,\n file.path)\n\n def stat(self, path):\n return _PosixFileStat(os.stat(path), path)\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def isdir(self, file_path):\n return os.path.isdir(file_path)\n\n def mkdir(self, file_path, mode=0o777, *args, dir_fd=None):\n return os.mkdir(file_path, mode, *args, dir_fd=None)\n\n def makedirs(self, file_path, mode=0o777, exist_ok=False):\n return os.makedirs(file_path, mode, exist_ok)\n\n def exists(self, file_path):\n return os.path.exists(file_path)\n\n def rename(self, src, dst):\n try:\n return os.replace(src, dst)\n except OSError:\n print('Destination {} is a directory '\n 'but source is not'.format(src),\n file=sys.stderr)\n raise\n\n def remove(self, file_path, recursive=False):\n if recursive:\n return shutil.rmtree(file_path)\n if os.path.isdir(file_path):\n return os.rmdir(file_path)\n\n return os.remove(file_path)\n\n\nclass Writer:\n\n \"\"\"Base class of snapshot writers.\n\n 
:class:`~pytorch_pfn_extras.training.extensions.Snapshot`\n invokes ``__call__`` of this class every time when taking a snapshot.\n This class determines how the actual saving function will be invoked.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, fs=None, out_dir=None):\n self._post_save_hooks = []\n self.fs = fs\n self.out_dir = out_dir\n if fs is None:\n self.fs = _PosixFileSystem()\n\n self._initialized = False\n\n def __call__(self, filename, out_dir, target):\n \"\"\"Invokes the actual snapshot function.\n\n This method is invoked by a\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot` object\n every time it takes a snapshot.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is a concrete file name, i.e. not a pre-formatted\n template string.\n out_dir (str): Output directory. Corresponds to\n :py:attr:`ExtensionsManager.out\n <pytorch_pfn_extras.training.ExtensionsManager.out>`.\n target (dict): Serialized object which will be saved.\n \"\"\"\n raise NotImplementedError\n\n def initialize(self, out_dir):\n if not self.fs.exists(out_dir):\n self.fs.makedirs(out_dir)\n self._initialized = True\n\n def __del__(self):\n self.finalize()\n\n def finalize(self):\n \"\"\"Finalizes the writer.\n\n this method is invoked at the end of the training in\n :class:`~pytorch_pfn_extras.training.ExtensionsManager`,\n\n \"\"\"\n pass\n\n def save(self, filename, out_dir, target, savefun, **kwds):\n if self.out_dir is not None:\n out_dir = self.out_dir\n if not self._initialized:\n self.initialize(out_dir)\n # Some filesystems are not compatible with temp folders, etc\n # so we rely on raw temp files\n prefix = 'tmp_{}'.format(filename)\n dest = os.path.join(out_dir, filename)\n tmppath = os.path.join(out_dir, prefix)\n make_backup = self.fs.exists(dest)\n if make_backup:\n bak = '{}.bak'.format(dest)\n self.fs.rename(dest, bak)\n with self.fs.open(tmppath, 'wb') as f:\n # HDFS does not support overwrite\n savefun(target, f)\n self.fs.rename(tmppath, dest)\n if make_backup:\n self.fs.remove(bak)\n\n self._post_save()\n\n def _add_cleanup_hook(self, hook_fun):\n \"\"\"Adds cleanup hook function.\n\n Technically, arbitrary user-defined hook can be called, but\n this is intended for cleaning up stale snapshots.\n\n Args:\n hook_fun (callable): callable function to be called\n right after save is done. It takes no arguments.\n\n \"\"\"\n self._post_save_hooks.append(hook_fun)\n\n def _post_save(self):\n for hook in self._post_save_hooks:\n hook()\n\n\nclass SimpleWriter(Writer):\n \"\"\"The most simple snapshot writer.\n\n This class just passes the arguments to the actual saving function.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n self.save(filename, out_dir, target, savefun, **self._kwds)\n\n\nclass StandardWriter(Writer):\n \"\"\"Base class of snapshot writers which use thread or process.\n\n This class creates a new thread or a process every time when ``__call__``\n is invoked.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _worker = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n self._started = False\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n if self._started:\n self._worker.join()\n self._started = False\n self._filename = filename\n self._worker = self.create_worker(filename, out_dir, target,\n savefun, **self._kwds)\n self._worker.start()\n self._started = True\n\n def create_worker(self, filename, out_dir, target, savefun, **kwds):\n \"\"\"Creates a worker for the snapshot.\n\n This method creates a thread or a process to take a snapshot. The\n created worker must have :meth:`start` and :meth:`join` methods.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is already formated string.\n out_dir (str): Output directory. Passed by `manager.out`.\n target (dict): Serialized object which will be saved.\n kwds: Keyword arguments for the ``savefun``.\n\n \"\"\"\n raise NotImplementedError\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._worker.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate thread.\n\n This class creates a new thread that invokes the actual saving function.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return threading.Thread(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass ProcessWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate process.\n\n This class creates a new process that invokes the actual saving function.\n\n .. note::\n Forking a new process from a MPI process might be danger. Consider\n using :class:`ThreadWriter` instead of ``ProcessWriter`` if you are\n using MPI.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return multiprocessing.Process(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass QueueWriter(Writer):\n \"\"\"Base class of queue snapshot writers.\n\n This class is a base class of snapshot writers that use a queue.\n A Queue is created when this class is constructed, and every time when\n ``__call__`` is invoked, a snapshot task is put into the queue.\n\n Args:\n savefun: Callable object which is passed to the :meth:`create_task`\n if the task is ``None``. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n task: Callable object. Its ``__call__`` must have a same interface to\n ``Writer.__call__``. This object is directly put into the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _queue = None\n _consumer = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(fs=fs, out_dir=out_dir)\n if task is None:\n self._task = self.create_task(savefun)\n else:\n self._task = task\n self._queue = self.create_queue()\n self._consumer = self.create_consumer(self._queue)\n self._consumer.start()\n self._started = True\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n self._queue.put([self._task, filename, out_dir, target, savefun])\n\n def create_task(self, savefun):\n return SimpleWriter(savefun=savefun)\n\n def create_queue(self):\n raise NotImplementedError\n\n def create_consumer(self, q):\n raise NotImplementedError\n\n def consume(self, q):\n while True:\n task = q.get()\n if task is None:\n q.task_done()\n return\n else:\n task[0](task[1], task[2], task[3], savefun=task[4])\n q.task_done()\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._queue.put(None)\n self._queue.join()\n self._consumer.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses a thread queue.\n\n This class creates a thread and a queue by :mod:`threading` and\n :mod:`queue` modules\n respectively. The thread will be a consumer of the queue, and the main\n thread will be a producer of the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, task=task, out_dir=out_dir)\n\n def create_queue(self):\n return queue.Queue()\n\n def create_consumer(self, q):\n return threading.Thread(target=self.consume, args=(q,))\n\n\nclass ProcessQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses process queue.\n\n This class creates a process and a queue by :mod:`multiprocessing` module.\n The process will be a consumer of this queue, and the main process will be\n a producer of this queue.\n\n .. 
note::\n Forking a new process from MPI process might be danger. Consider using\n :class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are\n using MPI.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, task=task)\n\n def create_queue(self):\n return multiprocessing.JoinableQueue()\n\n def create_consumer(self, q):\n return multiprocessing.Process(target=self.consume, args=(q,))\n", "path": "pytorch_pfn_extras/writing.py"}], "after_files": [{"content": "import multiprocessing\nimport io\nimport os\nimport queue\nimport shutil\nimport sys\nimport threading\n\nimport torch\n\n\ndef open_wrapper(func):\n def wrapper(self, file_path, mode='rb',\n buffering=-1, encoding=None,\n errors=None, newline=None,\n closefd=True,\n opener=None):\n file_obj = func(self, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return self._wrap_fileobject(\n file_obj, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return wrapper\n\n\nclass _PosixFileStat:\n def __init__(self, _stat, filename):\n self.filename = filename\n self.last_modified = _stat.st_mtime\n self.last_accessed = _stat.st_atime\n self.created = _stat.st_ctime\n self.mode = _stat.st_mode\n self.size = _stat.st_size\n\n\nclass _PosixFileSystem(object):\n \"\"\"Class to abstract the calls to the FileSystem\n\n This class obeys the same interface as PFIO's POSIX\n Filesystems declarations. When using HDFS, PFIO\n handler can be used instead (requires PFIO>1.0).\n\n This class currently abstracts POSIX\n \"\"\"\n def __init__(self):\n pass\n\n def get_actual_path(self, path):\n return os.path.join(self.root, path)\n\n def _wrap_fileobject(self, file_obj, file_path, *args, **kwargs):\n return file_obj\n\n @property\n def root(self):\n return self._root\n\n @root.setter\n def root(self, root):\n self._root = root\n\n @open_wrapper\n def open(self, file_path, mode='r',\n buffering=-1, encoding=None, errors=None,\n newline=None, closefd=True, opener=None):\n\n return io.open(file_path, mode,\n buffering, encoding, errors,\n newline, closefd, opener)\n\n def list(self, path_or_prefix: str = None, recursive=False):\n if recursive:\n path_or_prefix = path_or_prefix.rstrip(\"/\")\n # plus 1 to include the trailing slash\n prefix_end_index = len(path_or_prefix) + 1\n yield from self._recursive_list(prefix_end_index, path_or_prefix)\n else:\n for file in os.scandir(path_or_prefix):\n yield file.name\n\n def _recursive_list(self, prefix_end_index: int, path: str):\n for file in os.scandir(path):\n yield file.path[prefix_end_index:]\n\n if file.is_dir():\n yield from self._recursive_list(prefix_end_index,\n file.path)\n\n def stat(self, path):\n return _PosixFileStat(os.stat(path), path)\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def isdir(self, file_path):\n return os.path.isdir(file_path)\n\n def mkdir(self, file_path, mode=0o777, *args, dir_fd=None):\n return os.mkdir(file_path, mode, *args, dir_fd=None)\n\n def makedirs(self, file_path, mode=0o777, exist_ok=False):\n return os.makedirs(file_path, mode, exist_ok)\n\n def exists(self, file_path):\n return os.path.exists(file_path)\n\n def rename(self, src, dst):\n try:\n return os.replace(src, dst)\n except OSError:\n print('Destination {} is a directory '\n 'but source is 
not'.format(src),\n file=sys.stderr)\n raise\n\n def remove(self, file_path, recursive=False):\n if recursive:\n return shutil.rmtree(file_path)\n if os.path.isdir(file_path):\n return os.rmdir(file_path)\n\n return os.remove(file_path)\n\n\nclass Writer:\n\n \"\"\"Base class of snapshot writers.\n\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot`\n invokes ``__call__`` of this class every time when taking a snapshot.\n This class determines how the actual saving function will be invoked.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, fs=None, out_dir=None):\n self._post_save_hooks = []\n self.fs = fs\n self.out_dir = out_dir\n if fs is None:\n self.fs = _PosixFileSystem()\n\n self._initialized = False\n\n def __call__(self, filename, out_dir, target):\n \"\"\"Invokes the actual snapshot function.\n\n This method is invoked by a\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot` object\n every time it takes a snapshot.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is a concrete file name, i.e. not a pre-formatted\n template string.\n out_dir (str): Output directory. Corresponds to\n :py:attr:`ExtensionsManager.out\n <pytorch_pfn_extras.training.ExtensionsManager.out>`.\n target (dict): Serialized object which will be saved.\n \"\"\"\n raise NotImplementedError\n\n def initialize(self, out_dir):\n if not self.fs.exists(out_dir):\n self.fs.makedirs(out_dir)\n self._initialized = True\n\n def __del__(self):\n self.finalize()\n\n def finalize(self):\n \"\"\"Finalizes the writer.\n\n this method is invoked at the end of the training in\n :class:`~pytorch_pfn_extras.training.ExtensionsManager`,\n\n \"\"\"\n pass\n\n def save(self, filename, out_dir, target, savefun, **kwds):\n if self.out_dir is not None:\n out_dir = self.out_dir\n if not self._initialized:\n self.initialize(out_dir)\n # Some filesystems are not compatible with temp folders, etc\n # so we rely on raw temp files\n prefix = 'tmp_{}'.format(filename)\n dest = os.path.join(out_dir, filename)\n tmppath = os.path.join(out_dir, prefix)\n make_backup = self.fs.exists(dest)\n if make_backup:\n bak = '{}.bak'.format(dest)\n self.fs.rename(dest, bak)\n with self.fs.open(tmppath, 'wb') as f:\n # HDFS does not support overwrite\n savefun(target, f)\n self.fs.rename(tmppath, dest)\n if make_backup:\n self.fs.remove(bak)\n\n self._post_save()\n\n def _add_cleanup_hook(self, hook_fun):\n \"\"\"Adds cleanup hook function.\n\n Technically, arbitrary user-defined hook can be called, but\n this is intended for cleaning up stale snapshots.\n\n Args:\n hook_fun (callable): callable function to be called\n right after save is done. It takes no arguments.\n\n \"\"\"\n self._post_save_hooks.append(hook_fun)\n\n def _post_save(self):\n for hook in self._post_save_hooks:\n hook()\n\n\nclass SimpleWriter(Writer):\n \"\"\"The most simple snapshot writer.\n\n This class just passes the arguments to the actual saving function.\n\n Args:\n savefun: Callable object. 
It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n self.save(filename, out_dir, target, savefun, **self._kwds)\n\n\nclass StandardWriter(Writer):\n \"\"\"Base class of snapshot writers which use thread or process.\n\n This class creates a new thread or a process every time when ``__call__``\n is invoked.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _worker = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n self._started = False\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n if self._started:\n self._worker.join()\n self._started = False\n self._filename = filename\n self._worker = self.create_worker(filename, out_dir, target,\n savefun=savefun, **self._kwds)\n self._worker.start()\n self._started = True\n\n def create_worker(self, filename, out_dir, target, savefun, **kwds):\n \"\"\"Creates a worker for the snapshot.\n\n This method creates a thread or a process to take a snapshot. The\n created worker must have :meth:`start` and :meth:`join` methods.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is already formated string.\n out_dir (str): Output directory. Passed by `manager.out`.\n target (dict): Serialized object which will be saved.\n kwds: Keyword arguments for the ``savefun``.\n\n \"\"\"\n raise NotImplementedError\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._worker.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate thread.\n\n This class creates a new thread that invokes the actual saving function.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return threading.Thread(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass ProcessWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate process.\n\n This class creates a new process that invokes the actual saving function.\n\n .. note::\n Forking a new process from a MPI process might be danger. Consider\n using :class:`ThreadWriter` instead of ``ProcessWriter`` if you are\n using MPI.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return multiprocessing.Process(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass QueueWriter(Writer):\n \"\"\"Base class of queue snapshot writers.\n\n This class is a base class of snapshot writers that use a queue.\n A Queue is created when this class is constructed, and every time when\n ``__call__`` is invoked, a snapshot task is put into the queue.\n\n Args:\n savefun: Callable object which is passed to the :meth:`create_task`\n if the task is ``None``. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n task: Callable object. Its ``__call__`` must have a same interface to\n ``Writer.__call__``. This object is directly put into the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _queue = None\n _consumer = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(fs=fs, out_dir=out_dir)\n if task is None:\n self._task = self.create_task(savefun)\n else:\n self._task = task\n self._queue = self.create_queue()\n self._consumer = self.create_consumer(self._queue)\n self._consumer.start()\n self._started = True\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n self._queue.put([self._task, filename, out_dir, target, savefun])\n\n def create_task(self, savefun):\n return SimpleWriter(savefun=savefun)\n\n def create_queue(self):\n raise NotImplementedError\n\n def create_consumer(self, q):\n raise NotImplementedError\n\n def consume(self, q):\n while True:\n task = q.get()\n if task is None:\n q.task_done()\n return\n else:\n task[0](task[1], task[2], task[3], savefun=task[4])\n q.task_done()\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._queue.put(None)\n self._queue.join()\n self._consumer.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses a thread queue.\n\n This class creates a thread and a queue by :mod:`threading` and\n :mod:`queue` modules\n respectively. 
The thread will be a consumer of the queue, and the main\n thread will be a producer of the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, task=task, out_dir=out_dir)\n\n def create_queue(self):\n return queue.Queue()\n\n def create_consumer(self, q):\n return threading.Thread(target=self.consume, args=(q,))\n\n\nclass ProcessQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses process queue.\n\n This class creates a process and a queue by :mod:`multiprocessing` module.\n The process will be a consumer of this queue, and the main process will be\n a producer of this queue.\n\n .. note::\n Forking a new process from MPI process might be danger. Consider using\n :class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are\n using MPI.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, task=task)\n\n def create_queue(self):\n return multiprocessing.JoinableQueue()\n\n def create_consumer(self, q):\n return multiprocessing.Process(target=self.consume, args=(q,))\n", "path": "pytorch_pfn_extras/writing.py"}]} |
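The `QueueWriter`/`ThreadQueueWriter` docstrings in the cell above describe a producer–consumer snapshot pattern: the main thread enqueues save tasks, a worker thread drains the queue, and a `None` sentinel plus `queue.join()` handles shutdown. Below is a minimal standard-library sketch of that pattern. It is an illustration only, not the `pytorch_pfn_extras` API; the file name and payload are made up for the example.

```python
import queue
import threading

def consume(q):
    # Consumer loop, mirroring QueueWriter.consume above:
    # a None task is the shutdown sentinel, anything else is written out.
    while True:
        task = q.get()
        if task is None:
            q.task_done()
            return
        filename, payload = task
        with open(filename, "wb") as f:   # stand-in for savefun(target, f)
            f.write(payload)
        q.task_done()

q = queue.Queue()
consumer = threading.Thread(target=consume, args=(q,))
consumer.start()

q.put(("snapshot_iter_100.pt", b"serialized state"))  # producer side
q.put(None)   # ask the consumer to stop
q.join()      # block until every queued task reports task_done()
consumer.join()
```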
gh_patches_debug_1351 | rasdani/github-patches | git_diff | facebookresearch__Mephisto-489 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Probably getting null Worker ID in the frontend
Hi,
Thank you for the great crowdsourcing platform. We are using Mephisto for a chat-style data collection.
When the task was live on AMT, many of the workers reported that they saw the following message:
`Sorry, you have already worked on the maximum number of these tasks available to you, or are no longer eligible to work on this task.`
However, we did not set the property `maximum_units_per_worker` in our config.
We did not face any errors in `localhost` or AMT Sandbox.
From the [code](https://github.com/facebookresearch/Mephisto/blob/c571bf9c2e6395553e71ce922c14a14e363ad7d5/packages/mephisto-task/src/index.js#L109), it looks like this message arises when the `workerId` received is `null`.
Is there any other scenario, apart from exceeding `maximum_units_per_worker`, that can result in a null `workerId`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mephisto/data_model/unit.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7
8 from abc import ABC
9 from mephisto.data_model.constants.assignment_state import AssignmentState
10 from mephisto.data_model.task import Task
11 from mephisto.data_model.task_run import TaskRun
12 from mephisto.data_model.agent import Agent
13 from mephisto.data_model.db_backed_meta import MephistoDBBackedABCMeta
14 from mephisto.abstractions.blueprint import AgentState
15 from mephisto.data_model.requester import Requester
16 from typing import Optional, Mapping, Dict, Any, Type, TYPE_CHECKING
17
18 if TYPE_CHECKING:
19 from mephisto.abstractions.database import MephistoDB
20 from mephisto.data_model.worker import Worker
21 from mephisto.abstractions.crowd_provider import CrowdProvider
22 from mephisto.data_model.assignment import Assignment
23
24 import os
25
26 from mephisto.operations.logger_core import get_logger
27
28 logger = get_logger(name=__name__)
29
30
31 class Unit(metaclass=MephistoDBBackedABCMeta):
32 """
33 This class tracks the status of an individual worker's contribution to a
34 higher level assignment. It is the smallest 'unit' of work to complete
35 the assignment, and this class is only responsible for checking
36 the status of that work itself being done.
37
38 It should be extended for usage with a specific crowd provider
39 """
40
41 def __init__(
42 self, db: "MephistoDB", db_id: str, row: Optional[Mapping[str, Any]] = None
43 ):
44 self.db: "MephistoDB" = db
45 if row is None:
46 row = db.get_unit(db_id)
47 assert row is not None, f"Given db_id {db_id} did not exist in given db"
48 self.db_id: str = row["unit_id"]
49 self.assignment_id = row["assignment_id"]
50 self.unit_index = row["unit_index"]
51 self.pay_amount = row["pay_amount"]
52 self.agent_id = row["agent_id"]
53 self.provider_type = row["provider_type"]
54 self.db_status = row["status"]
55 self.task_type = row["task_type"]
56 self.task_id = row["task_id"]
57 self.task_run_id = row["task_run_id"]
58 self.sandbox = row["sandbox"]
59 self.requester_id = row["requester_id"]
60 self.worker_id = row["worker_id"]
61
62 # Deferred loading of related entities
63 self.__task: Optional["Task"] = None
64 self.__task_run: Optional["TaskRun"] = None
65 self.__assignment: Optional["Assignment"] = None
66 self.__requester: Optional["Requester"] = None
67 self.__agent: Optional["Agent"] = None
68 self.__worker: Optional["Worker"] = None
69
70 def __new__(
71 cls, db: "MephistoDB", db_id: str, row: Optional[Mapping[str, Any]] = None
72 ) -> "Unit":
73 """
74 The new method is overridden to be able to automatically generate
75 the expected Unit class without needing to specifically find it
76 for a given db_id. As such it is impossible to create a Unit
77 as you will instead be returned the correct Unit class according to
78 the crowdprovider associated with this Unit.
79 """
80 if cls == Unit:
81 # We are trying to construct a Unit, find what type to use and
82 # create that instead
83 from mephisto.operations.registry import get_crowd_provider_from_type
84
85 if row is None:
86 row = db.get_unit(db_id)
87 assert row is not None, f"Given db_id {db_id} did not exist in given db"
88 correct_class = get_crowd_provider_from_type(row["provider_type"]).UnitClass
89 return super().__new__(correct_class)
90 else:
91 # We are constructing another instance directly
92 return super().__new__(cls)
93
94 def get_crowd_provider_class(self) -> Type["CrowdProvider"]:
95 """Get the CrowdProvider class that manages this Unit"""
96 from mephisto.operations.registry import get_crowd_provider_from_type
97
98 return get_crowd_provider_from_type(self.provider_type)
99
100 def get_assignment_data(self) -> Optional[Dict[str, Any]]:
101 """Return the specific assignment data for this assignment"""
102 return self.get_assignment().get_assignment_data()
103
104 def sync_status(self) -> None:
105 """
106 Ensure that the queried status from this unit and the db status
107 are up to date
108 """
109 # TODO(102) this will need to be run periodically/on crashes
110 # to sync any lost state
111 self.set_db_status(self.get_status())
112
113 def get_db_status(self) -> str:
114 """
115 Return the status as currently stored in the database
116 """
117 if self.db_status in AssignmentState.final_unit():
118 return self.db_status
119 row = self.db.get_unit(self.db_id)
120 assert row is not None, f"Unit {self.db_id} stopped existing in the db..."
121 return row["status"]
122
123 def set_db_status(self, status: str) -> None:
124 """
125 Set the status reflected in the database for this Unit
126 """
127 assert (
128 status in AssignmentState.valid_unit()
129 ), f"{status} not valid Assignment Status, not in {AssignmentState.valid_unit()}"
130 if status == self.db_status:
131 return
132 logger.debug(f"Updating status for {self} to {status}")
133 self.db_status = status
134 self.db.update_unit(self.db_id, status=status)
135
136 def get_assignment(self) -> "Assignment":
137 """
138 Return the assignment that this Unit is part of.
139 """
140 if self.__assignment is None:
141 from mephisto.data_model.assignment import Assignment
142
143 self.__assignment = Assignment(self.db, self.assignment_id)
144 return self.__assignment
145
146 def get_task_run(self) -> TaskRun:
147 """
148 Return the task run that this assignment is part of
149 """
150 if self.__task_run is None:
151 if self.__assignment is not None:
152 self.__task_run = self.__assignment.get_task_run()
153 else:
154 self.__task_run = TaskRun(self.db, self.task_run_id)
155 return self.__task_run
156
157 def get_task(self) -> Task:
158 """
159 Return the task that this assignment is part of
160 """
161 if self.__task is None:
162 if self.__assignment is not None:
163 self.__task = self.__assignment.get_task()
164 elif self.__task_run is not None:
165 self.__task = self.__task_run.get_task()
166 else:
167 self.__task = Task(self.db, self.task_id)
168 return self.__task
169
170 def get_requester(self) -> "Requester":
171 """
172 Return the requester who offered this Unit
173 """
174 if self.__requester is None:
175 if self.__assignment is not None:
176 self.__requester = self.__assignment.get_requester()
177 elif self.__task_run is not None:
178 self.__requester = self.__task_run.get_requester()
179 else:
180 self.__requester = Requester(self.db, self.requester_id)
181 return self.__requester
182
183 def clear_assigned_agent(self) -> None:
184 """Clear the agent that is assigned to this unit"""
185 logger.debug(f"Clearing assigned agent {self.agent_id} from {self}")
186 self.db.clear_unit_agent_assignment(self.db_id)
187 self.get_task_run().clear_reservation(self)
188 self.agent_id = None
189 self.__agent = None
190
191 def get_assigned_agent(self) -> Optional[Agent]:
192 """
193 Get the agent assigned to this Unit if there is one, else return None
194 """
195 # In these statuses, we know the agent isn't changing anymore, and thus will
196 # not need to be re-queried
197 # TODO(#97) add test to ensure this behavior/assumption holds always
198 if self.db_status in AssignmentState.final_unit():
199 if self.agent_id is None:
200 return None
201 return Agent(self.db, self.agent_id)
202
203 # Query the database to get the most up-to-date assignment, as this can
204 # change after instantiation if the Unit status isn't final
205 unit_copy = Unit(self.db, self.db_id)
206 self.agent_id = unit_copy.agent_id
207 if self.agent_id is not None:
208 return Agent(self.db, self.agent_id)
209 return None
210
211 @staticmethod
212 def _register_unit(
213 db: "MephistoDB",
214 assignment: "Assignment",
215 index: int,
216 pay_amount: float,
217 provider_type: str,
218 ) -> "Unit":
219 """
220 Create an entry for this unit in the database
221 """
222 db_id = db.new_unit(
223 assignment.task_id,
224 assignment.task_run_id,
225 assignment.requester_id,
226 assignment.db_id,
227 index,
228 pay_amount,
229 provider_type,
230 assignment.task_type,
231 )
232 unit = Unit(db, db_id)
233 logger.debug(f"Registered new unit {unit} for {assignment}.")
234 return unit
235
236 def get_pay_amount(self) -> float:
237 """
238 Return the amount that this Unit is costing against the budget,
239 calculating additional fees as relevant
240 """
241 return self.pay_amount
242
243 def __repr__(self) -> str:
244 return f"{self.__class__.__name__}({self.db_id}, {self.db_status})"
245
246 # Children classes may need to override the following
247
248 def get_status(self) -> str:
249 """
250 Get the status of this unit, as determined by whether there's
251 a worker working on it at the moment, and any other possible states. Should
252 return one of UNIT_STATUSES
253
254 Accurate status is crowd-provider dependent, and thus this method should be
255 defined in the child class to ensure that the local record matches
256 the ground truth in the provider
257 """
258 from mephisto.abstractions.blueprint import AgentState
259
260 db_status = self.db_status
261 computed_status = AssignmentState.LAUNCHED
262
263 agent = self.get_assigned_agent()
264 if agent is None:
265 row = self.db.get_unit(self.db_id)
266 computed_status = row["status"]
267 else:
268 agent_status = agent.get_status()
269 if agent_status == AgentState.STATUS_NONE:
270 computed_status = AssignmentState.LAUNCHED
271 elif agent_status in [
272 AgentState.STATUS_ACCEPTED,
273 AgentState.STATUS_ONBOARDING,
274 AgentState.STATUS_PARTNER_DISCONNECT,
275 AgentState.STATUS_WAITING,
276 AgentState.STATUS_IN_TASK,
277 ]:
278 computed_status = AssignmentState.ASSIGNED
279 elif agent_status in [AgentState.STATUS_COMPLETED]:
280 computed_status = AssignmentState.COMPLETED
281 elif agent_status in [AgentState.STATUS_SOFT_REJECTED]:
282 computed_status = AssignmentState.SOFT_REJECTED
283 elif agent_status in [AgentState.STATUS_EXPIRED]:
284 computed_status = AssignmentState.EXPIRED
285 elif agent_status in [
286 AgentState.STATUS_DISCONNECT,
287 AgentState.STATUS_RETURNED,
288 ]:
289 computed_status = AssignmentState.ASSIGNED
290 elif agent_status == AgentState.STATUS_APPROVED:
291 computed_status = AssignmentState.ACCEPTED
292 elif agent_status == AgentState.STATUS_REJECTED:
293 computed_status = AssignmentState.REJECTED
294
295 if computed_status != db_status:
296 self.set_db_status(computed_status)
297
298 return computed_status
299
300 # Children classes should implement the below methods
301
302 def launch(self, task_url: str) -> None:
303 """
304 Make this Unit available on the crowdsourcing vendor. Depending on
305 the task type, this could mean a number of different setup steps.
306
307 Some crowd providers require setting up a configuration for the
308 very first launch, and this method should call a helper to manage
309 that step if necessary.
310 """
311 raise NotImplementedError()
312
313 def expire(self) -> float:
314 """
315 Expire this unit, removing it from being workable on the vendor.
316 Return the maximum time needed to wait before we know it's taken down.
317 """
318 raise NotImplementedError()
319
320 def is_expired(self) -> bool:
321 """Determine if this unit is expired as according to the vendor."""
322 raise NotImplementedError()
323
324 @staticmethod
325 def new(
326 db: "MephistoDB", assignment: "Assignment", index: int, pay_amount: float
327 ) -> "Unit":
328 """
329 Create a Unit for the given assignment
330
331 Implementation should return the result of _register_unit when sure the unit
332 can be successfully created to have it put into the db.
333 """
334 raise NotImplementedError()
335
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mephisto/data_model/unit.py b/mephisto/data_model/unit.py
--- a/mephisto/data_model/unit.py
+++ b/mephisto/data_model/unit.py
@@ -258,6 +258,11 @@
from mephisto.abstractions.blueprint import AgentState
db_status = self.db_status
+
+ # Expiration is a terminal state, and shouldn't be changed
+ if db_status == AssignmentState.EXPIRED:
+ return db_status
+
computed_status = AssignmentState.LAUNCHED
agent = self.get_assigned_agent()
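The patch above makes `AssignmentState.EXPIRED` a terminal state in `Unit.get_status`. A minimal sketch of the behavioural difference, reduced to plain strings rather than the real Mephisto classes (illustrative only, not the library API):

```python
# Agent-status -> unit-status mapping, reduced from Unit.get_status above.
AGENT_TO_UNIT = {"returned": "assigned", "disconnect": "assigned",
                 "completed": "completed", "expired": "expired"}

def get_status(db_status, agent_status, guard_expired):
    if guard_expired and db_status == "expired":
        return db_status                      # terminal: never recomputed
    return AGENT_TO_UNIT.get(agent_status, "launched")

# Without the guard, an expired unit whose agent later reports "returned"
# is flipped back to "assigned"; with the guard it stays "expired".
print(get_status("expired", "returned", guard_expired=False))  # assigned
print(get_status("expired", "returned", guard_expired=True))   # expired
```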
| {"golden_diff": "diff --git a/mephisto/data_model/unit.py b/mephisto/data_model/unit.py\n--- a/mephisto/data_model/unit.py\n+++ b/mephisto/data_model/unit.py\n@@ -258,6 +258,11 @@\n from mephisto.abstractions.blueprint import AgentState\n \n db_status = self.db_status\n+\n+ # Expiration is a terminal state, and shouldn't be changed\n+ if db_status == AssignmentState.EXPIRED:\n+ return db_status\n+\n computed_status = AssignmentState.LAUNCHED\n \n agent = self.get_assigned_agent()\n", "issue": "Probably getting null Worker ID in the frontend\nHi,\r\n\r\nThank you for the great crowdsourcing platform. We are using Mephisto for a chat-style data collection.\r\n\r\nWhen the task was live on AMT, many of the workers reported that they saw the following message:\r\n\r\n`Sorry, you have already worked on the maximum number of these tasks available to you, or are no longer eligible to work on this task.`\r\n\r\nHowever, we did not set the property `maximum_units_per_worker` in our config.\r\nWe did not face any errors in `localhost` or AMT Sandbox.\r\n\r\nFrom the [code](https://github.com/facebookresearch/Mephisto/blob/c571bf9c2e6395553e71ce922c14a14e363ad7d5/packages/mephisto-task/src/index.js#L109), it looks like this message arises when the `workerId` received is `null`.\r\nIs there any other scenario, apart from exceeding `maximum_units_per_worker`, that can result in a null `workerId`?\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom abc import ABC\nfrom mephisto.data_model.constants.assignment_state import AssignmentState\nfrom mephisto.data_model.task import Task\nfrom mephisto.data_model.task_run import TaskRun\nfrom mephisto.data_model.agent import Agent\nfrom mephisto.data_model.db_backed_meta import MephistoDBBackedABCMeta\nfrom mephisto.abstractions.blueprint import AgentState\nfrom mephisto.data_model.requester import Requester\nfrom typing import Optional, Mapping, Dict, Any, Type, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mephisto.abstractions.database import MephistoDB\n from mephisto.data_model.worker import Worker\n from mephisto.abstractions.crowd_provider import CrowdProvider\n from mephisto.data_model.assignment import Assignment\n\nimport os\n\nfrom mephisto.operations.logger_core import get_logger\n\nlogger = get_logger(name=__name__)\n\n\nclass Unit(metaclass=MephistoDBBackedABCMeta):\n \"\"\"\n This class tracks the status of an individual worker's contribution to a\n higher level assignment. 
It is the smallest 'unit' of work to complete\n the assignment, and this class is only responsible for checking\n the status of that work itself being done.\n\n It should be extended for usage with a specific crowd provider\n \"\"\"\n\n def __init__(\n self, db: \"MephistoDB\", db_id: str, row: Optional[Mapping[str, Any]] = None\n ):\n self.db: \"MephistoDB\" = db\n if row is None:\n row = db.get_unit(db_id)\n assert row is not None, f\"Given db_id {db_id} did not exist in given db\"\n self.db_id: str = row[\"unit_id\"]\n self.assignment_id = row[\"assignment_id\"]\n self.unit_index = row[\"unit_index\"]\n self.pay_amount = row[\"pay_amount\"]\n self.agent_id = row[\"agent_id\"]\n self.provider_type = row[\"provider_type\"]\n self.db_status = row[\"status\"]\n self.task_type = row[\"task_type\"]\n self.task_id = row[\"task_id\"]\n self.task_run_id = row[\"task_run_id\"]\n self.sandbox = row[\"sandbox\"]\n self.requester_id = row[\"requester_id\"]\n self.worker_id = row[\"worker_id\"]\n\n # Deferred loading of related entities\n self.__task: Optional[\"Task\"] = None\n self.__task_run: Optional[\"TaskRun\"] = None\n self.__assignment: Optional[\"Assignment\"] = None\n self.__requester: Optional[\"Requester\"] = None\n self.__agent: Optional[\"Agent\"] = None\n self.__worker: Optional[\"Worker\"] = None\n\n def __new__(\n cls, db: \"MephistoDB\", db_id: str, row: Optional[Mapping[str, Any]] = None\n ) -> \"Unit\":\n \"\"\"\n The new method is overridden to be able to automatically generate\n the expected Unit class without needing to specifically find it\n for a given db_id. As such it is impossible to create a Unit\n as you will instead be returned the correct Unit class according to\n the crowdprovider associated with this Unit.\n \"\"\"\n if cls == Unit:\n # We are trying to construct a Unit, find what type to use and\n # create that instead\n from mephisto.operations.registry import get_crowd_provider_from_type\n\n if row is None:\n row = db.get_unit(db_id)\n assert row is not None, f\"Given db_id {db_id} did not exist in given db\"\n correct_class = get_crowd_provider_from_type(row[\"provider_type\"]).UnitClass\n return super().__new__(correct_class)\n else:\n # We are constructing another instance directly\n return super().__new__(cls)\n\n def get_crowd_provider_class(self) -> Type[\"CrowdProvider\"]:\n \"\"\"Get the CrowdProvider class that manages this Unit\"\"\"\n from mephisto.operations.registry import get_crowd_provider_from_type\n\n return get_crowd_provider_from_type(self.provider_type)\n\n def get_assignment_data(self) -> Optional[Dict[str, Any]]:\n \"\"\"Return the specific assignment data for this assignment\"\"\"\n return self.get_assignment().get_assignment_data()\n\n def sync_status(self) -> None:\n \"\"\"\n Ensure that the queried status from this unit and the db status\n are up to date\n \"\"\"\n # TODO(102) this will need to be run periodically/on crashes\n # to sync any lost state\n self.set_db_status(self.get_status())\n\n def get_db_status(self) -> str:\n \"\"\"\n Return the status as currently stored in the database\n \"\"\"\n if self.db_status in AssignmentState.final_unit():\n return self.db_status\n row = self.db.get_unit(self.db_id)\n assert row is not None, f\"Unit {self.db_id} stopped existing in the db...\"\n return row[\"status\"]\n\n def set_db_status(self, status: str) -> None:\n \"\"\"\n Set the status reflected in the database for this Unit\n \"\"\"\n assert (\n status in AssignmentState.valid_unit()\n ), f\"{status} not valid Assignment Status, not in 
{AssignmentState.valid_unit()}\"\n if status == self.db_status:\n return\n logger.debug(f\"Updating status for {self} to {status}\")\n self.db_status = status\n self.db.update_unit(self.db_id, status=status)\n\n def get_assignment(self) -> \"Assignment\":\n \"\"\"\n Return the assignment that this Unit is part of.\n \"\"\"\n if self.__assignment is None:\n from mephisto.data_model.assignment import Assignment\n\n self.__assignment = Assignment(self.db, self.assignment_id)\n return self.__assignment\n\n def get_task_run(self) -> TaskRun:\n \"\"\"\n Return the task run that this assignment is part of\n \"\"\"\n if self.__task_run is None:\n if self.__assignment is not None:\n self.__task_run = self.__assignment.get_task_run()\n else:\n self.__task_run = TaskRun(self.db, self.task_run_id)\n return self.__task_run\n\n def get_task(self) -> Task:\n \"\"\"\n Return the task that this assignment is part of\n \"\"\"\n if self.__task is None:\n if self.__assignment is not None:\n self.__task = self.__assignment.get_task()\n elif self.__task_run is not None:\n self.__task = self.__task_run.get_task()\n else:\n self.__task = Task(self.db, self.task_id)\n return self.__task\n\n def get_requester(self) -> \"Requester\":\n \"\"\"\n Return the requester who offered this Unit\n \"\"\"\n if self.__requester is None:\n if self.__assignment is not None:\n self.__requester = self.__assignment.get_requester()\n elif self.__task_run is not None:\n self.__requester = self.__task_run.get_requester()\n else:\n self.__requester = Requester(self.db, self.requester_id)\n return self.__requester\n\n def clear_assigned_agent(self) -> None:\n \"\"\"Clear the agent that is assigned to this unit\"\"\"\n logger.debug(f\"Clearing assigned agent {self.agent_id} from {self}\")\n self.db.clear_unit_agent_assignment(self.db_id)\n self.get_task_run().clear_reservation(self)\n self.agent_id = None\n self.__agent = None\n\n def get_assigned_agent(self) -> Optional[Agent]:\n \"\"\"\n Get the agent assigned to this Unit if there is one, else return None\n \"\"\"\n # In these statuses, we know the agent isn't changing anymore, and thus will\n # not need to be re-queried\n # TODO(#97) add test to ensure this behavior/assumption holds always\n if self.db_status in AssignmentState.final_unit():\n if self.agent_id is None:\n return None\n return Agent(self.db, self.agent_id)\n\n # Query the database to get the most up-to-date assignment, as this can\n # change after instantiation if the Unit status isn't final\n unit_copy = Unit(self.db, self.db_id)\n self.agent_id = unit_copy.agent_id\n if self.agent_id is not None:\n return Agent(self.db, self.agent_id)\n return None\n\n @staticmethod\n def _register_unit(\n db: \"MephistoDB\",\n assignment: \"Assignment\",\n index: int,\n pay_amount: float,\n provider_type: str,\n ) -> \"Unit\":\n \"\"\"\n Create an entry for this unit in the database\n \"\"\"\n db_id = db.new_unit(\n assignment.task_id,\n assignment.task_run_id,\n assignment.requester_id,\n assignment.db_id,\n index,\n pay_amount,\n provider_type,\n assignment.task_type,\n )\n unit = Unit(db, db_id)\n logger.debug(f\"Registered new unit {unit} for {assignment}.\")\n return unit\n\n def get_pay_amount(self) -> float:\n \"\"\"\n Return the amount that this Unit is costing against the budget,\n calculating additional fees as relevant\n \"\"\"\n return self.pay_amount\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.db_id}, {self.db_status})\"\n\n # Children classes may need to override the following\n\n def 
get_status(self) -> str:\n \"\"\"\n Get the status of this unit, as determined by whether there's\n a worker working on it at the moment, and any other possible states. Should\n return one of UNIT_STATUSES\n\n Accurate status is crowd-provider dependent, and thus this method should be\n defined in the child class to ensure that the local record matches\n the ground truth in the provider\n \"\"\"\n from mephisto.abstractions.blueprint import AgentState\n\n db_status = self.db_status\n computed_status = AssignmentState.LAUNCHED\n\n agent = self.get_assigned_agent()\n if agent is None:\n row = self.db.get_unit(self.db_id)\n computed_status = row[\"status\"]\n else:\n agent_status = agent.get_status()\n if agent_status == AgentState.STATUS_NONE:\n computed_status = AssignmentState.LAUNCHED\n elif agent_status in [\n AgentState.STATUS_ACCEPTED,\n AgentState.STATUS_ONBOARDING,\n AgentState.STATUS_PARTNER_DISCONNECT,\n AgentState.STATUS_WAITING,\n AgentState.STATUS_IN_TASK,\n ]:\n computed_status = AssignmentState.ASSIGNED\n elif agent_status in [AgentState.STATUS_COMPLETED]:\n computed_status = AssignmentState.COMPLETED\n elif agent_status in [AgentState.STATUS_SOFT_REJECTED]:\n computed_status = AssignmentState.SOFT_REJECTED\n elif agent_status in [AgentState.STATUS_EXPIRED]:\n computed_status = AssignmentState.EXPIRED\n elif agent_status in [\n AgentState.STATUS_DISCONNECT,\n AgentState.STATUS_RETURNED,\n ]:\n computed_status = AssignmentState.ASSIGNED\n elif agent_status == AgentState.STATUS_APPROVED:\n computed_status = AssignmentState.ACCEPTED\n elif agent_status == AgentState.STATUS_REJECTED:\n computed_status = AssignmentState.REJECTED\n\n if computed_status != db_status:\n self.set_db_status(computed_status)\n\n return computed_status\n\n # Children classes should implement the below methods\n\n def launch(self, task_url: str) -> None:\n \"\"\"\n Make this Unit available on the crowdsourcing vendor. Depending on\n the task type, this could mean a number of different setup steps.\n\n Some crowd providers require setting up a configuration for the\n very first launch, and this method should call a helper to manage\n that step if necessary.\n \"\"\"\n raise NotImplementedError()\n\n def expire(self) -> float:\n \"\"\"\n Expire this unit, removing it from being workable on the vendor.\n Return the maximum time needed to wait before we know it's taken down.\n \"\"\"\n raise NotImplementedError()\n\n def is_expired(self) -> bool:\n \"\"\"Determine if this unit is expired as according to the vendor.\"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def new(\n db: \"MephistoDB\", assignment: \"Assignment\", index: int, pay_amount: float\n ) -> \"Unit\":\n \"\"\"\n Create a Unit for the given assignment\n\n Implementation should return the result of _register_unit when sure the unit\n can be successfully created to have it put into the db.\n \"\"\"\n raise NotImplementedError()\n", "path": "mephisto/data_model/unit.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom abc import ABC\nfrom mephisto.data_model.constants.assignment_state import AssignmentState\nfrom mephisto.data_model.task import Task\nfrom mephisto.data_model.task_run import TaskRun\nfrom mephisto.data_model.agent import Agent\nfrom mephisto.data_model.db_backed_meta import MephistoDBBackedABCMeta\nfrom mephisto.abstractions.blueprint import AgentState\nfrom mephisto.data_model.requester import Requester\nfrom typing import Optional, Mapping, Dict, Any, Type, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mephisto.abstractions.database import MephistoDB\n from mephisto.data_model.worker import Worker\n from mephisto.abstractions.crowd_provider import CrowdProvider\n from mephisto.data_model.assignment import Assignment\n\nimport os\n\nfrom mephisto.operations.logger_core import get_logger\n\nlogger = get_logger(name=__name__)\n\n\nclass Unit(metaclass=MephistoDBBackedABCMeta):\n \"\"\"\n This class tracks the status of an individual worker's contribution to a\n higher level assignment. It is the smallest 'unit' of work to complete\n the assignment, and this class is only responsible for checking\n the status of that work itself being done.\n\n It should be extended for usage with a specific crowd provider\n \"\"\"\n\n def __init__(\n self, db: \"MephistoDB\", db_id: str, row: Optional[Mapping[str, Any]] = None\n ):\n self.db: \"MephistoDB\" = db\n if row is None:\n row = db.get_unit(db_id)\n assert row is not None, f\"Given db_id {db_id} did not exist in given db\"\n self.db_id: str = row[\"unit_id\"]\n self.assignment_id = row[\"assignment_id\"]\n self.unit_index = row[\"unit_index\"]\n self.pay_amount = row[\"pay_amount\"]\n self.agent_id = row[\"agent_id\"]\n self.provider_type = row[\"provider_type\"]\n self.db_status = row[\"status\"]\n self.task_type = row[\"task_type\"]\n self.task_id = row[\"task_id\"]\n self.task_run_id = row[\"task_run_id\"]\n self.sandbox = row[\"sandbox\"]\n self.requester_id = row[\"requester_id\"]\n self.worker_id = row[\"worker_id\"]\n\n # Deferred loading of related entities\n self.__task: Optional[\"Task\"] = None\n self.__task_run: Optional[\"TaskRun\"] = None\n self.__assignment: Optional[\"Assignment\"] = None\n self.__requester: Optional[\"Requester\"] = None\n self.__agent: Optional[\"Agent\"] = None\n self.__worker: Optional[\"Worker\"] = None\n\n def __new__(\n cls, db: \"MephistoDB\", db_id: str, row: Optional[Mapping[str, Any]] = None\n ) -> \"Unit\":\n \"\"\"\n The new method is overridden to be able to automatically generate\n the expected Unit class without needing to specifically find it\n for a given db_id. 
As such it is impossible to create a Unit\n as you will instead be returned the correct Unit class according to\n the crowdprovider associated with this Unit.\n \"\"\"\n if cls == Unit:\n # We are trying to construct a Unit, find what type to use and\n # create that instead\n from mephisto.operations.registry import get_crowd_provider_from_type\n\n if row is None:\n row = db.get_unit(db_id)\n assert row is not None, f\"Given db_id {db_id} did not exist in given db\"\n correct_class = get_crowd_provider_from_type(row[\"provider_type\"]).UnitClass\n return super().__new__(correct_class)\n else:\n # We are constructing another instance directly\n return super().__new__(cls)\n\n def get_crowd_provider_class(self) -> Type[\"CrowdProvider\"]:\n \"\"\"Get the CrowdProvider class that manages this Unit\"\"\"\n from mephisto.operations.registry import get_crowd_provider_from_type\n\n return get_crowd_provider_from_type(self.provider_type)\n\n def get_assignment_data(self) -> Optional[Dict[str, Any]]:\n \"\"\"Return the specific assignment data for this assignment\"\"\"\n return self.get_assignment().get_assignment_data()\n\n def sync_status(self) -> None:\n \"\"\"\n Ensure that the queried status from this unit and the db status\n are up to date\n \"\"\"\n # TODO(102) this will need to be run periodically/on crashes\n # to sync any lost state\n self.set_db_status(self.get_status())\n\n def get_db_status(self) -> str:\n \"\"\"\n Return the status as currently stored in the database\n \"\"\"\n if self.db_status in AssignmentState.final_unit():\n return self.db_status\n row = self.db.get_unit(self.db_id)\n assert row is not None, f\"Unit {self.db_id} stopped existing in the db...\"\n return row[\"status\"]\n\n def set_db_status(self, status: str) -> None:\n \"\"\"\n Set the status reflected in the database for this Unit\n \"\"\"\n assert (\n status in AssignmentState.valid_unit()\n ), f\"{status} not valid Assignment Status, not in {AssignmentState.valid_unit()}\"\n if status == self.db_status:\n return\n logger.debug(f\"Updating status for {self} to {status}\")\n self.db_status = status\n self.db.update_unit(self.db_id, status=status)\n\n def get_assignment(self) -> \"Assignment\":\n \"\"\"\n Return the assignment that this Unit is part of.\n \"\"\"\n if self.__assignment is None:\n from mephisto.data_model.assignment import Assignment\n\n self.__assignment = Assignment(self.db, self.assignment_id)\n return self.__assignment\n\n def get_task_run(self) -> TaskRun:\n \"\"\"\n Return the task run that this assignment is part of\n \"\"\"\n if self.__task_run is None:\n if self.__assignment is not None:\n self.__task_run = self.__assignment.get_task_run()\n else:\n self.__task_run = TaskRun(self.db, self.task_run_id)\n return self.__task_run\n\n def get_task(self) -> Task:\n \"\"\"\n Return the task that this assignment is part of\n \"\"\"\n if self.__task is None:\n if self.__assignment is not None:\n self.__task = self.__assignment.get_task()\n elif self.__task_run is not None:\n self.__task = self.__task_run.get_task()\n else:\n self.__task = Task(self.db, self.task_id)\n return self.__task\n\n def get_requester(self) -> \"Requester\":\n \"\"\"\n Return the requester who offered this Unit\n \"\"\"\n if self.__requester is None:\n if self.__assignment is not None:\n self.__requester = self.__assignment.get_requester()\n elif self.__task_run is not None:\n self.__requester = self.__task_run.get_requester()\n else:\n self.__requester = Requester(self.db, self.requester_id)\n return self.__requester\n\n 
def clear_assigned_agent(self) -> None:\n \"\"\"Clear the agent that is assigned to this unit\"\"\"\n logger.debug(f\"Clearing assigned agent {self.agent_id} from {self}\")\n self.db.clear_unit_agent_assignment(self.db_id)\n self.get_task_run().clear_reservation(self)\n self.agent_id = None\n self.__agent = None\n\n def get_assigned_agent(self) -> Optional[Agent]:\n \"\"\"\n Get the agent assigned to this Unit if there is one, else return None\n \"\"\"\n # In these statuses, we know the agent isn't changing anymore, and thus will\n # not need to be re-queried\n # TODO(#97) add test to ensure this behavior/assumption holds always\n if self.db_status in AssignmentState.final_unit():\n if self.agent_id is None:\n return None\n return Agent(self.db, self.agent_id)\n\n # Query the database to get the most up-to-date assignment, as this can\n # change after instantiation if the Unit status isn't final\n unit_copy = Unit(self.db, self.db_id)\n self.agent_id = unit_copy.agent_id\n if self.agent_id is not None:\n return Agent(self.db, self.agent_id)\n return None\n\n @staticmethod\n def _register_unit(\n db: \"MephistoDB\",\n assignment: \"Assignment\",\n index: int,\n pay_amount: float,\n provider_type: str,\n ) -> \"Unit\":\n \"\"\"\n Create an entry for this unit in the database\n \"\"\"\n db_id = db.new_unit(\n assignment.task_id,\n assignment.task_run_id,\n assignment.requester_id,\n assignment.db_id,\n index,\n pay_amount,\n provider_type,\n assignment.task_type,\n )\n unit = Unit(db, db_id)\n logger.debug(f\"Registered new unit {unit} for {assignment}.\")\n return unit\n\n def get_pay_amount(self) -> float:\n \"\"\"\n Return the amount that this Unit is costing against the budget,\n calculating additional fees as relevant\n \"\"\"\n return self.pay_amount\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.db_id}, {self.db_status})\"\n\n # Children classes may need to override the following\n\n def get_status(self) -> str:\n \"\"\"\n Get the status of this unit, as determined by whether there's\n a worker working on it at the moment, and any other possible states. 
Should\n return one of UNIT_STATUSES\n\n Accurate status is crowd-provider dependent, and thus this method should be\n defined in the child class to ensure that the local record matches\n the ground truth in the provider\n \"\"\"\n from mephisto.abstractions.blueprint import AgentState\n\n db_status = self.db_status\n\n # Expiration is a terminal state, and shouldn't be changed\n if db_status == AssignmentState.EXPIRED:\n return db_status\n\n computed_status = AssignmentState.LAUNCHED\n\n agent = self.get_assigned_agent()\n if agent is None:\n row = self.db.get_unit(self.db_id)\n computed_status = row[\"status\"]\n else:\n agent_status = agent.get_status()\n if agent_status == AgentState.STATUS_NONE:\n computed_status = AssignmentState.LAUNCHED\n elif agent_status in [\n AgentState.STATUS_ACCEPTED,\n AgentState.STATUS_ONBOARDING,\n AgentState.STATUS_PARTNER_DISCONNECT,\n AgentState.STATUS_WAITING,\n AgentState.STATUS_IN_TASK,\n ]:\n computed_status = AssignmentState.ASSIGNED\n elif agent_status in [AgentState.STATUS_COMPLETED]:\n computed_status = AssignmentState.COMPLETED\n elif agent_status in [AgentState.STATUS_SOFT_REJECTED]:\n computed_status = AssignmentState.SOFT_REJECTED\n elif agent_status in [AgentState.STATUS_EXPIRED]:\n computed_status = AssignmentState.EXPIRED\n elif agent_status in [\n AgentState.STATUS_DISCONNECT,\n AgentState.STATUS_RETURNED,\n ]:\n computed_status = AssignmentState.ASSIGNED\n elif agent_status == AgentState.STATUS_APPROVED:\n computed_status = AssignmentState.ACCEPTED\n elif agent_status == AgentState.STATUS_REJECTED:\n computed_status = AssignmentState.REJECTED\n\n if computed_status != db_status:\n self.set_db_status(computed_status)\n\n return computed_status\n\n # Children classes should implement the below methods\n\n def launch(self, task_url: str) -> None:\n \"\"\"\n Make this Unit available on the crowdsourcing vendor. Depending on\n the task type, this could mean a number of different setup steps.\n\n Some crowd providers require setting up a configuration for the\n very first launch, and this method should call a helper to manage\n that step if necessary.\n \"\"\"\n raise NotImplementedError()\n\n def expire(self) -> float:\n \"\"\"\n Expire this unit, removing it from being workable on the vendor.\n Return the maximum time needed to wait before we know it's taken down.\n \"\"\"\n raise NotImplementedError()\n\n def is_expired(self) -> bool:\n \"\"\"Determine if this unit is expired as according to the vendor.\"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def new(\n db: \"MephistoDB\", assignment: \"Assignment\", index: int, pay_amount: float\n ) -> \"Unit\":\n \"\"\"\n Create a Unit for the given assignment\n\n Implementation should return the result of _register_unit when sure the unit\n can be successfully created to have it put into the db.\n \"\"\"\n raise NotImplementedError()\n", "path": "mephisto/data_model/unit.py"}]} |
gh_patches_debug_1352 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
same module displayed twice in project view
1 module is displayed twice (see picture) while another one (schönen text kommentieren) is not displayed at all.
have a look at this [project](https://meinberlin-dev.liqd.net/projects/ihre-vision-fur-berlin-welche-themen-sind-ihnen-wi/)

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projects/views.py`
Content:
```
1 import itertools
2
3 import django_filters
4 from django.apps import apps
5 from django.conf import settings
6 from django.contrib import messages
7 from django.contrib.auth import get_user_model
8 from django.db.models import Max
9 from django.db.models import Min
10 from django.db.models import Q
11 from django.shortcuts import get_object_or_404
12 from django.shortcuts import redirect
13 from django.utils import timezone
14 from django.utils.functional import cached_property
15 from django.utils.translation import ugettext_lazy as _
16 from django.utils.translation import ungettext
17 from django.views import generic
18 from rules.contrib.views import LoginRequiredMixin
19 from rules.contrib.views import PermissionRequiredMixin
20
21 from adhocracy4.administrative_districts.models import AdministrativeDistrict
22 from adhocracy4.dashboard import mixins as a4dashboard_mixins
23 from adhocracy4.dashboard import signals as a4dashboard_signals
24 from adhocracy4.filters import widgets as filters_widgets
25 from adhocracy4.filters.filters import DefaultsFilterSet
26 from adhocracy4.filters.filters import DistinctOrderingFilter
27 from adhocracy4.filters.filters import FreeTextFilter
28 from adhocracy4.filters.widgets import DropdownLinkWidget
29 from adhocracy4.modules import models as module_models
30 from adhocracy4.projects import models as project_models
31 from adhocracy4.projects.mixins import PhaseDispatchMixin
32 from adhocracy4.projects.mixins import ProjectMixin
33 from meinberlin.apps.contrib.mixins import ModuleClusterMixin
34
35 from . import forms
36 from . import get_project_type
37 from . import models
38
39 User = get_user_model()
40
41
42 class OrderingWidget(DropdownLinkWidget):
43 label = _('Ordering')
44 right = True
45
46
47 class OrganisationWidget(DropdownLinkWidget):
48 label = _('Organisation')
49
50
51 class DistrictWidget(DropdownLinkWidget):
52 label = _('District')
53
54
55 class FreeTextFilterWidget(filters_widgets.FreeTextFilterWidget):
56 label = _('Search')
57
58
59 class ArchivedWidget(DropdownLinkWidget):
60 label = _('Archived')
61
62 def __init__(self, attrs=None):
63 choices = (
64 ('', _('All')),
65 ('false', _('No')),
66 ('true', _('Yes')),
67 )
68 super().__init__(attrs, choices)
69
70
71 class YearWidget(DropdownLinkWidget):
72 label = _('Year')
73
74 def __init__(self, attrs=None):
75 choices = (('', _('Any')),)
76 now = timezone.now().year
77 try:
78 first_year = project_models.Project.objects.earliest('created').\
79 created.year
80 except project_models.Project.DoesNotExist:
81 first_year = now
82 for year in range(now, first_year - 1, -1):
83 choices += (year, year),
84 super().__init__(attrs, choices)
85
86
87 class ProjectFilterSet(DefaultsFilterSet):
88
89 defaults = {
90 'is_archived': 'false'
91 }
92
93 ordering = DistinctOrderingFilter(
94 choices=(
95 ('-created', _('Most recent')),
96 ),
97 empty_label=None,
98 widget=OrderingWidget,
99 )
100
101 search = FreeTextFilter(
102 widget=FreeTextFilterWidget,
103 fields=['name', 'description',
104 'projectcontainer__projects__name']
105 )
106
107 organisation = django_filters.ModelChoiceFilter(
108 queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects
109 .order_by('name'),
110 widget=OrganisationWidget,
111 )
112
113 is_archived = django_filters.BooleanFilter(
114 widget=ArchivedWidget
115 )
116
117 created = django_filters.NumberFilter(
118 field_name='created',
119 lookup_expr='year',
120 widget=YearWidget,
121 )
122
123 administrative_district = django_filters.ModelChoiceFilter(
124 queryset=AdministrativeDistrict.objects.all(),
125 widget=DistrictWidget
126 )
127
128 class Meta:
129 model = project_models.Project
130 fields = ['search', 'organisation', 'is_archived',
131 'created', 'administrative_district']
132
133
134 class ParticipantInviteDetailView(generic.DetailView):
135 model = models.ParticipantInvite
136 slug_field = 'token'
137 slug_url_kwarg = 'invite_token'
138
139 def dispatch(self, request, invite_token, *args, **kwargs):
140 if request.user.is_authenticated:
141 return redirect(
142 'project-participant-invite-update',
143 invite_token=invite_token
144 )
145 else:
146 return super().dispatch(request, *args, **kwargs)
147
148
149 class ParticipantInviteUpdateView(LoginRequiredMixin, generic.UpdateView):
150 model = models.ParticipantInvite
151 form_class = forms.ParticipantInviteForm
152 slug_field = 'token'
153 slug_url_kwarg = 'invite_token'
154
155 def form_valid(self, form):
156 if form.is_accepted():
157 form.instance.accept(self.request.user)
158 return redirect(form.instance.project.get_absolute_url())
159 else:
160 form.instance.reject()
161 return redirect('/')
162
163
164 class ModeratorInviteDetailView(generic.DetailView):
165 model = models.ModeratorInvite
166 slug_field = 'token'
167 slug_url_kwarg = 'invite_token'
168
169 def dispatch(self, request, invite_token, *args, **kwargs):
170 if request.user.is_authenticated:
171 return redirect(
172 'project-moderator-invite-update',
173 invite_token=invite_token
174 )
175 else:
176 return super().dispatch(request, *args, **kwargs)
177
178
179 class ModeratorInviteUpdateView(LoginRequiredMixin, generic.UpdateView):
180 model = models.ModeratorInvite
181 form_class = forms.ModeratorInviteForm
182 slug_field = 'token'
183 slug_url_kwarg = 'invite_token'
184
185 def form_valid(self, form):
186 if form.is_accepted():
187 form.instance.accept(self.request.user)
188 return redirect(form.instance.project.get_absolute_url())
189 else:
190 form.instance.reject()
191 return redirect('/')
192
193
194 class AbstractProjectUserInviteListView(
195 ProjectMixin,
196 a4dashboard_mixins.DashboardBaseMixin,
197 a4dashboard_mixins.DashboardComponentMixin,
198 generic.base.TemplateResponseMixin,
199 generic.edit.FormMixin,
200 generic.detail.SingleObjectMixin,
201 generic.edit.ProcessFormView):
202
203 form_class = forms.InviteUsersFromEmailForm
204 invite_model = None
205
206 def get(self, request, *args, **kwargs):
207 self.object = self.get_object()
208 return super().get(request, *args, **kwargs)
209
210 def post(self, request, *args, **kwargs):
211 self.object = self.get_object()
212 if 'submit_action' in request.POST:
213 if request.POST['submit_action'] == 'remove_user':
214 pk = int(request.POST['user_pk'])
215 user = get_object_or_404(User, pk=pk)
216 related_users = getattr(self.object, self.related_users_field)
217 related_users.remove(user)
218 messages.success(request, self.success_message_removal)
219 elif request.POST['submit_action'] == 'remove_invite':
220 pk = int(request.POST['invite_pk'])
221 invite = self.invite_model.objects.get(pk=pk)
222 invite.delete()
223 messages.success(request, _('Invitation succesfully removed.'))
224
225 response = redirect(self.get_success_url())
226 else:
227 response = super().post(request, *args, **kwargs)
228
229 self._send_component_updated_signal()
230 return response
231
232 def filter_existing(self, emails):
233 related_users = getattr(self.object, self.related_users_field)
234 related_emails = [u.email for u in related_users.all()]
235 existing = []
236 filtered_emails = []
237 for email in emails:
238 if email in related_emails:
239 existing.append(email)
240 else:
241 filtered_emails.append(email)
242 return filtered_emails, existing
243
244 def filter_pending(self, emails):
245 pending = []
246 filtered_emails = []
247 for email in emails:
248 if self.invite_model.objects.filter(email=email,
249 project=self.project).exists():
250 pending.append(email)
251 else:
252 filtered_emails.append(email)
253 return filtered_emails, pending
254
255 def form_valid(self, form):
256 emails = list(set(
257 itertools.chain(form.cleaned_data['add_users'],
258 form.cleaned_data['add_users_upload'])))
259
260 emails, existing = self.filter_existing(emails)
261 if existing:
262 messages.error(
263 self.request,
264 _('Following users already accepted an invitation: ') +
265 ', '.join(existing)
266 )
267
268 emails, pending = self.filter_pending(emails)
269 if pending:
270 messages.error(
271 self.request,
272 _('Following users are already invited: ') +
273 ', '.join(pending)
274 )
275
276 for email in emails:
277 self.invite_model.objects.invite(
278 self.request.user,
279 self.project,
280 email
281 )
282
283 messages.success(
284 self.request,
285 ungettext(self.success_message[0], self.success_message[1],
286 len(emails)).format(len(emails))
287 )
288
289 return redirect(self.get_success_url())
290
291 def get_form_kwargs(self):
292 kwargs = super().get_form_kwargs()
293 kwargs['labels'] = (self.add_user_field_label,
294 self.add_user_upload_field_label)
295 return kwargs
296
297 def _send_component_updated_signal(self):
298 a4dashboard_signals.project_component_updated.send(
299 sender=self.component.__class__,
300 project=self.project,
301 component=self.component,
302 user=self.request.user
303 )
304
305
306 class DashboardProjectModeratorsView(AbstractProjectUserInviteListView):
307
308 model = project_models.Project
309 slug_url_kwarg = 'project_slug'
310 template_name = 'meinberlin_projects/project_moderators.html'
311 permission_required = 'a4projects.change_project'
312 menu_item = 'project'
313
314 related_users_field = 'moderators'
315 add_user_field_label = _('Invite moderators via email')
316 add_user_upload_field_label = _('Invite moderators via file upload')
317 success_message = (_('{} moderator invited.'), _('{} moderators invited.'))
318 success_message_removal = _('Moderator successfully removed.')
319
320 invite_model = models.ModeratorInvite
321
322 def get_permission_object(self):
323 return self.project
324
325
326 class DashboardProjectParticipantsView(AbstractProjectUserInviteListView):
327
328 model = project_models.Project
329 slug_url_kwarg = 'project_slug'
330 template_name = 'meinberlin_projects/project_participants.html'
331 permission_required = 'a4projects.change_project'
332 menu_item = 'project'
333
334 related_users_field = 'participants'
335 add_user_field_label = _('Invite users via email')
336 add_user_upload_field_label = _('Invite users via file upload')
337 success_message = (
338 _('{} participant invited.'),
339 _('{} participants invited.'))
340 success_message_removal = _('Participant successfully removed.')
341
342 invite_model = models.ParticipantInvite
343
344 def get_permission_object(self):
345 return self.project
346
347
348 class ProjectDetailView(PermissionRequiredMixin,
349 generic.DetailView,
350 ModuleClusterMixin):
351
352 model = models.Project
353 permission_required = 'a4projects.view_project'
354
355 def get_template_names(self):
356 type = get_project_type(self.project)
357 if type == 'container':
358 return ['meinberlin_projects/project_container_detail.html']
359 if type == 'bplan':
360 return ['meinberlin_projects/project_bplan_detail.html']
361 return ['meinberlin_projects/project_detail.html']
362
363 def dispatch(self, request, *args, **kwargs):
364 kwargs['project'] = self.project
365 kwargs['module'] = self.module
366
367 if self.modules.count() == 1 and not self.events:
368 return self._view_by_phase()(request, *args, **kwargs)
369 else:
370 return super().dispatch(request)
371
372 def get_context_data(self, **kwargs):
373 context = super().get_context_data(**kwargs)
374 context['event'] = self.get_current_event()
375 context['modules'] = self.get_current_modules()
376 context['participation_dates'] = self.full_list
377 context['initial_slide'] = self.initial_slide
378 return context
379
380 @cached_property
381 def project(self):
382 return self.get_object()
383
384 @cached_property
385 def module(self):
386 if self.modules.count() == 1 and not self.events:
387 return self.modules.first()
388
389 @cached_property
390 def modules(self):
391 return self.project.modules\
392 .annotate(start_date=Min('phase__start_date'))\
393 .annotate(end_date=Max('phase__end_date'))\
394 .exclude(Q(start_date=None) | Q(end_date=None))\
395 .order_by('start_date')
396
397 @cached_property
398 def events(self):
399 return self.project.offlineevent_set.all()
400
401 @cached_property
402 def full_list(self):
403 module_cluster = self.module_clusters
404 event_list = self.get_events_list()
405 full_list = module_cluster + list(event_list)
406 return sorted(full_list, key=lambda k: k['date'])
407
408 @cached_property
409 def module_clusters(self):
410 clusters = super().get_module_clusters(self.modules)
411 if len(clusters) == 1:
412 clusters[0]['title'] = _('Online Participation')
413 return clusters
414
415 @cached_property
416 def initial_slide(self):
417 initial_slide = self.request.GET.get('initialSlide')
418 if initial_slide:
419 return int(initial_slide)
420 else:
421 now = timezone.now()
422 for idx, val in enumerate(self.full_list):
423 if 'type' in val and val['type'] == 'module':
424 start_date = val['date']
425 end_date = val['end_date']
426 if start_date and end_date:
427 if now >= start_date and now <= end_date:
428 return idx
429 return 0
430
431 @cached_property
432 def display_timeline(self):
433 return len(self.full_list) > 1
434
435 @cached_property
436 def is_project_view(self):
437 return self.get_current_modules()
438
439 def _view_by_phase(self):
440 if self.module.last_active_phase:
441 return self.module.last_active_phase.view.as_view()
442 elif self.module.future_phases:
443 return self.module.future_phases.first().view.as_view()
444 else:
445 return super().dispatch
446
447 def _get_module_dict(self, count, start_date, end_date):
448 return {
449 'title': _('{}. Online Participation').format(str(count)),
450 'type': 'module',
451 'date': start_date,
452 'end_date': end_date,
453 'modules': []
454 }
455
456 def get_current_event(self):
457 fl = self.full_list
458 idx = self.initial_slide
459 try:
460 current_dict = fl[idx]
461 if 'type' not in current_dict:
462 return self.full_list[self.initial_slide]
463 except (IndexError, KeyError):
464 return []
465 return []
466
467 def get_current_modules(self):
468 fl = self.full_list
469 idx = self.initial_slide
470 try:
471 current_dict = fl[idx]
472 if current_dict['type'] == 'module':
473 return self.full_list[self.initial_slide]['modules']
474 except (IndexError, KeyError):
475 return []
476
477 def get_events_list(self):
478 return self.events.values('date', 'name',
479 'event_type',
480 'slug', 'description')
481
482 @property
483 def raise_exception(self):
484 return self.request.user.is_authenticated
485
486
487 class ModuleDetailview(PermissionRequiredMixin,
488 PhaseDispatchMixin):
489
490 model = module_models.Module
491 permission_required = 'a4projects.view_project'
492 slug_url_kwarg = 'module_slug'
493
494 @cached_property
495 def project(self):
496 return self.module.project
497
498 @cached_property
499 def module(self):
500 return self.get_object()
501
502 def get_permission_object(self):
503 return self.project
504
505 def get_context_data(self, **kwargs):
506 """Append project and module to the template context."""
507 if 'project' not in kwargs:
508 kwargs['project'] = self.project
509 if 'module' not in kwargs:
510 kwargs['module'] = self.module
511 return super().get_context_data(**kwargs)
512
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/projects/views.py b/meinberlin/apps/projects/views.py
--- a/meinberlin/apps/projects/views.py
+++ b/meinberlin/apps/projects/views.py
@@ -392,7 +392,7 @@
.annotate(start_date=Min('phase__start_date'))\
.annotate(end_date=Max('phase__end_date'))\
.exclude(Q(start_date=None) | Q(end_date=None))\
- .order_by('start_date')
+ .order_by('start_date', 'id')
@cached_property
def events(self):
| {"golden_diff": "diff --git a/meinberlin/apps/projects/views.py b/meinberlin/apps/projects/views.py\n--- a/meinberlin/apps/projects/views.py\n+++ b/meinberlin/apps/projects/views.py\n@@ -392,7 +392,7 @@\n .annotate(start_date=Min('phase__start_date'))\\\n .annotate(end_date=Max('phase__end_date'))\\\n .exclude(Q(start_date=None) | Q(end_date=None))\\\n- .order_by('start_date')\n+ .order_by('start_date', 'id')\n \n @cached_property\n def events(self):\n", "issue": "same module displayed twice in project view\n1 module is displayed twice (see picture) while another one (sch\u00f6nen text kommentieren) is not displayed at all. \r\n\r\nhave a look at this [project](https://meinberlin-dev.liqd.net/projects/ihre-vision-fur-berlin-welche-themen-sind-ihnen-wi/)\r\n\r\n\r\n\n", "before_files": [{"content": "import itertools\n\nimport django_filters\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Max\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ungettext\nfrom django.views import generic\nfrom rules.contrib.views import LoginRequiredMixin\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.dashboard import mixins as a4dashboard_mixins\nfrom adhocracy4.dashboard import signals as a4dashboard_signals\nfrom adhocracy4.filters import widgets as filters_widgets\nfrom adhocracy4.filters.filters import DefaultsFilterSet\nfrom adhocracy4.filters.filters import DistinctOrderingFilter\nfrom adhocracy4.filters.filters import FreeTextFilter\nfrom adhocracy4.filters.widgets import DropdownLinkWidget\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.mixins import PhaseDispatchMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom meinberlin.apps.contrib.mixins import ModuleClusterMixin\n\nfrom . import forms\nfrom . import get_project_type\nfrom . 
import models\n\nUser = get_user_model()\n\n\nclass OrderingWidget(DropdownLinkWidget):\n label = _('Ordering')\n right = True\n\n\nclass OrganisationWidget(DropdownLinkWidget):\n label = _('Organisation')\n\n\nclass DistrictWidget(DropdownLinkWidget):\n label = _('District')\n\n\nclass FreeTextFilterWidget(filters_widgets.FreeTextFilterWidget):\n label = _('Search')\n\n\nclass ArchivedWidget(DropdownLinkWidget):\n label = _('Archived')\n\n def __init__(self, attrs=None):\n choices = (\n ('', _('All')),\n ('false', _('No')),\n ('true', _('Yes')),\n )\n super().__init__(attrs, choices)\n\n\nclass YearWidget(DropdownLinkWidget):\n label = _('Year')\n\n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n now = timezone.now().year\n try:\n first_year = project_models.Project.objects.earliest('created').\\\n created.year\n except project_models.Project.DoesNotExist:\n first_year = now\n for year in range(now, first_year - 1, -1):\n choices += (year, year),\n super().__init__(attrs, choices)\n\n\nclass ProjectFilterSet(DefaultsFilterSet):\n\n defaults = {\n 'is_archived': 'false'\n }\n\n ordering = DistinctOrderingFilter(\n choices=(\n ('-created', _('Most recent')),\n ),\n empty_label=None,\n widget=OrderingWidget,\n )\n\n search = FreeTextFilter(\n widget=FreeTextFilterWidget,\n fields=['name', 'description',\n 'projectcontainer__projects__name']\n )\n\n organisation = django_filters.ModelChoiceFilter(\n queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects\n .order_by('name'),\n widget=OrganisationWidget,\n )\n\n is_archived = django_filters.BooleanFilter(\n widget=ArchivedWidget\n )\n\n created = django_filters.NumberFilter(\n field_name='created',\n lookup_expr='year',\n widget=YearWidget,\n )\n\n administrative_district = django_filters.ModelChoiceFilter(\n queryset=AdministrativeDistrict.objects.all(),\n widget=DistrictWidget\n )\n\n class Meta:\n model = project_models.Project\n fields = ['search', 'organisation', 'is_archived',\n 'created', 'administrative_district']\n\n\nclass ParticipantInviteDetailView(generic.DetailView):\n model = models.ParticipantInvite\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def dispatch(self, request, invite_token, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\n 'project-participant-invite-update',\n invite_token=invite_token\n )\n else:\n return super().dispatch(request, *args, **kwargs)\n\n\nclass ParticipantInviteUpdateView(LoginRequiredMixin, generic.UpdateView):\n model = models.ParticipantInvite\n form_class = forms.ParticipantInviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass ModeratorInviteDetailView(generic.DetailView):\n model = models.ModeratorInvite\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def dispatch(self, request, invite_token, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\n 'project-moderator-invite-update',\n invite_token=invite_token\n )\n else:\n return super().dispatch(request, *args, **kwargs)\n\n\nclass ModeratorInviteUpdateView(LoginRequiredMixin, generic.UpdateView):\n model = models.ModeratorInvite\n form_class = forms.ModeratorInviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n 
form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass AbstractProjectUserInviteListView(\n ProjectMixin,\n a4dashboard_mixins.DashboardBaseMixin,\n a4dashboard_mixins.DashboardComponentMixin,\n generic.base.TemplateResponseMixin,\n generic.edit.FormMixin,\n generic.detail.SingleObjectMixin,\n generic.edit.ProcessFormView):\n\n form_class = forms.InviteUsersFromEmailForm\n invite_model = None\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n if 'submit_action' in request.POST:\n if request.POST['submit_action'] == 'remove_user':\n pk = int(request.POST['user_pk'])\n user = get_object_or_404(User, pk=pk)\n related_users = getattr(self.object, self.related_users_field)\n related_users.remove(user)\n messages.success(request, self.success_message_removal)\n elif request.POST['submit_action'] == 'remove_invite':\n pk = int(request.POST['invite_pk'])\n invite = self.invite_model.objects.get(pk=pk)\n invite.delete()\n messages.success(request, _('Invitation succesfully removed.'))\n\n response = redirect(self.get_success_url())\n else:\n response = super().post(request, *args, **kwargs)\n\n self._send_component_updated_signal()\n return response\n\n def filter_existing(self, emails):\n related_users = getattr(self.object, self.related_users_field)\n related_emails = [u.email for u in related_users.all()]\n existing = []\n filtered_emails = []\n for email in emails:\n if email in related_emails:\n existing.append(email)\n else:\n filtered_emails.append(email)\n return filtered_emails, existing\n\n def filter_pending(self, emails):\n pending = []\n filtered_emails = []\n for email in emails:\n if self.invite_model.objects.filter(email=email,\n project=self.project).exists():\n pending.append(email)\n else:\n filtered_emails.append(email)\n return filtered_emails, pending\n\n def form_valid(self, form):\n emails = list(set(\n itertools.chain(form.cleaned_data['add_users'],\n form.cleaned_data['add_users_upload'])))\n\n emails, existing = self.filter_existing(emails)\n if existing:\n messages.error(\n self.request,\n _('Following users already accepted an invitation: ') +\n ', '.join(existing)\n )\n\n emails, pending = self.filter_pending(emails)\n if pending:\n messages.error(\n self.request,\n _('Following users are already invited: ') +\n ', '.join(pending)\n )\n\n for email in emails:\n self.invite_model.objects.invite(\n self.request.user,\n self.project,\n email\n )\n\n messages.success(\n self.request,\n ungettext(self.success_message[0], self.success_message[1],\n len(emails)).format(len(emails))\n )\n\n return redirect(self.get_success_url())\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['labels'] = (self.add_user_field_label,\n self.add_user_upload_field_label)\n return kwargs\n\n def _send_component_updated_signal(self):\n a4dashboard_signals.project_component_updated.send(\n sender=self.component.__class__,\n project=self.project,\n component=self.component,\n user=self.request.user\n )\n\n\nclass DashboardProjectModeratorsView(AbstractProjectUserInviteListView):\n\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n template_name = 'meinberlin_projects/project_moderators.html'\n permission_required = 'a4projects.change_project'\n menu_item = 'project'\n\n related_users_field 
= 'moderators'\n add_user_field_label = _('Invite moderators via email')\n add_user_upload_field_label = _('Invite moderators via file upload')\n success_message = (_('{} moderator invited.'), _('{} moderators invited.'))\n success_message_removal = _('Moderator successfully removed.')\n\n invite_model = models.ModeratorInvite\n\n def get_permission_object(self):\n return self.project\n\n\nclass DashboardProjectParticipantsView(AbstractProjectUserInviteListView):\n\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n template_name = 'meinberlin_projects/project_participants.html'\n permission_required = 'a4projects.change_project'\n menu_item = 'project'\n\n related_users_field = 'participants'\n add_user_field_label = _('Invite users via email')\n add_user_upload_field_label = _('Invite users via file upload')\n success_message = (\n _('{} participant invited.'),\n _('{} participants invited.'))\n success_message_removal = _('Participant successfully removed.')\n\n invite_model = models.ParticipantInvite\n\n def get_permission_object(self):\n return self.project\n\n\nclass ProjectDetailView(PermissionRequiredMixin,\n generic.DetailView,\n ModuleClusterMixin):\n\n model = models.Project\n permission_required = 'a4projects.view_project'\n\n def get_template_names(self):\n type = get_project_type(self.project)\n if type == 'container':\n return ['meinberlin_projects/project_container_detail.html']\n if type == 'bplan':\n return ['meinberlin_projects/project_bplan_detail.html']\n return ['meinberlin_projects/project_detail.html']\n\n def dispatch(self, request, *args, **kwargs):\n kwargs['project'] = self.project\n kwargs['module'] = self.module\n\n if self.modules.count() == 1 and not self.events:\n return self._view_by_phase()(request, *args, **kwargs)\n else:\n return super().dispatch(request)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['event'] = self.get_current_event()\n context['modules'] = self.get_current_modules()\n context['participation_dates'] = self.full_list\n context['initial_slide'] = self.initial_slide\n return context\n\n @cached_property\n def project(self):\n return self.get_object()\n\n @cached_property\n def module(self):\n if self.modules.count() == 1 and not self.events:\n return self.modules.first()\n\n @cached_property\n def modules(self):\n return self.project.modules\\\n .annotate(start_date=Min('phase__start_date'))\\\n .annotate(end_date=Max('phase__end_date'))\\\n .exclude(Q(start_date=None) | Q(end_date=None))\\\n .order_by('start_date')\n\n @cached_property\n def events(self):\n return self.project.offlineevent_set.all()\n\n @cached_property\n def full_list(self):\n module_cluster = self.module_clusters\n event_list = self.get_events_list()\n full_list = module_cluster + list(event_list)\n return sorted(full_list, key=lambda k: k['date'])\n\n @cached_property\n def module_clusters(self):\n clusters = super().get_module_clusters(self.modules)\n if len(clusters) == 1:\n clusters[0]['title'] = _('Online Participation')\n return clusters\n\n @cached_property\n def initial_slide(self):\n initial_slide = self.request.GET.get('initialSlide')\n if initial_slide:\n return int(initial_slide)\n else:\n now = timezone.now()\n for idx, val in enumerate(self.full_list):\n if 'type' in val and val['type'] == 'module':\n start_date = val['date']\n end_date = val['end_date']\n if start_date and end_date:\n if now >= start_date and now <= end_date:\n return idx\n return 0\n\n @cached_property\n def 
display_timeline(self):\n return len(self.full_list) > 1\n\n @cached_property\n def is_project_view(self):\n return self.get_current_modules()\n\n def _view_by_phase(self):\n if self.module.last_active_phase:\n return self.module.last_active_phase.view.as_view()\n elif self.module.future_phases:\n return self.module.future_phases.first().view.as_view()\n else:\n return super().dispatch\n\n def _get_module_dict(self, count, start_date, end_date):\n return {\n 'title': _('{}. Online Participation').format(str(count)),\n 'type': 'module',\n 'date': start_date,\n 'end_date': end_date,\n 'modules': []\n }\n\n def get_current_event(self):\n fl = self.full_list\n idx = self.initial_slide\n try:\n current_dict = fl[idx]\n if 'type' not in current_dict:\n return self.full_list[self.initial_slide]\n except (IndexError, KeyError):\n return []\n return []\n\n def get_current_modules(self):\n fl = self.full_list\n idx = self.initial_slide\n try:\n current_dict = fl[idx]\n if current_dict['type'] == 'module':\n return self.full_list[self.initial_slide]['modules']\n except (IndexError, KeyError):\n return []\n\n def get_events_list(self):\n return self.events.values('date', 'name',\n 'event_type',\n 'slug', 'description')\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n\nclass ModuleDetailview(PermissionRequiredMixin,\n PhaseDispatchMixin):\n\n model = module_models.Module\n permission_required = 'a4projects.view_project'\n slug_url_kwarg = 'module_slug'\n\n @cached_property\n def project(self):\n return self.module.project\n\n @cached_property\n def module(self):\n return self.get_object()\n\n def get_permission_object(self):\n return self.project\n\n def get_context_data(self, **kwargs):\n \"\"\"Append project and module to the template context.\"\"\"\n if 'project' not in kwargs:\n kwargs['project'] = self.project\n if 'module' not in kwargs:\n kwargs['module'] = self.module\n return super().get_context_data(**kwargs)\n", "path": "meinberlin/apps/projects/views.py"}], "after_files": [{"content": "import itertools\n\nimport django_filters\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Max\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ungettext\nfrom django.views import generic\nfrom rules.contrib.views import LoginRequiredMixin\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.dashboard import mixins as a4dashboard_mixins\nfrom adhocracy4.dashboard import signals as a4dashboard_signals\nfrom adhocracy4.filters import widgets as filters_widgets\nfrom adhocracy4.filters.filters import DefaultsFilterSet\nfrom adhocracy4.filters.filters import DistinctOrderingFilter\nfrom adhocracy4.filters.filters import FreeTextFilter\nfrom adhocracy4.filters.widgets import DropdownLinkWidget\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.mixins import PhaseDispatchMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom meinberlin.apps.contrib.mixins import 
ModuleClusterMixin\n\nfrom . import forms\nfrom . import get_project_type\nfrom . import models\n\nUser = get_user_model()\n\n\nclass OrderingWidget(DropdownLinkWidget):\n label = _('Ordering')\n right = True\n\n\nclass OrganisationWidget(DropdownLinkWidget):\n label = _('Organisation')\n\n\nclass DistrictWidget(DropdownLinkWidget):\n label = _('District')\n\n\nclass FreeTextFilterWidget(filters_widgets.FreeTextFilterWidget):\n label = _('Search')\n\n\nclass ArchivedWidget(DropdownLinkWidget):\n label = _('Archived')\n\n def __init__(self, attrs=None):\n choices = (\n ('', _('All')),\n ('false', _('No')),\n ('true', _('Yes')),\n )\n super().__init__(attrs, choices)\n\n\nclass YearWidget(DropdownLinkWidget):\n label = _('Year')\n\n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n now = timezone.now().year\n try:\n first_year = project_models.Project.objects.earliest('created').\\\n created.year\n except project_models.Project.DoesNotExist:\n first_year = now\n for year in range(now, first_year - 1, -1):\n choices += (year, year),\n super().__init__(attrs, choices)\n\n\nclass ProjectFilterSet(DefaultsFilterSet):\n\n defaults = {\n 'is_archived': 'false'\n }\n\n ordering = DistinctOrderingFilter(\n choices=(\n ('-created', _('Most recent')),\n ),\n empty_label=None,\n widget=OrderingWidget,\n )\n\n search = FreeTextFilter(\n widget=FreeTextFilterWidget,\n fields=['name', 'description',\n 'projectcontainer__projects__name']\n )\n\n organisation = django_filters.ModelChoiceFilter(\n queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects\n .order_by('name'),\n widget=OrganisationWidget,\n )\n\n is_archived = django_filters.BooleanFilter(\n widget=ArchivedWidget\n )\n\n created = django_filters.NumberFilter(\n field_name='created',\n lookup_expr='year',\n widget=YearWidget,\n )\n\n administrative_district = django_filters.ModelChoiceFilter(\n queryset=AdministrativeDistrict.objects.all(),\n widget=DistrictWidget\n )\n\n class Meta:\n model = project_models.Project\n fields = ['search', 'organisation', 'is_archived',\n 'created', 'administrative_district']\n\n\nclass ParticipantInviteDetailView(generic.DetailView):\n model = models.ParticipantInvite\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def dispatch(self, request, invite_token, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\n 'project-participant-invite-update',\n invite_token=invite_token\n )\n else:\n return super().dispatch(request, *args, **kwargs)\n\n\nclass ParticipantInviteUpdateView(LoginRequiredMixin, generic.UpdateView):\n model = models.ParticipantInvite\n form_class = forms.ParticipantInviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass ModeratorInviteDetailView(generic.DetailView):\n model = models.ModeratorInvite\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def dispatch(self, request, invite_token, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\n 'project-moderator-invite-update',\n invite_token=invite_token\n )\n else:\n return super().dispatch(request, *args, **kwargs)\n\n\nclass ModeratorInviteUpdateView(LoginRequiredMixin, generic.UpdateView):\n model = models.ModeratorInvite\n form_class = forms.ModeratorInviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def 
form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass AbstractProjectUserInviteListView(\n ProjectMixin,\n a4dashboard_mixins.DashboardBaseMixin,\n a4dashboard_mixins.DashboardComponentMixin,\n generic.base.TemplateResponseMixin,\n generic.edit.FormMixin,\n generic.detail.SingleObjectMixin,\n generic.edit.ProcessFormView):\n\n form_class = forms.InviteUsersFromEmailForm\n invite_model = None\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n if 'submit_action' in request.POST:\n if request.POST['submit_action'] == 'remove_user':\n pk = int(request.POST['user_pk'])\n user = get_object_or_404(User, pk=pk)\n related_users = getattr(self.object, self.related_users_field)\n related_users.remove(user)\n messages.success(request, self.success_message_removal)\n elif request.POST['submit_action'] == 'remove_invite':\n pk = int(request.POST['invite_pk'])\n invite = self.invite_model.objects.get(pk=pk)\n invite.delete()\n messages.success(request, _('Invitation succesfully removed.'))\n\n response = redirect(self.get_success_url())\n else:\n response = super().post(request, *args, **kwargs)\n\n self._send_component_updated_signal()\n return response\n\n def filter_existing(self, emails):\n related_users = getattr(self.object, self.related_users_field)\n related_emails = [u.email for u in related_users.all()]\n existing = []\n filtered_emails = []\n for email in emails:\n if email in related_emails:\n existing.append(email)\n else:\n filtered_emails.append(email)\n return filtered_emails, existing\n\n def filter_pending(self, emails):\n pending = []\n filtered_emails = []\n for email in emails:\n if self.invite_model.objects.filter(email=email,\n project=self.project).exists():\n pending.append(email)\n else:\n filtered_emails.append(email)\n return filtered_emails, pending\n\n def form_valid(self, form):\n emails = list(set(\n itertools.chain(form.cleaned_data['add_users'],\n form.cleaned_data['add_users_upload'])))\n\n emails, existing = self.filter_existing(emails)\n if existing:\n messages.error(\n self.request,\n _('Following users already accepted an invitation: ') +\n ', '.join(existing)\n )\n\n emails, pending = self.filter_pending(emails)\n if pending:\n messages.error(\n self.request,\n _('Following users are already invited: ') +\n ', '.join(pending)\n )\n\n for email in emails:\n self.invite_model.objects.invite(\n self.request.user,\n self.project,\n email\n )\n\n messages.success(\n self.request,\n ungettext(self.success_message[0], self.success_message[1],\n len(emails)).format(len(emails))\n )\n\n return redirect(self.get_success_url())\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['labels'] = (self.add_user_field_label,\n self.add_user_upload_field_label)\n return kwargs\n\n def _send_component_updated_signal(self):\n a4dashboard_signals.project_component_updated.send(\n sender=self.component.__class__,\n project=self.project,\n component=self.component,\n user=self.request.user\n )\n\n\nclass DashboardProjectModeratorsView(AbstractProjectUserInviteListView):\n\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n template_name = 'meinberlin_projects/project_moderators.html'\n permission_required = 
'a4projects.change_project'\n menu_item = 'project'\n\n related_users_field = 'moderators'\n add_user_field_label = _('Invite moderators via email')\n add_user_upload_field_label = _('Invite moderators via file upload')\n success_message = (_('{} moderator invited.'), _('{} moderators invited.'))\n success_message_removal = _('Moderator successfully removed.')\n\n invite_model = models.ModeratorInvite\n\n def get_permission_object(self):\n return self.project\n\n\nclass DashboardProjectParticipantsView(AbstractProjectUserInviteListView):\n\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n template_name = 'meinberlin_projects/project_participants.html'\n permission_required = 'a4projects.change_project'\n menu_item = 'project'\n\n related_users_field = 'participants'\n add_user_field_label = _('Invite users via email')\n add_user_upload_field_label = _('Invite users via file upload')\n success_message = (\n _('{} participant invited.'),\n _('{} participants invited.'))\n success_message_removal = _('Participant successfully removed.')\n\n invite_model = models.ParticipantInvite\n\n def get_permission_object(self):\n return self.project\n\n\nclass ProjectDetailView(PermissionRequiredMixin,\n generic.DetailView,\n ModuleClusterMixin):\n\n model = models.Project\n permission_required = 'a4projects.view_project'\n\n def get_template_names(self):\n type = get_project_type(self.project)\n if type == 'container':\n return ['meinberlin_projects/project_container_detail.html']\n if type == 'bplan':\n return ['meinberlin_projects/project_bplan_detail.html']\n return ['meinberlin_projects/project_detail.html']\n\n def dispatch(self, request, *args, **kwargs):\n kwargs['project'] = self.project\n kwargs['module'] = self.module\n\n if self.modules.count() == 1 and not self.events:\n return self._view_by_phase()(request, *args, **kwargs)\n else:\n return super().dispatch(request)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['event'] = self.get_current_event()\n context['modules'] = self.get_current_modules()\n context['participation_dates'] = self.full_list\n context['initial_slide'] = self.initial_slide\n return context\n\n @cached_property\n def project(self):\n return self.get_object()\n\n @cached_property\n def module(self):\n if self.modules.count() == 1 and not self.events:\n return self.modules.first()\n\n @cached_property\n def modules(self):\n return self.project.modules\\\n .annotate(start_date=Min('phase__start_date'))\\\n .annotate(end_date=Max('phase__end_date'))\\\n .exclude(Q(start_date=None) | Q(end_date=None))\\\n .order_by('start_date', 'id')\n\n @cached_property\n def events(self):\n return self.project.offlineevent_set.all()\n\n @cached_property\n def full_list(self):\n module_cluster = self.module_clusters\n event_list = self.get_events_list()\n full_list = module_cluster + list(event_list)\n return sorted(full_list, key=lambda k: k['date'])\n\n @cached_property\n def module_clusters(self):\n clusters = super().get_module_clusters(self.modules)\n if len(clusters) == 1:\n clusters[0]['title'] = _('Online Participation')\n return clusters\n\n @cached_property\n def initial_slide(self):\n initial_slide = self.request.GET.get('initialSlide')\n if initial_slide:\n return int(initial_slide)\n else:\n now = timezone.now()\n for idx, val in enumerate(self.full_list):\n if 'type' in val and val['type'] == 'module':\n start_date = val['date']\n end_date = val['end_date']\n if start_date and end_date:\n if now >= 
start_date and now <= end_date:\n return idx\n return 0\n\n @cached_property\n def display_timeline(self):\n return len(self.full_list) > 1\n\n @cached_property\n def is_project_view(self):\n return self.get_current_modules()\n\n def _view_by_phase(self):\n if self.module.last_active_phase:\n return self.module.last_active_phase.view.as_view()\n elif self.module.future_phases:\n return self.module.future_phases.first().view.as_view()\n else:\n return super().dispatch\n\n def _get_module_dict(self, count, start_date, end_date):\n return {\n 'title': _('{}. Online Participation').format(str(count)),\n 'type': 'module',\n 'date': start_date,\n 'end_date': end_date,\n 'modules': []\n }\n\n def get_current_event(self):\n fl = self.full_list\n idx = self.initial_slide\n try:\n current_dict = fl[idx]\n if 'type' not in current_dict:\n return self.full_list[self.initial_slide]\n except (IndexError, KeyError):\n return []\n return []\n\n def get_current_modules(self):\n fl = self.full_list\n idx = self.initial_slide\n try:\n current_dict = fl[idx]\n if current_dict['type'] == 'module':\n return self.full_list[self.initial_slide]['modules']\n except (IndexError, KeyError):\n return []\n\n def get_events_list(self):\n return self.events.values('date', 'name',\n 'event_type',\n 'slug', 'description')\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n\nclass ModuleDetailview(PermissionRequiredMixin,\n PhaseDispatchMixin):\n\n model = module_models.Module\n permission_required = 'a4projects.view_project'\n slug_url_kwarg = 'module_slug'\n\n @cached_property\n def project(self):\n return self.module.project\n\n @cached_property\n def module(self):\n return self.get_object()\n\n def get_permission_object(self):\n return self.project\n\n def get_context_data(self, **kwargs):\n \"\"\"Append project and module to the template context.\"\"\"\n if 'project' not in kwargs:\n kwargs['project'] = self.project\n if 'module' not in kwargs:\n kwargs['module'] = self.module\n return super().get_context_data(**kwargs)\n", "path": "meinberlin/apps/projects/views.py"}]} |
gh_patches_debug_1353 | rasdani/github-patches | git_diff | numpy__numpy-6851 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: use linux fallocate to reserve diskspace in array.tofile
fallocate allows the filesystem to make smarter decisions about space
allocation and gives a fast failure path when there is insufficient space.
This is very important for filesystems that suffer heavily from
fragmentation, such as btrfs.
The change is restricted to Linux only, as that is the only system whose
behavior I know. Other systems might also provide this system call, but we
don't want to accidentally trigger explicit zeroing behavior, as e.g.
posix_fallocate would when a real fallocate is not supported.
--- END ISSUE ---
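For illustration only: the snippet below is a minimal ctypes sketch, assuming a Linux/glibc system, of the fallocate(2) behavior the issue asks for — reserving the target size up front so that insufficient space fails immediately with ENOSPC rather than partway through a large write. It is not NumPy's implementation; the real change lives in the C code behind `tofile` and would be guarded by a `HAVE_FALLOCATE` define produced by the feature detection in `setup_common.py` shown below. The file path, the 10 MiB size, and the helper name `reserve_space` are made up for this sketch.

```python
import ctypes
import ctypes.util
import os

# Load libc and bind Linux fallocate(2): int fallocate(int fd, int mode,
# off_t offset, off_t len).  Assumes a 64-bit off_t (glibc on x86-64).
_libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
_libc.fallocate.argtypes = [ctypes.c_int, ctypes.c_int,
                            ctypes.c_int64, ctypes.c_int64]
_libc.fallocate.restype = ctypes.c_int


def reserve_space(fd, nbytes):
    """Ask the filesystem to reserve nbytes for fd, failing fast on ENOSPC."""
    if _libc.fallocate(fd, 0, 0, nbytes) != 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))


with open("/tmp/example.bin", "wb") as f:
    # Reserve 10 MiB before writing; a fragmented or nearly-full filesystem
    # rejects this call immediately instead of failing mid-write.
    reserve_space(f.fileno(), 10 * 1024 * 1024)
    f.write(b"\x00" * 1024)  # subsequent writes land in the reserved range
```

Note that fallocate itself writes no data, which is exactly why it is preferred here over posix_fallocate, whose fallback emulates the reservation by explicitly writing to every block when the filesystem lacks native support.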
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/core/setup_common.py`
Content:
```
1 from __future__ import division, absolute_import, print_function
2
3 # Code common to build tools
4 import sys
5 import warnings
6 import copy
7 import binascii
8
9 from numpy.distutils.misc_util import mingw32
10
11
12 #-------------------
13 # Versioning support
14 #-------------------
15 # How to change C_API_VERSION ?
16 # - increase C_API_VERSION value
17 # - record the hash for the new C API with the script cversions.py
18 # and add the hash to cversions.txt
19 # The hash values are used to remind developers when the C API number was not
20 # updated - generates a MismatchCAPIWarning warning which is turned into an
21 # exception for released version.
22
23 # Binary compatibility version number. This number is increased whenever the
24 # C-API is changed such that binary compatibility is broken, i.e. whenever a
25 # recompile of extension modules is needed.
26 C_ABI_VERSION = 0x01000009
27
28 # Minor API version. This number is increased whenever a change is made to the
29 # C-API -- whether it breaks binary compatibility or not. Some changes, such
30 # as adding a function pointer to the end of the function table, can be made
31 # without breaking binary compatibility. In this case, only the C_API_VERSION
32 # (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
33 # broken, both C_API_VERSION and C_ABI_VERSION should be increased.
34 #
35 # 0x00000008 - 1.7.x
36 # 0x00000009 - 1.8.x
37 # 0x00000009 - 1.9.x
38 # 0x0000000a - 1.10.x
39 C_API_VERSION = 0x0000000a
40
41 class MismatchCAPIWarning(Warning):
42 pass
43
44 def is_released(config):
45 """Return True if a released version of numpy is detected."""
46 from distutils.version import LooseVersion
47
48 v = config.get_version('../version.py')
49 if v is None:
50 raise ValueError("Could not get version")
51 pv = LooseVersion(vstring=v).version
52 if len(pv) > 3:
53 return False
54 return True
55
56 def get_api_versions(apiversion, codegen_dir):
57 """
58 Return current C API checksum and the recorded checksum.
59
60 Return current C API checksum and the recorded checksum for the given
61 version of the C API version.
62
63 """
64 # Compute the hash of the current API as defined in the .txt files in
65 # code_generators
66 sys.path.insert(0, codegen_dir)
67 try:
68 m = __import__('genapi')
69 numpy_api = __import__('numpy_api')
70 curapi_hash = m.fullapi_hash(numpy_api.full_api)
71 apis_hash = m.get_versions_hash()
72 finally:
73 del sys.path[0]
74
75 return curapi_hash, apis_hash[apiversion]
76
77 def check_api_version(apiversion, codegen_dir):
78 """Emits a MismacthCAPIWarning if the C API version needs updating."""
79 curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
80
81 # If different hash, it means that the api .txt files in
82 # codegen_dir have been updated without the API version being
83 # updated. Any modification in those .txt files should be reflected
84 # in the api and eventually abi versions.
85 # To compute the checksum of the current API, use
86 # code_generators/cversions.py script
87 if not curapi_hash == api_hash:
88 msg = ("API mismatch detected, the C API version "
89 "numbers have to be updated. Current C api version is %d, "
90 "with checksum %s, but recorded checksum for C API version %d in "
91 "codegen_dir/cversions.txt is %s. If functions were added in the "
92 "C API, you have to update C_API_VERSION in %s."
93 )
94 warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
95 __file__),
96 MismatchCAPIWarning)
97 # Mandatory functions: if not found, fail the build
98 MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
99 "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
100 "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
101
102 # Standard functions which may not be available and for which we have a
103 # replacement implementation. Note that some of these are C99 functions.
104 OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
105 "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
106 "copysign", "nextafter", "ftello", "fseeko",
107 "strtoll", "strtoull", "cbrt", "strtold_l",]
108
109
110 OPTIONAL_HEADERS = [
111 # sse headers only enabled automatically on amd64/x32 builds
112 "xmmintrin.h", # SSE
113 "emmintrin.h", # SSE2
114 "features.h", # for glibc version linux
115 ]
116
117 # optional gcc compiler builtins and their call arguments and optional a
118 # required header
119 # call arguments are required as the compiler will do strict signature checking
120 OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
121 ("__builtin_isinf", '5.'),
122 ("__builtin_isfinite", '5.'),
123 ("__builtin_bswap32", '5u'),
124 ("__builtin_bswap64", '5u'),
125 ("__builtin_expect", '5, 0'),
126 ("__builtin_mul_overflow", '5, 5, (int*)5'),
127 ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
128 ("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
129 "xmmintrin.h"), # SSE
130 ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
131 ("__builtin_prefetch", "(float*)0, 0, 3"),
132 ]
133
134 # function attributes
135 # tested via "int %s %s(void *);" % (attribute, name)
136 # function name will be converted to HAVE_<upper-case-name> preprocessor macro
137 OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
138 'attribute_optimize_unroll_loops'),
139 ('__attribute__((optimize("O3")))',
140 'attribute_optimize_opt_3'),
141 ('__attribute__((nonnull (1)))',
142 'attribute_nonnull'),
143 ]
144
145 # variable attributes tested via "int %s a" % attribute
146 OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
147
148 # Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h
149 OPTIONAL_STDFUNCS_MAYBE = [
150 "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
151 "ftello", "fseeko"
152 ]
153
154 # C99 functions: float and long double versions
155 C99_FUNCS = [
156 "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
157 "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
158 "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
159 "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
160 "nextafter", "cbrt"
161 ]
162 C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
163 C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
164 C99_COMPLEX_TYPES = [
165 'complex double', 'complex float', 'complex long double'
166 ]
167 C99_COMPLEX_FUNCS = [
168 "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
169 "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
170 "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
171 ]
172
173 def fname2def(name):
174 return "HAVE_%s" % name.upper()
175
176 def sym2def(symbol):
177 define = symbol.replace(' ', '')
178 return define.upper()
179
180 def type2def(symbol):
181 define = symbol.replace(' ', '_')
182 return define.upper()
183
184 # Code to detect long double representation taken from MPFR m4 macro
185 def check_long_double_representation(cmd):
186 cmd._check_compiler()
187 body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
188
189 # Disable whole program optimization (the default on vs2015, with python 3.5+)
190 # which generates intermediary object files and prevents checking the
191 # float representation.
192 if sys.platform == "win32" and not mingw32():
193 try:
194 cmd.compiler.compile_options.remove("/GL")
195 except ValueError:
196 pass
197
198 # We need to use _compile because we need the object filename
199 src, obj = cmd._compile(body, None, None, 'c')
200 try:
201 ltype = long_double_representation(pyod(obj))
202 return ltype
203 except ValueError:
204 # try linking to support CC="gcc -flto" or icc -ipo
205 # struct needs to be volatile so it isn't optimized away
206 body = body.replace('struct', 'volatile struct')
207 body += "int main(void) { return 0; }\n"
208 src, obj = cmd._compile(body, None, None, 'c')
209 cmd.temp_files.append("_configtest")
210 cmd.compiler.link_executable([obj], "_configtest")
211 ltype = long_double_representation(pyod("_configtest"))
212 return ltype
213 finally:
214 cmd._clean()
215
216 LONG_DOUBLE_REPRESENTATION_SRC = r"""
217 /* "before" is 16 bytes to ensure there's no padding between it and "x".
218 * We're not expecting any "long double" bigger than 16 bytes or with
219 * alignment requirements stricter than 16 bytes. */
220 typedef %(type)s test_type;
221
222 struct {
223 char before[16];
224 test_type x;
225 char after[8];
226 } foo = {
227 { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
228 '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
229 -123456789.0,
230 { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
231 };
232 """
233
234 def pyod(filename):
235 """Python implementation of the od UNIX utility (od -b, more exactly).
236
237 Parameters
238 ----------
239 filename : str
240 name of the file to get the dump from.
241
242 Returns
243 -------
244 out : seq
245 list of lines of od output
246
247 Note
248 ----
249 We only implement enough to get the necessary information for long double
250 representation, this is not intended as a compatible replacement for od.
251 """
252 def _pyod2():
253 out = []
254
255 fid = open(filename, 'rb')
256 try:
257 yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
258 for i in range(0, len(yo), 16):
259 line = ['%07d' % int(oct(i))]
260 line.extend(['%03d' % c for c in yo[i:i+16]])
261 out.append(" ".join(line))
262 return out
263 finally:
264 fid.close()
265
266 def _pyod3():
267 out = []
268
269 fid = open(filename, 'rb')
270 try:
271 yo2 = [oct(o)[2:] for o in fid.read()]
272 for i in range(0, len(yo2), 16):
273 line = ['%07d' % int(oct(i)[2:])]
274 line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
275 out.append(" ".join(line))
276 return out
277 finally:
278 fid.close()
279
280 if sys.version_info[0] < 3:
281 return _pyod2()
282 else:
283 return _pyod3()
284
285 _BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
286 '001', '043', '105', '147', '211', '253', '315', '357']
287 _AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
288
289 _IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
290 _IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
291 _INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
292 '031', '300', '000', '000']
293 _INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
294 '031', '300', '000', '000', '000', '000', '000', '000']
295 _MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
296 '242', '240', '000', '000', '000', '000']
297 _IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
298 '000', '000', '000', '000', '000', '000', '000', '000']
299 _IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
300 _DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
301 ['000'] * 8)
302 _DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
303 ['000'] * 8)
304
305 def long_double_representation(lines):
306 """Given a binary dump as given by GNU od -b, look for long double
307 representation."""
308
309 # Read contains a list of 32 items, each item is a byte (in octal
310 # representation, as a string). We 'slide' over the output until read is of
311 # the form before_seq + content + after_sequence, where content is the long double
312 # representation:
313 # - content is 12 bytes: 80 bits Intel representation
314 # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
315 # - content is 8 bytes: same as double (not implemented yet)
316 read = [''] * 32
317 saw = None
318 for line in lines:
319 # we skip the first word, as od -b output an index at the beginning of
320 # each line
321 for w in line.split()[1:]:
322 read.pop(0)
323 read.append(w)
324
325 # If the end of read is equal to the after_sequence, read contains
326 # the long double
327 if read[-8:] == _AFTER_SEQ:
328 saw = copy.copy(read)
329 if read[:12] == _BEFORE_SEQ[4:]:
330 if read[12:-8] == _INTEL_EXTENDED_12B:
331 return 'INTEL_EXTENDED_12_BYTES_LE'
332 if read[12:-8] == _MOTOROLA_EXTENDED_12B:
333 return 'MOTOROLA_EXTENDED_12_BYTES_BE'
334 elif read[:8] == _BEFORE_SEQ[8:]:
335 if read[8:-8] == _INTEL_EXTENDED_16B:
336 return 'INTEL_EXTENDED_16_BYTES_LE'
337 elif read[8:-8] == _IEEE_QUAD_PREC_BE:
338 return 'IEEE_QUAD_BE'
339 elif read[8:-8] == _IEEE_QUAD_PREC_LE:
340 return 'IEEE_QUAD_LE'
341 elif read[8:-8] == _DOUBLE_DOUBLE_BE:
342 return 'DOUBLE_DOUBLE_BE'
343 elif read[8:-8] == _DOUBLE_DOUBLE_LE:
344 return 'DOUBLE_DOUBLE_LE'
345 elif read[:16] == _BEFORE_SEQ:
346 if read[16:-8] == _IEEE_DOUBLE_LE:
347 return 'IEEE_DOUBLE_LE'
348 elif read[16:-8] == _IEEE_DOUBLE_BE:
349 return 'IEEE_DOUBLE_BE'
350
351 if saw is not None:
352 raise ValueError("Unrecognized format (%s)" % saw)
353 else:
354 # We never detected the after_sequence
355 raise ValueError("Could not lock sequences (%s)" % saw)
356
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -104,7 +104,7 @@
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "ftello", "fseeko",
- "strtoll", "strtoull", "cbrt", "strtold_l",]
+ "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate"]
OPTIONAL_HEADERS = [
| {"golden_diff": "diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py\n--- a/numpy/core/setup_common.py\n+++ b/numpy/core/setup_common.py\n@@ -104,7 +104,7 @@\n OPTIONAL_STDFUNCS = [\"expm1\", \"log1p\", \"acosh\", \"asinh\", \"atanh\",\n \"rint\", \"trunc\", \"exp2\", \"log2\", \"hypot\", \"atan2\", \"pow\",\n \"copysign\", \"nextafter\", \"ftello\", \"fseeko\",\n- \"strtoll\", \"strtoull\", \"cbrt\", \"strtold_l\",]\n+ \"strtoll\", \"strtoull\", \"cbrt\", \"strtold_l\", \"fallocate\"]\n \n \n OPTIONAL_HEADERS = [\n", "issue": "ENH: use linux fallocate to reserve diskspace in array.tofile\nfallocate allows the filesystem to make smarter decisions about space\nallocation and gives a fast failure path for insufficient space.\nThis is very important for filesystems that suffer a lot from\nfragmentation like btrfs.\nRestricted to linux only as that is the only system I know the behavior\nof. Other systems might also have this system call but we don't want to\naccidentally trigger explicit zeroing behavior as e.g. posix_fallocate\nwould when there is no support for a real fallocate.\n\n", "before_files": [{"content": "from __future__ import division, absolute_import, print_function\n\n# Code common to build tools\nimport sys\nimport warnings\nimport copy\nimport binascii\n\nfrom numpy.distutils.misc_util import mingw32\n\n\n#-------------------\n# Versioning support\n#-------------------\n# How to change C_API_VERSION ?\n# - increase C_API_VERSION value\n# - record the hash for the new C API with the script cversions.py\n# and add the hash to cversions.txt\n# The hash values are used to remind developers when the C API number was not\n# updated - generates a MismatchCAPIWarning warning which is turned into an\n# exception for released version.\n\n# Binary compatibility version number. This number is increased whenever the\n# C-API is changed such that binary compatibility is broken, i.e. whenever a\n# recompile of extension modules is needed.\nC_ABI_VERSION = 0x01000009\n\n# Minor API version. This number is increased whenever a change is made to the\n# C-API -- whether it breaks binary compatibility or not. Some changes, such\n# as adding a function pointer to the end of the function table, can be made\n# without breaking binary compatibility. In this case, only the C_API_VERSION\n# (*not* C_ABI_VERSION) would be increased. 
Whenever binary compatibility is\n# broken, both C_API_VERSION and C_ABI_VERSION should be increased.\n#\n# 0x00000008 - 1.7.x\n# 0x00000009 - 1.8.x\n# 0x00000009 - 1.9.x\n# 0x0000000a - 1.10.x\nC_API_VERSION = 0x0000000a\n\nclass MismatchCAPIWarning(Warning):\n pass\n\ndef is_released(config):\n \"\"\"Return True if a released version of numpy is detected.\"\"\"\n from distutils.version import LooseVersion\n\n v = config.get_version('../version.py')\n if v is None:\n raise ValueError(\"Could not get version\")\n pv = LooseVersion(vstring=v).version\n if len(pv) > 3:\n return False\n return True\n\ndef get_api_versions(apiversion, codegen_dir):\n \"\"\"\n Return current C API checksum and the recorded checksum.\n\n Return current C API checksum and the recorded checksum for the given\n version of the C API version.\n\n \"\"\"\n # Compute the hash of the current API as defined in the .txt files in\n # code_generators\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__('genapi')\n numpy_api = __import__('numpy_api')\n curapi_hash = m.fullapi_hash(numpy_api.full_api)\n apis_hash = m.get_versions_hash()\n finally:\n del sys.path[0]\n\n return curapi_hash, apis_hash[apiversion]\n\ndef check_api_version(apiversion, codegen_dir):\n \"\"\"Emits a MismacthCAPIWarning if the C API version needs updating.\"\"\"\n curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)\n\n # If different hash, it means that the api .txt files in\n # codegen_dir have been updated without the API version being\n # updated. Any modification in those .txt files should be reflected\n # in the api and eventually abi versions.\n # To compute the checksum of the current API, use\n # code_generators/cversions.py script\n if not curapi_hash == api_hash:\n msg = (\"API mismatch detected, the C API version \"\n \"numbers have to be updated. Current C api version is %d, \"\n \"with checksum %s, but recorded checksum for C API version %d in \"\n \"codegen_dir/cversions.txt is %s. If functions were added in the \"\n \"C API, you have to update C_API_VERSION in %s.\"\n )\n warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,\n __file__),\n MismatchCAPIWarning)\n# Mandatory functions: if not found, fail the build\nMANDATORY_FUNCS = [\"sin\", \"cos\", \"tan\", \"sinh\", \"cosh\", \"tanh\", \"fabs\",\n \"floor\", \"ceil\", \"sqrt\", \"log10\", \"log\", \"exp\", \"asin\",\n \"acos\", \"atan\", \"fmod\", 'modf', 'frexp', 'ldexp']\n\n# Standard functions which may not be available and for which we have a\n# replacement implementation. 
Note that some of these are C99 functions.\nOPTIONAL_STDFUNCS = [\"expm1\", \"log1p\", \"acosh\", \"asinh\", \"atanh\",\n \"rint\", \"trunc\", \"exp2\", \"log2\", \"hypot\", \"atan2\", \"pow\",\n \"copysign\", \"nextafter\", \"ftello\", \"fseeko\",\n \"strtoll\", \"strtoull\", \"cbrt\", \"strtold_l\",]\n\n\nOPTIONAL_HEADERS = [\n# sse headers only enabled automatically on amd64/x32 builds\n \"xmmintrin.h\", # SSE\n \"emmintrin.h\", # SSE2\n \"features.h\", # for glibc version linux\n]\n\n# optional gcc compiler builtins and their call arguments and optional a\n# required header\n# call arguments are required as the compiler will do strict signature checking\nOPTIONAL_INTRINSICS = [(\"__builtin_isnan\", '5.'),\n (\"__builtin_isinf\", '5.'),\n (\"__builtin_isfinite\", '5.'),\n (\"__builtin_bswap32\", '5u'),\n (\"__builtin_bswap64\", '5u'),\n (\"__builtin_expect\", '5, 0'),\n (\"__builtin_mul_overflow\", '5, 5, (int*)5'),\n (\"_mm_load_ps\", '(float*)0', \"xmmintrin.h\"), # SSE\n (\"_mm_prefetch\", '(float*)0, _MM_HINT_NTA',\n \"xmmintrin.h\"), # SSE\n (\"_mm_load_pd\", '(double*)0', \"emmintrin.h\"), # SSE2\n (\"__builtin_prefetch\", \"(float*)0, 0, 3\"),\n ]\n\n# function attributes\n# tested via \"int %s %s(void *);\" % (attribute, name)\n# function name will be converted to HAVE_<upper-case-name> preprocessor macro\nOPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize(\"unroll-loops\")))',\n 'attribute_optimize_unroll_loops'),\n ('__attribute__((optimize(\"O3\")))',\n 'attribute_optimize_opt_3'),\n ('__attribute__((nonnull (1)))',\n 'attribute_nonnull'),\n ]\n\n# variable attributes tested via \"int %s a\" % attribute\nOPTIONAL_VARIABLE_ATTRIBUTES = [\"__thread\", \"__declspec(thread)\"]\n\n# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h\nOPTIONAL_STDFUNCS_MAYBE = [\n \"expm1\", \"log1p\", \"acosh\", \"atanh\", \"asinh\", \"hypot\", \"copysign\",\n \"ftello\", \"fseeko\"\n ]\n\n# C99 functions: float and long double versions\nC99_FUNCS = [\n \"sin\", \"cos\", \"tan\", \"sinh\", \"cosh\", \"tanh\", \"fabs\", \"floor\", \"ceil\",\n \"rint\", \"trunc\", \"sqrt\", \"log10\", \"log\", \"log1p\", \"exp\", \"expm1\",\n \"asin\", \"acos\", \"atan\", \"asinh\", \"acosh\", \"atanh\", \"hypot\", \"atan2\",\n \"pow\", \"fmod\", \"modf\", 'frexp', 'ldexp', \"exp2\", \"log2\", \"copysign\",\n \"nextafter\", \"cbrt\"\n ]\nC99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]\nC99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]\nC99_COMPLEX_TYPES = [\n 'complex double', 'complex float', 'complex long double'\n ]\nC99_COMPLEX_FUNCS = [\n \"cabs\", \"cacos\", \"cacosh\", \"carg\", \"casin\", \"casinh\", \"catan\",\n \"catanh\", \"ccos\", \"ccosh\", \"cexp\", \"cimag\", \"clog\", \"conj\", \"cpow\",\n \"cproj\", \"creal\", \"csin\", \"csinh\", \"csqrt\", \"ctan\", \"ctanh\"\n ]\n\ndef fname2def(name):\n return \"HAVE_%s\" % name.upper()\n\ndef sym2def(symbol):\n define = symbol.replace(' ', '')\n return define.upper()\n\ndef type2def(symbol):\n define = symbol.replace(' ', '_')\n return define.upper()\n\n# Code to detect long double representation taken from MPFR m4 macro\ndef check_long_double_representation(cmd):\n cmd._check_compiler()\n body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}\n\n # Disable whole program optimization (the default on vs2015, with python 3.5+)\n # which generates intermediary object files and prevents checking the\n # float representation.\n if sys.platform == \"win32\" and not mingw32():\n try:\n 
cmd.compiler.compile_options.remove(\"/GL\")\n except ValueError:\n pass\n\n # We need to use _compile because we need the object filename\n src, obj = cmd._compile(body, None, None, 'c')\n try:\n ltype = long_double_representation(pyod(obj))\n return ltype\n except ValueError:\n # try linking to support CC=\"gcc -flto\" or icc -ipo\n # struct needs to be volatile so it isn't optimized away\n body = body.replace('struct', 'volatile struct')\n body += \"int main(void) { return 0; }\\n\"\n src, obj = cmd._compile(body, None, None, 'c')\n cmd.temp_files.append(\"_configtest\")\n cmd.compiler.link_executable([obj], \"_configtest\")\n ltype = long_double_representation(pyod(\"_configtest\"))\n return ltype\n finally:\n cmd._clean()\n\nLONG_DOUBLE_REPRESENTATION_SRC = r\"\"\"\n/* \"before\" is 16 bytes to ensure there's no padding between it and \"x\".\n * We're not expecting any \"long double\" bigger than 16 bytes or with\n * alignment requirements stricter than 16 bytes. */\ntypedef %(type)s test_type;\n\nstruct {\n char before[16];\n test_type x;\n char after[8];\n} foo = {\n { '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0',\n '\\001', '\\043', '\\105', '\\147', '\\211', '\\253', '\\315', '\\357' },\n -123456789.0,\n { '\\376', '\\334', '\\272', '\\230', '\\166', '\\124', '\\062', '\\020' }\n};\n\"\"\"\n\ndef pyod(filename):\n \"\"\"Python implementation of the od UNIX utility (od -b, more exactly).\n\n Parameters\n ----------\n filename : str\n name of the file to get the dump from.\n\n Returns\n -------\n out : seq\n list of lines of od output\n\n Note\n ----\n We only implement enough to get the necessary information for long double\n representation, this is not intended as a compatible replacement for od.\n \"\"\"\n def _pyod2():\n out = []\n\n fid = open(filename, 'rb')\n try:\n yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]\n for i in range(0, len(yo), 16):\n line = ['%07d' % int(oct(i))]\n line.extend(['%03d' % c for c in yo[i:i+16]])\n out.append(\" \".join(line))\n return out\n finally:\n fid.close()\n\n def _pyod3():\n out = []\n\n fid = open(filename, 'rb')\n try:\n yo2 = [oct(o)[2:] for o in fid.read()]\n for i in range(0, len(yo2), 16):\n line = ['%07d' % int(oct(i)[2:])]\n line.extend(['%03d' % int(c) for c in yo2[i:i+16]])\n out.append(\" \".join(line))\n return out\n finally:\n fid.close()\n\n if sys.version_info[0] < 3:\n return _pyod2()\n else:\n return _pyod3()\n\n_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',\n '001', '043', '105', '147', '211', '253', '315', '357']\n_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']\n\n_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']\n_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]\n_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',\n '031', '300', '000', '000']\n_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',\n '031', '300', '000', '000', '000', '000', '000', '000']\n_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',\n '242', '240', '000', '000', '000', '000']\n_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',\n '000', '000', '000', '000', '000', '000', '000', '000']\n_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]\n_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +\n ['000'] * 8)\n_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +\n ['000'] * 8)\n\ndef long_double_representation(lines):\n 
\"\"\"Given a binary dump as given by GNU od -b, look for long double\n representation.\"\"\"\n\n # Read contains a list of 32 items, each item is a byte (in octal\n # representation, as a string). We 'slide' over the output until read is of\n # the form before_seq + content + after_sequence, where content is the long double\n # representation:\n # - content is 12 bytes: 80 bits Intel representation\n # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision\n # - content is 8 bytes: same as double (not implemented yet)\n read = [''] * 32\n saw = None\n for line in lines:\n # we skip the first word, as od -b output an index at the beginning of\n # each line\n for w in line.split()[1:]:\n read.pop(0)\n read.append(w)\n\n # If the end of read is equal to the after_sequence, read contains\n # the long double\n if read[-8:] == _AFTER_SEQ:\n saw = copy.copy(read)\n if read[:12] == _BEFORE_SEQ[4:]:\n if read[12:-8] == _INTEL_EXTENDED_12B:\n return 'INTEL_EXTENDED_12_BYTES_LE'\n if read[12:-8] == _MOTOROLA_EXTENDED_12B:\n return 'MOTOROLA_EXTENDED_12_BYTES_BE'\n elif read[:8] == _BEFORE_SEQ[8:]:\n if read[8:-8] == _INTEL_EXTENDED_16B:\n return 'INTEL_EXTENDED_16_BYTES_LE'\n elif read[8:-8] == _IEEE_QUAD_PREC_BE:\n return 'IEEE_QUAD_BE'\n elif read[8:-8] == _IEEE_QUAD_PREC_LE:\n return 'IEEE_QUAD_LE'\n elif read[8:-8] == _DOUBLE_DOUBLE_BE:\n return 'DOUBLE_DOUBLE_BE'\n elif read[8:-8] == _DOUBLE_DOUBLE_LE:\n return 'DOUBLE_DOUBLE_LE'\n elif read[:16] == _BEFORE_SEQ:\n if read[16:-8] == _IEEE_DOUBLE_LE:\n return 'IEEE_DOUBLE_LE'\n elif read[16:-8] == _IEEE_DOUBLE_BE:\n return 'IEEE_DOUBLE_BE'\n\n if saw is not None:\n raise ValueError(\"Unrecognized format (%s)\" % saw)\n else:\n # We never detected the after_sequence\n raise ValueError(\"Could not lock sequences (%s)\" % saw)\n", "path": "numpy/core/setup_common.py"}], "after_files": [{"content": "from __future__ import division, absolute_import, print_function\n\n# Code common to build tools\nimport sys\nimport warnings\nimport copy\nimport binascii\n\nfrom numpy.distutils.misc_util import mingw32\n\n\n#-------------------\n# Versioning support\n#-------------------\n# How to change C_API_VERSION ?\n# - increase C_API_VERSION value\n# - record the hash for the new C API with the script cversions.py\n# and add the hash to cversions.txt\n# The hash values are used to remind developers when the C API number was not\n# updated - generates a MismatchCAPIWarning warning which is turned into an\n# exception for released version.\n\n# Binary compatibility version number. This number is increased whenever the\n# C-API is changed such that binary compatibility is broken, i.e. whenever a\n# recompile of extension modules is needed.\nC_ABI_VERSION = 0x01000009\n\n# Minor API version. This number is increased whenever a change is made to the\n# C-API -- whether it breaks binary compatibility or not. Some changes, such\n# as adding a function pointer to the end of the function table, can be made\n# without breaking binary compatibility. In this case, only the C_API_VERSION\n# (*not* C_ABI_VERSION) would be increased. 
Whenever binary compatibility is\n# broken, both C_API_VERSION and C_ABI_VERSION should be increased.\n#\n# 0x00000008 - 1.7.x\n# 0x00000009 - 1.8.x\n# 0x00000009 - 1.9.x\n# 0x0000000a - 1.10.x\nC_API_VERSION = 0x0000000a\n\nclass MismatchCAPIWarning(Warning):\n pass\n\ndef is_released(config):\n \"\"\"Return True if a released version of numpy is detected.\"\"\"\n from distutils.version import LooseVersion\n\n v = config.get_version('../version.py')\n if v is None:\n raise ValueError(\"Could not get version\")\n pv = LooseVersion(vstring=v).version\n if len(pv) > 3:\n return False\n return True\n\ndef get_api_versions(apiversion, codegen_dir):\n \"\"\"\n Return current C API checksum and the recorded checksum.\n\n Return current C API checksum and the recorded checksum for the given\n version of the C API version.\n\n \"\"\"\n # Compute the hash of the current API as defined in the .txt files in\n # code_generators\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__('genapi')\n numpy_api = __import__('numpy_api')\n curapi_hash = m.fullapi_hash(numpy_api.full_api)\n apis_hash = m.get_versions_hash()\n finally:\n del sys.path[0]\n\n return curapi_hash, apis_hash[apiversion]\n\ndef check_api_version(apiversion, codegen_dir):\n \"\"\"Emits a MismacthCAPIWarning if the C API version needs updating.\"\"\"\n curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)\n\n # If different hash, it means that the api .txt files in\n # codegen_dir have been updated without the API version being\n # updated. Any modification in those .txt files should be reflected\n # in the api and eventually abi versions.\n # To compute the checksum of the current API, use\n # code_generators/cversions.py script\n if not curapi_hash == api_hash:\n msg = (\"API mismatch detected, the C API version \"\n \"numbers have to be updated. Current C api version is %d, \"\n \"with checksum %s, but recorded checksum for C API version %d in \"\n \"codegen_dir/cversions.txt is %s. If functions were added in the \"\n \"C API, you have to update C_API_VERSION in %s.\"\n )\n warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,\n __file__),\n MismatchCAPIWarning)\n# Mandatory functions: if not found, fail the build\nMANDATORY_FUNCS = [\"sin\", \"cos\", \"tan\", \"sinh\", \"cosh\", \"tanh\", \"fabs\",\n \"floor\", \"ceil\", \"sqrt\", \"log10\", \"log\", \"exp\", \"asin\",\n \"acos\", \"atan\", \"fmod\", 'modf', 'frexp', 'ldexp']\n\n# Standard functions which may not be available and for which we have a\n# replacement implementation. 
Note that some of these are C99 functions.\nOPTIONAL_STDFUNCS = [\"expm1\", \"log1p\", \"acosh\", \"asinh\", \"atanh\",\n \"rint\", \"trunc\", \"exp2\", \"log2\", \"hypot\", \"atan2\", \"pow\",\n \"copysign\", \"nextafter\", \"ftello\", \"fseeko\",\n \"strtoll\", \"strtoull\", \"cbrt\", \"strtold_l\", \"fallocate\"]\n\n\nOPTIONAL_HEADERS = [\n# sse headers only enabled automatically on amd64/x32 builds\n \"xmmintrin.h\", # SSE\n \"emmintrin.h\", # SSE2\n \"features.h\", # for glibc version linux\n]\n\n# optional gcc compiler builtins and their call arguments and optional a\n# required header\n# call arguments are required as the compiler will do strict signature checking\nOPTIONAL_INTRINSICS = [(\"__builtin_isnan\", '5.'),\n (\"__builtin_isinf\", '5.'),\n (\"__builtin_isfinite\", '5.'),\n (\"__builtin_bswap32\", '5u'),\n (\"__builtin_bswap64\", '5u'),\n (\"__builtin_expect\", '5, 0'),\n (\"__builtin_mul_overflow\", '5, 5, (int*)5'),\n (\"_mm_load_ps\", '(float*)0', \"xmmintrin.h\"), # SSE\n (\"_mm_prefetch\", '(float*)0, _MM_HINT_NTA',\n \"xmmintrin.h\"), # SSE\n (\"_mm_load_pd\", '(double*)0', \"emmintrin.h\"), # SSE2\n (\"__builtin_prefetch\", \"(float*)0, 0, 3\"),\n ]\n\n# function attributes\n# tested via \"int %s %s(void *);\" % (attribute, name)\n# function name will be converted to HAVE_<upper-case-name> preprocessor macro\nOPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize(\"unroll-loops\")))',\n 'attribute_optimize_unroll_loops'),\n ('__attribute__((optimize(\"O3\")))',\n 'attribute_optimize_opt_3'),\n ('__attribute__((nonnull (1)))',\n 'attribute_nonnull'),\n ]\n\n# variable attributes tested via \"int %s a\" % attribute\nOPTIONAL_VARIABLE_ATTRIBUTES = [\"__thread\", \"__declspec(thread)\"]\n\n# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h\nOPTIONAL_STDFUNCS_MAYBE = [\n \"expm1\", \"log1p\", \"acosh\", \"atanh\", \"asinh\", \"hypot\", \"copysign\",\n \"ftello\", \"fseeko\"\n ]\n\n# C99 functions: float and long double versions\nC99_FUNCS = [\n \"sin\", \"cos\", \"tan\", \"sinh\", \"cosh\", \"tanh\", \"fabs\", \"floor\", \"ceil\",\n \"rint\", \"trunc\", \"sqrt\", \"log10\", \"log\", \"log1p\", \"exp\", \"expm1\",\n \"asin\", \"acos\", \"atan\", \"asinh\", \"acosh\", \"atanh\", \"hypot\", \"atan2\",\n \"pow\", \"fmod\", \"modf\", 'frexp', 'ldexp', \"exp2\", \"log2\", \"copysign\",\n \"nextafter\", \"cbrt\"\n ]\nC99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]\nC99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]\nC99_COMPLEX_TYPES = [\n 'complex double', 'complex float', 'complex long double'\n ]\nC99_COMPLEX_FUNCS = [\n \"cabs\", \"cacos\", \"cacosh\", \"carg\", \"casin\", \"casinh\", \"catan\",\n \"catanh\", \"ccos\", \"ccosh\", \"cexp\", \"cimag\", \"clog\", \"conj\", \"cpow\",\n \"cproj\", \"creal\", \"csin\", \"csinh\", \"csqrt\", \"ctan\", \"ctanh\"\n ]\n\ndef fname2def(name):\n return \"HAVE_%s\" % name.upper()\n\ndef sym2def(symbol):\n define = symbol.replace(' ', '')\n return define.upper()\n\ndef type2def(symbol):\n define = symbol.replace(' ', '_')\n return define.upper()\n\n# Code to detect long double representation taken from MPFR m4 macro\ndef check_long_double_representation(cmd):\n cmd._check_compiler()\n body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}\n\n # Disable whole program optimization (the default on vs2015, with python 3.5+)\n # which generates intermediary object files and prevents checking the\n # float representation.\n if sys.platform == \"win32\" and not mingw32():\n try:\n 
cmd.compiler.compile_options.remove(\"/GL\")\n except ValueError:\n pass\n\n # We need to use _compile because we need the object filename\n src, obj = cmd._compile(body, None, None, 'c')\n try:\n ltype = long_double_representation(pyod(obj))\n return ltype\n except ValueError:\n # try linking to support CC=\"gcc -flto\" or icc -ipo\n # struct needs to be volatile so it isn't optimized away\n body = body.replace('struct', 'volatile struct')\n body += \"int main(void) { return 0; }\\n\"\n src, obj = cmd._compile(body, None, None, 'c')\n cmd.temp_files.append(\"_configtest\")\n cmd.compiler.link_executable([obj], \"_configtest\")\n ltype = long_double_representation(pyod(\"_configtest\"))\n return ltype\n finally:\n cmd._clean()\n\nLONG_DOUBLE_REPRESENTATION_SRC = r\"\"\"\n/* \"before\" is 16 bytes to ensure there's no padding between it and \"x\".\n * We're not expecting any \"long double\" bigger than 16 bytes or with\n * alignment requirements stricter than 16 bytes. */\ntypedef %(type)s test_type;\n\nstruct {\n char before[16];\n test_type x;\n char after[8];\n} foo = {\n { '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0',\n '\\001', '\\043', '\\105', '\\147', '\\211', '\\253', '\\315', '\\357' },\n -123456789.0,\n { '\\376', '\\334', '\\272', '\\230', '\\166', '\\124', '\\062', '\\020' }\n};\n\"\"\"\n\ndef pyod(filename):\n \"\"\"Python implementation of the od UNIX utility (od -b, more exactly).\n\n Parameters\n ----------\n filename : str\n name of the file to get the dump from.\n\n Returns\n -------\n out : seq\n list of lines of od output\n\n Note\n ----\n We only implement enough to get the necessary information for long double\n representation, this is not intended as a compatible replacement for od.\n \"\"\"\n def _pyod2():\n out = []\n\n fid = open(filename, 'rb')\n try:\n yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]\n for i in range(0, len(yo), 16):\n line = ['%07d' % int(oct(i))]\n line.extend(['%03d' % c for c in yo[i:i+16]])\n out.append(\" \".join(line))\n return out\n finally:\n fid.close()\n\n def _pyod3():\n out = []\n\n fid = open(filename, 'rb')\n try:\n yo2 = [oct(o)[2:] for o in fid.read()]\n for i in range(0, len(yo2), 16):\n line = ['%07d' % int(oct(i)[2:])]\n line.extend(['%03d' % int(c) for c in yo2[i:i+16]])\n out.append(\" \".join(line))\n return out\n finally:\n fid.close()\n\n if sys.version_info[0] < 3:\n return _pyod2()\n else:\n return _pyod3()\n\n_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',\n '001', '043', '105', '147', '211', '253', '315', '357']\n_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']\n\n_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']\n_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]\n_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',\n '031', '300', '000', '000']\n_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',\n '031', '300', '000', '000', '000', '000', '000', '000']\n_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',\n '242', '240', '000', '000', '000', '000']\n_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',\n '000', '000', '000', '000', '000', '000', '000', '000']\n_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]\n_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +\n ['000'] * 8)\n_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +\n ['000'] * 8)\n\ndef long_double_representation(lines):\n 
\"\"\"Given a binary dump as given by GNU od -b, look for long double\n representation.\"\"\"\n\n # Read contains a list of 32 items, each item is a byte (in octal\n # representation, as a string). We 'slide' over the output until read is of\n # the form before_seq + content + after_sequence, where content is the long double\n # representation:\n # - content is 12 bytes: 80 bits Intel representation\n # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision\n # - content is 8 bytes: same as double (not implemented yet)\n read = [''] * 32\n saw = None\n for line in lines:\n # we skip the first word, as od -b output an index at the beginning of\n # each line\n for w in line.split()[1:]:\n read.pop(0)\n read.append(w)\n\n # If the end of read is equal to the after_sequence, read contains\n # the long double\n if read[-8:] == _AFTER_SEQ:\n saw = copy.copy(read)\n if read[:12] == _BEFORE_SEQ[4:]:\n if read[12:-8] == _INTEL_EXTENDED_12B:\n return 'INTEL_EXTENDED_12_BYTES_LE'\n if read[12:-8] == _MOTOROLA_EXTENDED_12B:\n return 'MOTOROLA_EXTENDED_12_BYTES_BE'\n elif read[:8] == _BEFORE_SEQ[8:]:\n if read[8:-8] == _INTEL_EXTENDED_16B:\n return 'INTEL_EXTENDED_16_BYTES_LE'\n elif read[8:-8] == _IEEE_QUAD_PREC_BE:\n return 'IEEE_QUAD_BE'\n elif read[8:-8] == _IEEE_QUAD_PREC_LE:\n return 'IEEE_QUAD_LE'\n elif read[8:-8] == _DOUBLE_DOUBLE_BE:\n return 'DOUBLE_DOUBLE_BE'\n elif read[8:-8] == _DOUBLE_DOUBLE_LE:\n return 'DOUBLE_DOUBLE_LE'\n elif read[:16] == _BEFORE_SEQ:\n if read[16:-8] == _IEEE_DOUBLE_LE:\n return 'IEEE_DOUBLE_LE'\n elif read[16:-8] == _IEEE_DOUBLE_BE:\n return 'IEEE_DOUBLE_BE'\n\n if saw is not None:\n raise ValueError(\"Unrecognized format (%s)\" % saw)\n else:\n # We never detected the after_sequence\n raise ValueError(\"Could not lock sequences (%s)\" % saw)\n", "path": "numpy/core/setup_common.py"}]} |
gh_patches_debug_1354 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-3774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: The export of interventions filtered in the table is not filtered in the exported CSV
**Context:** 
Bug in the intervention module
**Expected result:** 
When I filter the table listing the objects in the module and export the result in CSV format, the CSV must contain only the filtered results
**Observed result (bug):** 
The resulting CSV table contains all of the interventions, unfiltered, without taking into account any filters the user chose in the interface. (A minimal sketch of this pattern follows the issue block.)
--- END ISSUE ---
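To make the expected behaviour concrete, here is a minimal, framework-free sketch of the pattern at stake (the class names and filter are hypothetical, not Geotrek's actual code): an export view that rebuilds its queryset from scratch ignores the filters applied by the list view it extends, while deferring to `super().get_queryset()` keeps them, which is also the shape of the golden diff recorded further below.

```python
# Hypothetical sketch only; it does not reproduce Geotrek's real classes.
class InterventionList:
    """List view: builds the queryset shown in the table, honouring user filters."""

    def __init__(self, rows, user_filter=None):
        self.rows = rows
        self.user_filter = user_filter

    def get_queryset(self):
        if self.user_filter:
            return [row for row in self.rows if self.user_filter(row)]
        return self.rows


class BuggyFormatList(InterventionList):
    """Export view rebuilding its own queryset: the CSV ignores the filters."""

    def get_queryset(self):
        return self.rows


class FixedFormatList(InterventionList):
    """Export view reusing the parent's (already filtered) queryset."""

    def get_queryset(self):
        return super().get_queryset()


rows = ["done", "pending", "done"]

def only_done(row):
    return row == "done"

assert BuggyFormatList(rows, only_done).get_queryset() == rows
assert FixedFormatList(rows, only_done).get_queryset() == ["done", "done"]
```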
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/maintenance/views.py`
Content:
```
1 import logging
2 import re
3
4 from django.conf import settings
5 from django.db.models import Subquery, OuterRef, Sum
6 from django.db.models.expressions import Value
7 from django.utils.translation import gettext_lazy as _
8 from mapentity.views import (MapEntityList, MapEntityFormat, MapEntityDetail, MapEntityDocument,
9 MapEntityCreate, MapEntityUpdate, MapEntityDelete)
10
11 from geotrek.altimetry.models import AltimetryMixin
12 from geotrek.authent.decorators import same_structure_required
13 from geotrek.common.mixins.forms import FormsetMixin
14 from geotrek.common.mixins.views import CustomColumnsMixin
15 from geotrek.common.viewsets import GeotrekMapentityViewSet
16 from .filters import InterventionFilterSet, ProjectFilterSet
17 from .forms import (InterventionForm, ProjectForm,
18 FundingFormSet, ManDayFormSet)
19 from .models import Intervention, Project, ManDay
20 from .serializers import (InterventionSerializer, ProjectSerializer,
21 InterventionGeojsonSerializer, ProjectGeojsonSerializer)
22
23 logger = logging.getLogger(__name__)
24
25
26 ANNOTATION_FORBIDDEN_CHARS = re.compile(r"['`\"\]\[;\s]|--|/\*|\*/")
27 REPLACEMENT_CHAR = "_"
28
29
30 def _normalize_annotation_column_name(col_name):
31 return ANNOTATION_FORBIDDEN_CHARS.sub(repl=REPLACEMENT_CHAR, string=col_name)
32
33
34 class InterventionList(CustomColumnsMixin, MapEntityList):
35 queryset = Intervention.objects.existing()
36 filterform = InterventionFilterSet
37 mandatory_columns = ['id', 'name']
38 default_extra_columns = ['date', 'type', 'target', 'status', 'stake']
39 searchable_columns = ['id', 'name']
40 unorderable_columns = ['target']
41
42
43 class InterventionFormatList(MapEntityFormat, InterventionList):
44
45 @classmethod
46 def build_cost_column_name(cls, job_name):
47 return _normalize_annotation_column_name(f"{_('Cost')} {job_name}")
48
49 def get_queryset(self):
50 """Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention"""
51
52 queryset = Intervention.objects.existing()
53
54 if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:
55
56 # Get all jobs that are used in interventions, as unique names, ids and costs
57 all_mandays = ManDay.objects.all()
58 jobs_used_in_interventions = list(
59 set(all_mandays.values_list("job__job", "job_id", "job__cost"))
60 )
61
62 # Iter over unique jobs
63 for job_name, job_id, job_cost in jobs_used_in_interventions:
64
65 # Create column name for current job cost
66 column_name = self.build_cost_column_name(job_name)
67
68 # Create subquery to retrieve total cost of mandays for a given intervention and a given job
69 mandays_query = (
70 ManDay.objects.filter(intervention=OuterRef("pk"), job_id=job_id) # Extract all mandays for a given intervention and a given job
71 .values("job_id") # Group by job
72 .annotate(total_days=Sum("nb_days")) # Select number of days worked
73 .values("total_days") # Rename result as total_days
74 )
75
76 # Use total_days and job cost to calculate total cost for a given intervention and a given job
77 job_cost_query = Subquery(mandays_query) * Value(job_cost)
78
79 # Annotate queryset with this cost query
80 params = {column_name: job_cost_query}
81 queryset = queryset.annotate(**params)
82 return queryset
83
84 @classmethod
85 def get_mandatory_columns(cls):
86 mandatory_columns = ['id']
87 if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:
88 all_mandays = ManDay.objects.all() # Used to find all jobs that ARE USED in interventions
89 # Get all jobs that are used in interventions, as unique names
90 jobs_as_names = list(
91 set(all_mandays.values_list("job__job", flat=True))
92 )
93 # Create column names for each unique job cost
94 cost_column_names = list(map(cls.build_cost_column_name, jobs_as_names))
95 # Add these column names to export
96 mandatory_columns = mandatory_columns + cost_column_names
97 return mandatory_columns
98
99 default_extra_columns = [
100 'name', 'date', 'type', 'target', 'status', 'stake',
101 'disorders', 'total_manday', 'project', 'subcontracting',
102 'width', 'height', 'area', 'structure',
103 'description', 'date_insert', 'date_update',
104 'material_cost', 'heliport_cost', 'subcontract_cost',
105 'total_cost_mandays', 'total_cost',
106 'cities', 'districts', 'areas',
107 ] + AltimetryMixin.COLUMNS
108
109
110 class InterventionDetail(MapEntityDetail):
111 queryset = Intervention.objects.existing()
112
113 def get_context_data(self, *args, **kwargs):
114 context = super().get_context_data(*args, **kwargs)
115 context['can_edit'] = self.get_object().same_structure(self.request.user)
116 return context
117
118
119 class InterventionDocument(MapEntityDocument):
120 model = Intervention
121
122
123 class ManDayFormsetMixin(FormsetMixin):
124 context_name = 'manday_formset'
125 formset_class = ManDayFormSet
126
127
128 class InterventionCreate(ManDayFormsetMixin, MapEntityCreate):
129 model = Intervention
130 form_class = InterventionForm
131
132 def get_form_kwargs(self):
133 kwargs = super().get_form_kwargs()
134 if 'target_id' in self.request.GET and 'target_type' in self.request.GET:
135 # Create intervention on an existing infrastructure
136 kwargs['target_id'] = self.request.GET['target_id']
137 kwargs['target_type'] = self.request.GET['target_type']
138 return kwargs
139
140
141 class InterventionUpdate(ManDayFormsetMixin, MapEntityUpdate):
142 queryset = Intervention.objects.existing()
143 form_class = InterventionForm
144
145 @same_structure_required('maintenance:intervention_detail')
146 def dispatch(self, *args, **kwargs):
147 return super().dispatch(*args, **kwargs)
148
149 def get_form_kwargs(self):
150 kwargs = super().get_form_kwargs()
151 # If deletion is allowed
152 if kwargs['can_delete']:
153 intervention = self.get_object()
154 # Disallow deletion if this intervention is part of Suricate Workflow at the moment
155 not_workflow = not settings.SURICATE_WORKFLOW_ENABLED
156 is_report = intervention.target and intervention.target.__class__.__name__ == "Report"
157 report_is_closed = False
158 if is_report:
159 report_is_closed = (intervention.target.status.identifier == 'solved')
160 kwargs["can_delete"] = not_workflow or (not is_report) or report_is_closed
161 return kwargs
162
163
164 class InterventionDelete(MapEntityDelete):
165 model = Intervention
166
167 @same_structure_required('maintenance:intervention_detail')
168 def dispatch(self, *args, **kwargs):
169 return super().dispatch(*args, **kwargs)
170
171
172 class InterventionViewSet(GeotrekMapentityViewSet):
173 model = Intervention
174 serializer_class = InterventionSerializer
175 geojson_serializer_class = InterventionGeojsonSerializer
176 filterset_class = InterventionFilterSet
177 mapentity_list_class = InterventionList
178
179 def get_queryset(self):
180 qs = self.model.objects.existing()
181 if self.format_kwarg == 'geojson':
182 qs = qs.only('id', 'name')
183 else:
184 qs = qs.select_related("stake", "status", "type", "target_type").prefetch_related('target')
185 return qs
186
187
188 class ProjectList(CustomColumnsMixin, MapEntityList):
189 queryset = Project.objects.existing()
190 filterform = ProjectFilterSet
191 mandatory_columns = ['id', 'name']
192 default_extra_columns = ['period', 'type', 'domain']
193 searchable_columns = ['id', 'name']
194 unorderable_columns = ['period', ]
195
196
197 class ProjectFormatList(MapEntityFormat, ProjectList):
198 mandatory_columns = ['id']
199 default_extra_columns = [
200 'structure', 'name', 'period', 'type', 'domain', 'constraint', 'global_cost',
201 'interventions', 'interventions_total_cost', 'comments', 'contractors',
202 'project_owner', 'project_manager', 'founders',
203 'date_insert', 'date_update',
204 'cities', 'districts', 'areas',
205 ]
206
207
208 class ProjectDetail(MapEntityDetail):
209 queryset = Project.objects.existing()
210
211 def get_context_data(self, *args, **kwargs):
212 context = super().get_context_data(*args, **kwargs)
213 context['can_edit'] = self.get_object().same_structure(self.request.user)
214 context['empty_map_message'] = _("No intervention related.")
215 return context
216
217
218 class ProjectDocument(MapEntityDocument):
219 model = Project
220
221
222 class FundingFormsetMixin(FormsetMixin):
223 context_name = 'funding_formset'
224 formset_class = FundingFormSet
225
226
227 class ProjectCreate(FundingFormsetMixin, MapEntityCreate):
228 model = Project
229 form_class = ProjectForm
230
231
232 class ProjectUpdate(FundingFormsetMixin, MapEntityUpdate):
233 queryset = Project.objects.existing()
234 form_class = ProjectForm
235
236 @same_structure_required('maintenance:project_detail')
237 def dispatch(self, *args, **kwargs):
238 return super().dispatch(*args, **kwargs)
239
240
241 class ProjectDelete(MapEntityDelete):
242 model = Project
243
244 @same_structure_required('maintenance:project_detail')
245 def dispatch(self, *args, **kwargs):
246 return super().dispatch(*args, **kwargs)
247
248
249 class ProjectViewSet(GeotrekMapentityViewSet):
250 model = Project
251 serializer_class = ProjectSerializer
252 geojson_serializer_class = ProjectGeojsonSerializer
253 filterset_class = ProjectFilterSet
254 mapentity_list_class = ProjectList
255
256 def get_queryset(self):
257 qs = self.model.objects.existing()
258 if self.format_kwarg == 'geojson':
259 non_empty_qs = Intervention.objects.existing().filter(project__isnull=False).values('project')
260 qs = qs.filter(pk__in=non_empty_qs)
261 qs = qs.only('id', 'name')
262 return qs
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/maintenance/views.py b/geotrek/maintenance/views.py
--- a/geotrek/maintenance/views.py
+++ b/geotrek/maintenance/views.py
@@ -49,7 +49,7 @@
def get_queryset(self):
"""Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention"""
- queryset = Intervention.objects.existing()
+ queryset = super().get_queryset()
if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:
| {"golden_diff": "diff --git a/geotrek/maintenance/views.py b/geotrek/maintenance/views.py\n--- a/geotrek/maintenance/views.py\n+++ b/geotrek/maintenance/views.py\n@@ -49,7 +49,7 @@\n def get_queryset(self):\n \"\"\"Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention\"\"\"\n \n- queryset = Intervention.objects.existing()\n+ queryset = super().get_queryset()\n \n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n", "issue": "Bug : Export des interventions filtr\u00e9es dans la table n'est pas filtr\u00e9 dans le csv export\u00e9\n**Contexte :** \r\n\r\nBug dans le module intervention\r\n\r\n**R\u00e9sultat attendu :** \r\n\r\nLorsque je filtre la table qui liste les objets dans le module et que j'exporte le r\u00e9sultat au format CSV, le CSV ne doit contenir que les r\u00e9sultats filtr\u00e9s\r\n\r\n**R\u00e9sultat observ\u00e9 (bug) :** \r\n\r\nLa table CSV obtenue contient l'ensemble des interventions non filtr\u00e9es, sans tenir compte des \u00e9ventuels filtres choisis par l'utilisateur dans l'interface. \n", "before_files": [{"content": "import logging\nimport re\n\nfrom django.conf import settings\nfrom django.db.models import Subquery, OuterRef, Sum\nfrom django.db.models.expressions import Value\nfrom django.utils.translation import gettext_lazy as _\nfrom mapentity.views import (MapEntityList, MapEntityFormat, MapEntityDetail, MapEntityDocument,\n MapEntityCreate, MapEntityUpdate, MapEntityDelete)\n\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.authent.decorators import same_structure_required\nfrom geotrek.common.mixins.forms import FormsetMixin\nfrom geotrek.common.mixins.views import CustomColumnsMixin\nfrom geotrek.common.viewsets import GeotrekMapentityViewSet\nfrom .filters import InterventionFilterSet, ProjectFilterSet\nfrom .forms import (InterventionForm, ProjectForm,\n FundingFormSet, ManDayFormSet)\nfrom .models import Intervention, Project, ManDay\nfrom .serializers import (InterventionSerializer, ProjectSerializer,\n InterventionGeojsonSerializer, ProjectGeojsonSerializer)\n\nlogger = logging.getLogger(__name__)\n\n\nANNOTATION_FORBIDDEN_CHARS = re.compile(r\"['`\\\"\\]\\[;\\s]|--|/\\*|\\*/\")\nREPLACEMENT_CHAR = \"_\"\n\n\ndef _normalize_annotation_column_name(col_name):\n return ANNOTATION_FORBIDDEN_CHARS.sub(repl=REPLACEMENT_CHAR, string=col_name)\n\n\nclass InterventionList(CustomColumnsMixin, MapEntityList):\n queryset = Intervention.objects.existing()\n filterform = InterventionFilterSet\n mandatory_columns = ['id', 'name']\n default_extra_columns = ['date', 'type', 'target', 'status', 'stake']\n searchable_columns = ['id', 'name']\n unorderable_columns = ['target']\n\n\nclass InterventionFormatList(MapEntityFormat, InterventionList):\n\n @classmethod\n def build_cost_column_name(cls, job_name):\n return _normalize_annotation_column_name(f\"{_('Cost')} {job_name}\")\n\n def get_queryset(self):\n \"\"\"Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention\"\"\"\n\n queryset = Intervention.objects.existing()\n\n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n\n # Get all jobs that are used in interventions, as unique names, ids and costs\n all_mandays = ManDay.objects.all()\n jobs_used_in_interventions = list(\n set(all_mandays.values_list(\"job__job\", \"job_id\", \"job__cost\"))\n )\n\n # Iter over unique jobs\n for job_name, job_id, job_cost in jobs_used_in_interventions:\n\n # Create column name for 
current job cost\n column_name = self.build_cost_column_name(job_name)\n\n # Create subquery to retrieve total cost of mandays for a given intervention and a given job\n mandays_query = (\n ManDay.objects.filter(intervention=OuterRef(\"pk\"), job_id=job_id) # Extract all mandays for a given intervention and a given job\n .values(\"job_id\") # Group by job\n .annotate(total_days=Sum(\"nb_days\")) # Select number of days worked\n .values(\"total_days\") # Rename result as total_days\n )\n\n # Use total_days and job cost to calculate total cost for a given intervention and a given job\n job_cost_query = Subquery(mandays_query) * Value(job_cost)\n\n # Annotate queryset with this cost query\n params = {column_name: job_cost_query}\n queryset = queryset.annotate(**params)\n return queryset\n\n @classmethod\n def get_mandatory_columns(cls):\n mandatory_columns = ['id']\n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n all_mandays = ManDay.objects.all() # Used to find all jobs that ARE USED in interventions\n # Get all jobs that are used in interventions, as unique names\n jobs_as_names = list(\n set(all_mandays.values_list(\"job__job\", flat=True))\n )\n # Create column names for each unique job cost\n cost_column_names = list(map(cls.build_cost_column_name, jobs_as_names))\n # Add these column names to export\n mandatory_columns = mandatory_columns + cost_column_names\n return mandatory_columns\n\n default_extra_columns = [\n 'name', 'date', 'type', 'target', 'status', 'stake',\n 'disorders', 'total_manday', 'project', 'subcontracting',\n 'width', 'height', 'area', 'structure',\n 'description', 'date_insert', 'date_update',\n 'material_cost', 'heliport_cost', 'subcontract_cost',\n 'total_cost_mandays', 'total_cost',\n 'cities', 'districts', 'areas',\n ] + AltimetryMixin.COLUMNS\n\n\nclass InterventionDetail(MapEntityDetail):\n queryset = Intervention.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass InterventionDocument(MapEntityDocument):\n model = Intervention\n\n\nclass ManDayFormsetMixin(FormsetMixin):\n context_name = 'manday_formset'\n formset_class = ManDayFormSet\n\n\nclass InterventionCreate(ManDayFormsetMixin, MapEntityCreate):\n model = Intervention\n form_class = InterventionForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n if 'target_id' in self.request.GET and 'target_type' in self.request.GET:\n # Create intervention on an existing infrastructure\n kwargs['target_id'] = self.request.GET['target_id']\n kwargs['target_type'] = self.request.GET['target_type']\n return kwargs\n\n\nclass InterventionUpdate(ManDayFormsetMixin, MapEntityUpdate):\n queryset = Intervention.objects.existing()\n form_class = InterventionForm\n\n @same_structure_required('maintenance:intervention_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # If deletion is allowed\n if kwargs['can_delete']:\n intervention = self.get_object()\n # Disallow deletion if this intervention is part of Suricate Workflow at the moment\n not_workflow = not settings.SURICATE_WORKFLOW_ENABLED\n is_report = intervention.target and intervention.target.__class__.__name__ == \"Report\"\n report_is_closed = False\n if is_report:\n report_is_closed = (intervention.target.status.identifier == 'solved')\n kwargs[\"can_delete\"] = 
not_workflow or (not is_report) or report_is_closed\n return kwargs\n\n\nclass InterventionDelete(MapEntityDelete):\n model = Intervention\n\n @same_structure_required('maintenance:intervention_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass InterventionViewSet(GeotrekMapentityViewSet):\n model = Intervention\n serializer_class = InterventionSerializer\n geojson_serializer_class = InterventionGeojsonSerializer\n filterset_class = InterventionFilterSet\n mapentity_list_class = InterventionList\n\n def get_queryset(self):\n qs = self.model.objects.existing()\n if self.format_kwarg == 'geojson':\n qs = qs.only('id', 'name')\n else:\n qs = qs.select_related(\"stake\", \"status\", \"type\", \"target_type\").prefetch_related('target')\n return qs\n\n\nclass ProjectList(CustomColumnsMixin, MapEntityList):\n queryset = Project.objects.existing()\n filterform = ProjectFilterSet\n mandatory_columns = ['id', 'name']\n default_extra_columns = ['period', 'type', 'domain']\n searchable_columns = ['id', 'name']\n unorderable_columns = ['period', ]\n\n\nclass ProjectFormatList(MapEntityFormat, ProjectList):\n mandatory_columns = ['id']\n default_extra_columns = [\n 'structure', 'name', 'period', 'type', 'domain', 'constraint', 'global_cost',\n 'interventions', 'interventions_total_cost', 'comments', 'contractors',\n 'project_owner', 'project_manager', 'founders',\n 'date_insert', 'date_update',\n 'cities', 'districts', 'areas',\n ]\n\n\nclass ProjectDetail(MapEntityDetail):\n queryset = Project.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n context['empty_map_message'] = _(\"No intervention related.\")\n return context\n\n\nclass ProjectDocument(MapEntityDocument):\n model = Project\n\n\nclass FundingFormsetMixin(FormsetMixin):\n context_name = 'funding_formset'\n formset_class = FundingFormSet\n\n\nclass ProjectCreate(FundingFormsetMixin, MapEntityCreate):\n model = Project\n form_class = ProjectForm\n\n\nclass ProjectUpdate(FundingFormsetMixin, MapEntityUpdate):\n queryset = Project.objects.existing()\n form_class = ProjectForm\n\n @same_structure_required('maintenance:project_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass ProjectDelete(MapEntityDelete):\n model = Project\n\n @same_structure_required('maintenance:project_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass ProjectViewSet(GeotrekMapentityViewSet):\n model = Project\n serializer_class = ProjectSerializer\n geojson_serializer_class = ProjectGeojsonSerializer\n filterset_class = ProjectFilterSet\n mapentity_list_class = ProjectList\n\n def get_queryset(self):\n qs = self.model.objects.existing()\n if self.format_kwarg == 'geojson':\n non_empty_qs = Intervention.objects.existing().filter(project__isnull=False).values('project')\n qs = qs.filter(pk__in=non_empty_qs)\n qs = qs.only('id', 'name')\n return qs\n", "path": "geotrek/maintenance/views.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom django.conf import settings\nfrom django.db.models import Subquery, OuterRef, Sum\nfrom django.db.models.expressions import Value\nfrom django.utils.translation import gettext_lazy as _\nfrom mapentity.views import (MapEntityList, MapEntityFormat, MapEntityDetail, MapEntityDocument,\n MapEntityCreate, MapEntityUpdate, 
MapEntityDelete)\n\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.authent.decorators import same_structure_required\nfrom geotrek.common.mixins.forms import FormsetMixin\nfrom geotrek.common.mixins.views import CustomColumnsMixin\nfrom geotrek.common.viewsets import GeotrekMapentityViewSet\nfrom .filters import InterventionFilterSet, ProjectFilterSet\nfrom .forms import (InterventionForm, ProjectForm,\n FundingFormSet, ManDayFormSet)\nfrom .models import Intervention, Project, ManDay\nfrom .serializers import (InterventionSerializer, ProjectSerializer,\n InterventionGeojsonSerializer, ProjectGeojsonSerializer)\n\nlogger = logging.getLogger(__name__)\n\n\nANNOTATION_FORBIDDEN_CHARS = re.compile(r\"['`\\\"\\]\\[;\\s]|--|/\\*|\\*/\")\nREPLACEMENT_CHAR = \"_\"\n\n\ndef _normalize_annotation_column_name(col_name):\n return ANNOTATION_FORBIDDEN_CHARS.sub(repl=REPLACEMENT_CHAR, string=col_name)\n\n\nclass InterventionList(CustomColumnsMixin, MapEntityList):\n queryset = Intervention.objects.existing()\n filterform = InterventionFilterSet\n mandatory_columns = ['id', 'name']\n default_extra_columns = ['date', 'type', 'target', 'status', 'stake']\n searchable_columns = ['id', 'name']\n unorderable_columns = ['target']\n\n\nclass InterventionFormatList(MapEntityFormat, InterventionList):\n\n @classmethod\n def build_cost_column_name(cls, job_name):\n return _normalize_annotation_column_name(f\"{_('Cost')} {job_name}\")\n\n def get_queryset(self):\n \"\"\"Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention\"\"\"\n\n queryset = super().get_queryset()\n\n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n\n # Get all jobs that are used in interventions, as unique names, ids and costs\n all_mandays = ManDay.objects.all()\n jobs_used_in_interventions = list(\n set(all_mandays.values_list(\"job__job\", \"job_id\", \"job__cost\"))\n )\n\n # Iter over unique jobs\n for job_name, job_id, job_cost in jobs_used_in_interventions:\n\n # Create column name for current job cost\n column_name = self.build_cost_column_name(job_name)\n\n # Create subquery to retrieve total cost of mandays for a given intervention and a given job\n mandays_query = (\n ManDay.objects.filter(intervention=OuterRef(\"pk\"), job_id=job_id) # Extract all mandays for a given intervention and a given job\n .values(\"job_id\") # Group by job\n .annotate(total_days=Sum(\"nb_days\")) # Select number of days worked\n .values(\"total_days\") # Rename result as total_days\n )\n\n # Use total_days and job cost to calculate total cost for a given intervention and a given job\n job_cost_query = Subquery(mandays_query) * Value(job_cost)\n\n # Annotate queryset with this cost query\n params = {column_name: job_cost_query}\n queryset = queryset.annotate(**params)\n return queryset\n\n @classmethod\n def get_mandatory_columns(cls):\n mandatory_columns = ['id']\n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n all_mandays = ManDay.objects.all() # Used to find all jobs that ARE USED in interventions\n # Get all jobs that are used in interventions, as unique names\n jobs_as_names = list(\n set(all_mandays.values_list(\"job__job\", flat=True))\n )\n # Create column names for each unique job cost\n cost_column_names = list(map(cls.build_cost_column_name, jobs_as_names))\n # Add these column names to export\n mandatory_columns = mandatory_columns + cost_column_names\n return mandatory_columns\n\n default_extra_columns = [\n 'name', 'date', 'type', 'target', 'status', 
'stake',\n 'disorders', 'total_manday', 'project', 'subcontracting',\n 'width', 'height', 'area', 'structure',\n 'description', 'date_insert', 'date_update',\n 'material_cost', 'heliport_cost', 'subcontract_cost',\n 'total_cost_mandays', 'total_cost',\n 'cities', 'districts', 'areas',\n ] + AltimetryMixin.COLUMNS\n\n\nclass InterventionDetail(MapEntityDetail):\n queryset = Intervention.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass InterventionDocument(MapEntityDocument):\n model = Intervention\n\n\nclass ManDayFormsetMixin(FormsetMixin):\n context_name = 'manday_formset'\n formset_class = ManDayFormSet\n\n\nclass InterventionCreate(ManDayFormsetMixin, MapEntityCreate):\n model = Intervention\n form_class = InterventionForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n if 'target_id' in self.request.GET and 'target_type' in self.request.GET:\n # Create intervention on an existing infrastructure\n kwargs['target_id'] = self.request.GET['target_id']\n kwargs['target_type'] = self.request.GET['target_type']\n return kwargs\n\n\nclass InterventionUpdate(ManDayFormsetMixin, MapEntityUpdate):\n queryset = Intervention.objects.existing()\n form_class = InterventionForm\n\n @same_structure_required('maintenance:intervention_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # If deletion is allowed\n if kwargs['can_delete']:\n intervention = self.get_object()\n # Disallow deletion if this intervention is part of Suricate Workflow at the moment\n not_workflow = not settings.SURICATE_WORKFLOW_ENABLED\n is_report = intervention.target and intervention.target.__class__.__name__ == \"Report\"\n report_is_closed = False\n if is_report:\n report_is_closed = (intervention.target.status.identifier == 'solved')\n kwargs[\"can_delete\"] = not_workflow or (not is_report) or report_is_closed\n return kwargs\n\n\nclass InterventionDelete(MapEntityDelete):\n model = Intervention\n\n @same_structure_required('maintenance:intervention_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass InterventionViewSet(GeotrekMapentityViewSet):\n model = Intervention\n serializer_class = InterventionSerializer\n geojson_serializer_class = InterventionGeojsonSerializer\n filterset_class = InterventionFilterSet\n mapentity_list_class = InterventionList\n\n def get_queryset(self):\n qs = self.model.objects.existing()\n if self.format_kwarg == 'geojson':\n qs = qs.only('id', 'name')\n else:\n qs = qs.select_related(\"stake\", \"status\", \"type\", \"target_type\").prefetch_related('target')\n return qs\n\n\nclass ProjectList(CustomColumnsMixin, MapEntityList):\n queryset = Project.objects.existing()\n filterform = ProjectFilterSet\n mandatory_columns = ['id', 'name']\n default_extra_columns = ['period', 'type', 'domain']\n searchable_columns = ['id', 'name']\n unorderable_columns = ['period', ]\n\n\nclass ProjectFormatList(MapEntityFormat, ProjectList):\n mandatory_columns = ['id']\n default_extra_columns = [\n 'structure', 'name', 'period', 'type', 'domain', 'constraint', 'global_cost',\n 'interventions', 'interventions_total_cost', 'comments', 'contractors',\n 'project_owner', 'project_manager', 'founders',\n 'date_insert', 'date_update',\n 'cities', 'districts', 'areas',\n 
]\n\n\nclass ProjectDetail(MapEntityDetail):\n queryset = Project.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n context['empty_map_message'] = _(\"No intervention related.\")\n return context\n\n\nclass ProjectDocument(MapEntityDocument):\n model = Project\n\n\nclass FundingFormsetMixin(FormsetMixin):\n context_name = 'funding_formset'\n formset_class = FundingFormSet\n\n\nclass ProjectCreate(FundingFormsetMixin, MapEntityCreate):\n model = Project\n form_class = ProjectForm\n\n\nclass ProjectUpdate(FundingFormsetMixin, MapEntityUpdate):\n queryset = Project.objects.existing()\n form_class = ProjectForm\n\n @same_structure_required('maintenance:project_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass ProjectDelete(MapEntityDelete):\n model = Project\n\n @same_structure_required('maintenance:project_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass ProjectViewSet(GeotrekMapentityViewSet):\n model = Project\n serializer_class = ProjectSerializer\n geojson_serializer_class = ProjectGeojsonSerializer\n filterset_class = ProjectFilterSet\n mapentity_list_class = ProjectList\n\n def get_queryset(self):\n qs = self.model.objects.existing()\n if self.format_kwarg == 'geojson':\n non_empty_qs = Intervention.objects.existing().filter(project__isnull=False).values('project')\n qs = qs.filter(pk__in=non_empty_qs)\n qs = qs.only('id', 'name')\n return qs\n", "path": "geotrek/maintenance/views.py"}]} |
gh_patches_debug_1355 | rasdani/github-patches | git_diff | meltano__meltano-6694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Regression in `meltano remove`
### Meltano Version
2.3.0
### Python Version
NA
### Bug scope
CLI (options, error messages, logging, etc.)
### Operating System
NA
### Description
Meltano `2.3.0` introduced a regression in `meltano remove`, where it complains that the plugin is missing the `settings_with_extras` attribute.
The regression was most likely introduced in https://github.com/meltano/meltano/pull/6267 somewhere in the use of a feature flag:
https://github.com/meltano/meltano/blob/23a9099cefe148cb560d25e7ae335447858948df/src/meltano/core/plugin/settings_service.py#L63-L65
I still haven't figured out the root cause, but it might be related to trying to check for feature flags in the `PluginSettingsService`.
Other commands may be similarly affected, but at least `invoke` and `run` seem to be working fine. (A minimal sketch of the resulting `AttributeError` pattern follows this issue block.)
### Code
<details><summary>Traceback</summary>
```
Traceback (most recent call last):
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/behavior/canonical.py", line 138, in __getattr__
value = self._dict[attr]
KeyError: 'settings_with_extras'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/__init__.py", line 66, in _run_cli
cli(obj={"project": None})
File "/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/cli.py", line 35, in main
return super().main(*args, windows_expand_args=False, **kwargs)
File "/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/utils.py", line 540, in invoke
super().invoke(ctx) # noqa: WPS608
File "/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/params.py", line 18, in decorate
return func(*args, **kwargs)
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/params.py", line 53, in decorate
func(project, *args, **kwargs)
File "/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/decorators.py", line 26, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/remove.py", line 32, in remove
remove_plugins(project, plugins)
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/remove.py", line 39, in remove_plugins
num_removed, total = remove_service.remove_plugins(
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin_remove_service.py", line 60, in remove_plugins
removal_managers = self.remove_plugin(plugin)
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin_remove_service.py", line 87, in remove_plugin
DbRemoveManager(plugin, self.project),
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin_location_remove.py", line 86, in __init__
self.plugins_settings_service = PluginSettingsService(project, plugin)
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin/settings_service.py", line 63, in __init__
with self.feature_flag(
File "/Users/edgarramirez/.pyenv/versions/3.9.13/lib/python3.9/contextlib.py", line 119, in __enter__
return next(self.gen)
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py", line 657, in feature_flag
allowed = self.get(f"{FEATURE_FLAG_PREFIX}.{feature}") or False
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py", line 444, in get
value, _ = self.get_with_source(*args, **kwargs)
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py", line 431, in get_with_source
value, metadata = self.get_with_metadata(*args, **kwargs)
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py", line 333, in get_with_metadata
setting_def = setting_def or self.find_setting(name)
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py", line 598, in find_setting
for setting in self.definitions()
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py", line 568, in definitions
for setting in self.setting_definitions
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin/settings_service.py", line 136, in setting_definitions
settings = self.plugin.settings_with_extras
File "/Users/edgarramirez/meltano/meltano/src/meltano/core/behavior/canonical.py", line 143, in __getattr__
raise AttributeError(attr) from err
AttributeError: settings_with_extras
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/edgarramirez/meltano/meltano/src/meltano/cli/__init__.py", line 74, in _run_cli
raise CliError(str(err)) from err
meltano.cli.utils.CliError: settings_with_extras
settings_with_extras
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/plugin/settings_service.py`
Content:
```
1 """Settings manager for Meltano plugins."""
2
3 from __future__ import annotations
4
5 from typing import Any
6
7 from meltano.core.plugin.project_plugin import ProjectPlugin
8 from meltano.core.project import Project
9 from meltano.core.project_plugins_service import ProjectPluginsService
10 from meltano.core.project_settings_service import ProjectSettingsService
11 from meltano.core.setting_definition import SettingDefinition
12 from meltano.core.settings_service import FeatureFlags, SettingsService
13 from meltano.core.utils import expand_env_vars
14
15
16 class PluginSettingsService(SettingsService):
17 """Settings manager for Meltano plugins."""
18
19 def __init__(
20 self,
21 project: Project,
22 plugin: ProjectPlugin,
23 *args,
24 plugins_service: ProjectPluginsService = None,
25 **kwargs,
26 ):
27 """Create a new plugin settings manager.
28
29 Args:
30 project: The Meltano project.
31 plugin: The Meltano plugin.
32 args: Positional arguments to pass to the superclass.
33 plugins_service: The Meltano plugins service.
34 kwargs: Keyword arguments to pass to the superclass.
35 """
36 super().__init__(project, *args, **kwargs)
37
38 self.plugin = plugin
39 self.plugins_service = plugins_service or ProjectPluginsService(self.project)
40
41 self._inherited_settings_service = None
42 if self.project.active_environment:
43 environment = self.project.active_environment
44 self.environment_plugin_config = environment.get_plugin_config(
45 self.plugin.type,
46 self.plugin.name,
47 )
48 else:
49 self.environment_plugin_config = None
50
51 project_settings_service = ProjectSettingsService(
52 self.project, config_service=self.plugins_service.config_service
53 )
54
55 self.env_override = {
56 **project_settings_service.env, # project level environment variables
57 **project_settings_service.as_env(), # project level settings as env vars (e.g. MELTANO_PROJECT_ID)
58 **self.env_override, # plugin level overrides, passed in as **kwargs and set to self.env_overrides by super().__init__ above
59 **self.plugin.info_env, # generated generic plugin settings as env vars (e.g. MELTANO_EXTRACT_NAME)
60 **self.plugin.env, # env vars stored under the `env:` key of the plugin definition
61 }
62
63 environment_env = {}
64 if self.project.active_environment:
65 with self.feature_flag(
66 FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False
67 ) as strict_env_var_mode:
68 environment_env = {
69 var: expand_env_vars(
70 value,
71 self.env_override,
72 raise_if_missing=strict_env_var_mode,
73 )
74 for var, value in self.project.active_environment.env.items()
75 }
76 self.env_override.update(
77 environment_env
78 ) # active Meltano Environment top level `env:` key
79
80 environment_plugin_env = (
81 self.environment_plugin_config.env if self.environment_plugin_config else {}
82 )
83 self.env_override.update(
84 environment_plugin_env
85 ) # env vars stored under the `env:` key of the plugin definition of the active meltano Environment
86
87 @property
88 def label(self):
89 """Get the label for this plugin.
90
91 Returns:
92 The label for this plugin.
93 """
94 return f"{self.plugin.type.descriptor} '{self.plugin.name}'" # noqa: WPS237
95
96 @property
97 def docs_url(self):
98 """Get the documentation URL for this plugin.
99
100 Returns:
101 The documentation URL for this plugin.
102 """
103 return self.plugin.docs
104
105 def setting_env_vars(self, setting_def: SettingDefinition, for_writing=False):
106 """Get environment variables for a setting.
107
108 Args:
109 setting_def: The setting definition.
110 for_writing: Whether to get environment variables for writing.
111
112 Returns:
113 Environment variables for a setting.
114 """
115 return setting_def.env_vars(
116 prefixes=self.plugin.env_prefixes(for_writing=for_writing),
117 include_custom=self.plugin.is_shadowing or for_writing,
118 for_writing=for_writing,
119 )
120
121 @property
122 def db_namespace(self):
123 """Return namespace for setting value records in system database.
124
125 Returns:
126 Namespace for setting value records in system database.
127 """
128 # "default" is included for legacy reasons
129 return ".".join((self.plugin.type, self.plugin.name, "default"))
130
131 @property
132 def setting_definitions(self) -> list[SettingDefinition]:
133 """Return definitions of supported settings.
134
135 Returns:
136 A list of setting definitions.
137 """
138 settings = self.plugin.settings_with_extras
139
140 if self.environment_plugin_config is not None:
141 settings.extend(
142 self.environment_plugin_config.get_orphan_settings(settings)
143 )
144
145 return settings
146
147 @property
148 def meltano_yml_config(self):
149 """Return current configuration in `meltano.yml`.
150
151 Returns:
152 Current configuration in `meltano.yml`.
153 """
154 return self.plugin.config_with_extras
155
156 @property
157 def environment_config(self):
158 """Return current environment configuration in `meltano.yml`.
159
160 Returns:
161 Current environment configuration in `meltano.yml`.
162 """
163 if self.environment_plugin_config:
164 return self.environment_plugin_config.config_with_extras
165 return {}
166
167 def update_meltano_yml_config(self, config_with_extras):
168 """Update configuration in `meltano.yml`.
169
170 Args:
171 config_with_extras: Configuration to update.
172 """
173 self.plugin.config_with_extras = config_with_extras
174 self.plugins_service.update_plugin(self.plugin)
175
176 def update_meltano_environment_config(self, config_with_extras: dict[str, Any]):
177 """Update environment configuration in `meltano.yml`.
178
179 Args:
180 config_with_extras: Configuration to update.
181 """
182 self.environment_plugin_config.config_with_extras = config_with_extras
183 self.plugins_service.update_environment_plugin(self.environment_plugin_config)
184
185 @property
186 def inherited_settings_service(self):
187 """Return settings service to inherit configuration from.
188
189 Returns:
190 Settings service to inherit configuration from.
191 """
192 parent_plugin = self.plugin.parent
193 if not isinstance(parent_plugin, ProjectPlugin):
194 return None
195
196 if self._inherited_settings_service is None:
197 self._inherited_settings_service = self.__class__(
198 self.project,
199 parent_plugin,
200 env_override=self.env_override,
201 plugins_service=self.plugins_service,
202 )
203 return self._inherited_settings_service
204
205 def process_config(self, config):
206 """Process configuration dictionary to be passed to plugin.
207
208 Args:
209 config: Configuration dictionary to process.
210
211 Returns:
212 Processed configuration dictionary.
213 """
214 return self.plugin.process_config(config)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/core/plugin/settings_service.py b/src/meltano/core/plugin/settings_service.py
--- a/src/meltano/core/plugin/settings_service.py
+++ b/src/meltano/core/plugin/settings_service.py
@@ -62,7 +62,7 @@
environment_env = {}
if self.project.active_environment:
- with self.feature_flag(
+ with project_settings_service.feature_flag(
FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False
) as strict_env_var_mode:
environment_env = {
| {"golden_diff": "diff --git a/src/meltano/core/plugin/settings_service.py b/src/meltano/core/plugin/settings_service.py\n--- a/src/meltano/core/plugin/settings_service.py\n+++ b/src/meltano/core/plugin/settings_service.py\n@@ -62,7 +62,7 @@\n \n environment_env = {}\n if self.project.active_environment:\n- with self.feature_flag(\n+ with project_settings_service.feature_flag(\n FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False\n ) as strict_env_var_mode:\n environment_env = {\n", "issue": "bug: Regression in `meltano remove`\n### Meltano Version\r\n\r\n2.3.0\r\n\r\n### Python Version\r\n\r\nNA\r\n\r\n### Bug scope\r\n\r\nCLI (options, error messages, logging, etc.)\r\n\r\n### Operating System\r\n\r\nNA\r\n\r\n### Description\r\n\r\nMeltano `2.3.0` introduced a regression on `meltano remove` where it complains that the plugin is missing the `settings_with_extras` attribute.\r\n\r\nThe regression was most likely introduced in https://github.com/meltano/meltano/pull/6267 somewhere in the use of a feature flag:\r\n\r\nhttps://github.com/meltano/meltano/blob/23a9099cefe148cb560d25e7ae335447858948df/src/meltano/core/plugin/settings_service.py#L63-L65\r\n\r\nI still haven't figured out the root cause but might be related with trying to check for feature flags in the `PluginSettingsService`.\r\n\r\nOther commands may be similarly affected, but at least `invoke` and `run` seem to be working fine.\r\n\r\n### Code\r\n\r\n\r\n<details><summary>Traceback</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/behavior/canonical.py\", line 138, in __getattr__\r\n value = self._dict[attr]\r\nKeyError: 'settings_with_extras'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/__init__.py\", line 66, in _run_cli\r\n cli(obj={\"project\": None})\r\n File \"/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/cli.py\", line 35, in main\r\n return super().main(*args, windows_expand_args=False, **kwargs)\r\n File \"/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/utils.py\", line 540, in invoke\r\n super().invoke(ctx) # noqa: WPS608\r\n File \"/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/params.py\", line 18, in decorate\r\n return func(*args, **kwargs)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/params.py\", line 53, in decorate\r\n func(project, *args, **kwargs)\r\n File 
\"/Users/edgarramirez/Library/Caches/pypoetry/virtualenvs/meltano-SY7IjDqw-py3.9/lib/python3.9/site-packages/click/decorators.py\", line 26, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/remove.py\", line 32, in remove\r\n remove_plugins(project, plugins)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/remove.py\", line 39, in remove_plugins\r\n num_removed, total = remove_service.remove_plugins(\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin_remove_service.py\", line 60, in remove_plugins\r\n removal_managers = self.remove_plugin(plugin)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin_remove_service.py\", line 87, in remove_plugin\r\n DbRemoveManager(plugin, self.project),\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin_location_remove.py\", line 86, in __init__\r\n self.plugins_settings_service = PluginSettingsService(project, plugin)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin/settings_service.py\", line 63, in __init__\r\n with self.feature_flag(\r\n File \"/Users/edgarramirez/.pyenv/versions/3.9.13/lib/python3.9/contextlib.py\", line 119, in __enter__\r\n return next(self.gen)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py\", line 657, in feature_flag\r\n allowed = self.get(f\"{FEATURE_FLAG_PREFIX}.{feature}\") or False\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py\", line 444, in get\r\n value, _ = self.get_with_source(*args, **kwargs)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py\", line 431, in get_with_source\r\n value, metadata = self.get_with_metadata(*args, **kwargs)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py\", line 333, in get_with_metadata\r\n setting_def = setting_def or self.find_setting(name)\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py\", line 598, in find_setting\r\n for setting in self.definitions()\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/settings_service.py\", line 568, in definitions\r\n for setting in self.setting_definitions\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/plugin/settings_service.py\", line 136, in setting_definitions\r\n settings = self.plugin.settings_with_extras\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/core/behavior/canonical.py\", line 143, in __getattr__\r\n raise AttributeError(attr) from err\r\nAttributeError: settings_with_extras\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/edgarramirez/meltano/meltano/src/meltano/cli/__init__.py\", line 74, in _run_cli\r\n raise CliError(str(err)) from err\r\nmeltano.cli.utils.CliError: settings_with_extras\r\nsettings_with_extras\r\n```\r\n\r\n</details>\r\n\r\n\n", "before_files": [{"content": "\"\"\"Settings manager for Meltano plugins.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.project import Project\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.setting_definition import SettingDefinition\nfrom meltano.core.settings_service import FeatureFlags, 
SettingsService\nfrom meltano.core.utils import expand_env_vars\n\n\nclass PluginSettingsService(SettingsService):\n \"\"\"Settings manager for Meltano plugins.\"\"\"\n\n def __init__(\n self,\n project: Project,\n plugin: ProjectPlugin,\n *args,\n plugins_service: ProjectPluginsService = None,\n **kwargs,\n ):\n \"\"\"Create a new plugin settings manager.\n\n Args:\n project: The Meltano project.\n plugin: The Meltano plugin.\n args: Positional arguments to pass to the superclass.\n plugins_service: The Meltano plugins service.\n kwargs: Keyword arguments to pass to the superclass.\n \"\"\"\n super().__init__(project, *args, **kwargs)\n\n self.plugin = plugin\n self.plugins_service = plugins_service or ProjectPluginsService(self.project)\n\n self._inherited_settings_service = None\n if self.project.active_environment:\n environment = self.project.active_environment\n self.environment_plugin_config = environment.get_plugin_config(\n self.plugin.type,\n self.plugin.name,\n )\n else:\n self.environment_plugin_config = None\n\n project_settings_service = ProjectSettingsService(\n self.project, config_service=self.plugins_service.config_service\n )\n\n self.env_override = {\n **project_settings_service.env, # project level environment variables\n **project_settings_service.as_env(), # project level settings as env vars (e.g. MELTANO_PROJECT_ID)\n **self.env_override, # plugin level overrides, passed in as **kwargs and set to self.env_overrides by super().__init__ above\n **self.plugin.info_env, # generated generic plugin settings as env vars (e.g. MELTANO_EXTRACT_NAME)\n **self.plugin.env, # env vars stored under the `env:` key of the plugin definition\n }\n\n environment_env = {}\n if self.project.active_environment:\n with self.feature_flag(\n FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False\n ) as strict_env_var_mode:\n environment_env = {\n var: expand_env_vars(\n value,\n self.env_override,\n raise_if_missing=strict_env_var_mode,\n )\n for var, value in self.project.active_environment.env.items()\n }\n self.env_override.update(\n environment_env\n ) # active Meltano Environment top level `env:` key\n\n environment_plugin_env = (\n self.environment_plugin_config.env if self.environment_plugin_config else {}\n )\n self.env_override.update(\n environment_plugin_env\n ) # env vars stored under the `env:` key of the plugin definition of the active meltano Environment\n\n @property\n def label(self):\n \"\"\"Get the label for this plugin.\n\n Returns:\n The label for this plugin.\n \"\"\"\n return f\"{self.plugin.type.descriptor} '{self.plugin.name}'\" # noqa: WPS237\n\n @property\n def docs_url(self):\n \"\"\"Get the documentation URL for this plugin.\n\n Returns:\n The documentation URL for this plugin.\n \"\"\"\n return self.plugin.docs\n\n def setting_env_vars(self, setting_def: SettingDefinition, for_writing=False):\n \"\"\"Get environment variables for a setting.\n\n Args:\n setting_def: The setting definition.\n for_writing: Whether to get environment variables for writing.\n\n Returns:\n Environment variables for a setting.\n \"\"\"\n return setting_def.env_vars(\n prefixes=self.plugin.env_prefixes(for_writing=for_writing),\n include_custom=self.plugin.is_shadowing or for_writing,\n for_writing=for_writing,\n )\n\n @property\n def db_namespace(self):\n \"\"\"Return namespace for setting value records in system database.\n\n Returns:\n Namespace for setting value records in system database.\n \"\"\"\n # \"default\" is included for legacy reasons\n return \".\".join((self.plugin.type, 
self.plugin.name, \"default\"))\n\n @property\n def setting_definitions(self) -> list[SettingDefinition]:\n \"\"\"Return definitions of supported settings.\n\n Returns:\n A list of setting definitions.\n \"\"\"\n settings = self.plugin.settings_with_extras\n\n if self.environment_plugin_config is not None:\n settings.extend(\n self.environment_plugin_config.get_orphan_settings(settings)\n )\n\n return settings\n\n @property\n def meltano_yml_config(self):\n \"\"\"Return current configuration in `meltano.yml`.\n\n Returns:\n Current configuration in `meltano.yml`.\n \"\"\"\n return self.plugin.config_with_extras\n\n @property\n def environment_config(self):\n \"\"\"Return current environment configuration in `meltano.yml`.\n\n Returns:\n Current environment configuration in `meltano.yml`.\n \"\"\"\n if self.environment_plugin_config:\n return self.environment_plugin_config.config_with_extras\n return {}\n\n def update_meltano_yml_config(self, config_with_extras):\n \"\"\"Update configuration in `meltano.yml`.\n\n Args:\n config_with_extras: Configuration to update.\n \"\"\"\n self.plugin.config_with_extras = config_with_extras\n self.plugins_service.update_plugin(self.plugin)\n\n def update_meltano_environment_config(self, config_with_extras: dict[str, Any]):\n \"\"\"Update environment configuration in `meltano.yml`.\n\n Args:\n config_with_extras: Configuration to update.\n \"\"\"\n self.environment_plugin_config.config_with_extras = config_with_extras\n self.plugins_service.update_environment_plugin(self.environment_plugin_config)\n\n @property\n def inherited_settings_service(self):\n \"\"\"Return settings service to inherit configuration from.\n\n Returns:\n Settings service to inherit configuration from.\n \"\"\"\n parent_plugin = self.plugin.parent\n if not isinstance(parent_plugin, ProjectPlugin):\n return None\n\n if self._inherited_settings_service is None:\n self._inherited_settings_service = self.__class__(\n self.project,\n parent_plugin,\n env_override=self.env_override,\n plugins_service=self.plugins_service,\n )\n return self._inherited_settings_service\n\n def process_config(self, config):\n \"\"\"Process configuration dictionary to be passed to plugin.\n\n Args:\n config: Configuration dictionary to process.\n\n Returns:\n Processed configuration dictionary.\n \"\"\"\n return self.plugin.process_config(config)\n", "path": "src/meltano/core/plugin/settings_service.py"}], "after_files": [{"content": "\"\"\"Settings manager for Meltano plugins.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.project import Project\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.setting_definition import SettingDefinition\nfrom meltano.core.settings_service import FeatureFlags, SettingsService\nfrom meltano.core.utils import expand_env_vars\n\n\nclass PluginSettingsService(SettingsService):\n \"\"\"Settings manager for Meltano plugins.\"\"\"\n\n def __init__(\n self,\n project: Project,\n plugin: ProjectPlugin,\n *args,\n plugins_service: ProjectPluginsService = None,\n **kwargs,\n ):\n \"\"\"Create a new plugin settings manager.\n\n Args:\n project: The Meltano project.\n plugin: The Meltano plugin.\n args: Positional arguments to pass to the superclass.\n plugins_service: The Meltano plugins service.\n kwargs: Keyword arguments to pass to the superclass.\n \"\"\"\n 
super().__init__(project, *args, **kwargs)\n\n self.plugin = plugin\n self.plugins_service = plugins_service or ProjectPluginsService(self.project)\n\n self._inherited_settings_service = None\n if self.project.active_environment:\n environment = self.project.active_environment\n self.environment_plugin_config = environment.get_plugin_config(\n self.plugin.type,\n self.plugin.name,\n )\n else:\n self.environment_plugin_config = None\n\n project_settings_service = ProjectSettingsService(\n self.project, config_service=self.plugins_service.config_service\n )\n\n self.env_override = {\n **project_settings_service.env, # project level environment variables\n **project_settings_service.as_env(), # project level settings as env vars (e.g. MELTANO_PROJECT_ID)\n **self.env_override, # plugin level overrides, passed in as **kwargs and set to self.env_overrides by super().__init__ above\n **self.plugin.info_env, # generated generic plugin settings as env vars (e.g. MELTANO_EXTRACT_NAME)\n **self.plugin.env, # env vars stored under the `env:` key of the plugin definition\n }\n\n environment_env = {}\n if self.project.active_environment:\n with project_settings_service.feature_flag(\n FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False\n ) as strict_env_var_mode:\n environment_env = {\n var: expand_env_vars(\n value,\n self.env_override,\n raise_if_missing=strict_env_var_mode,\n )\n for var, value in self.project.active_environment.env.items()\n }\n self.env_override.update(\n environment_env\n ) # active Meltano Environment top level `env:` key\n\n environment_plugin_env = (\n self.environment_plugin_config.env if self.environment_plugin_config else {}\n )\n self.env_override.update(\n environment_plugin_env\n ) # env vars stored under the `env:` key of the plugin definition of the active meltano Environment\n\n @property\n def label(self):\n \"\"\"Get the label for this plugin.\n\n Returns:\n The label for this plugin.\n \"\"\"\n return f\"{self.plugin.type.descriptor} '{self.plugin.name}'\" # noqa: WPS237\n\n @property\n def docs_url(self):\n \"\"\"Get the documentation URL for this plugin.\n\n Returns:\n The documentation URL for this plugin.\n \"\"\"\n return self.plugin.docs\n\n def setting_env_vars(self, setting_def: SettingDefinition, for_writing=False):\n \"\"\"Get environment variables for a setting.\n\n Args:\n setting_def: The setting definition.\n for_writing: Whether to get environment variables for writing.\n\n Returns:\n Environment variables for a setting.\n \"\"\"\n return setting_def.env_vars(\n prefixes=self.plugin.env_prefixes(for_writing=for_writing),\n include_custom=self.plugin.is_shadowing or for_writing,\n for_writing=for_writing,\n )\n\n @property\n def db_namespace(self):\n \"\"\"Return namespace for setting value records in system database.\n\n Returns:\n Namespace for setting value records in system database.\n \"\"\"\n # \"default\" is included for legacy reasons\n return \".\".join((self.plugin.type, self.plugin.name, \"default\"))\n\n @property\n def setting_definitions(self) -> list[SettingDefinition]:\n \"\"\"Return definitions of supported settings.\n\n Returns:\n A list of setting definitions.\n \"\"\"\n settings = self.plugin.settings_with_extras\n\n if self.environment_plugin_config is not None:\n settings.extend(\n self.environment_plugin_config.get_orphan_settings(settings)\n )\n\n return settings\n\n @property\n def meltano_yml_config(self):\n \"\"\"Return current configuration in `meltano.yml`.\n\n Returns:\n Current configuration in `meltano.yml`.\n \"\"\"\n 
return self.plugin.config_with_extras\n\n @property\n def environment_config(self):\n \"\"\"Return current environment configuration in `meltano.yml`.\n\n Returns:\n Current environment configuration in `meltano.yml`.\n \"\"\"\n if self.environment_plugin_config:\n return self.environment_plugin_config.config_with_extras\n return {}\n\n def update_meltano_yml_config(self, config_with_extras):\n \"\"\"Update configuration in `meltano.yml`.\n\n Args:\n config_with_extras: Configuration to update.\n \"\"\"\n self.plugin.config_with_extras = config_with_extras\n self.plugins_service.update_plugin(self.plugin)\n\n def update_meltano_environment_config(self, config_with_extras: dict[str, Any]):\n \"\"\"Update environment configuration in `meltano.yml`.\n\n Args:\n config_with_extras: Configuration to update.\n \"\"\"\n self.environment_plugin_config.config_with_extras = config_with_extras\n self.plugins_service.update_environment_plugin(self.environment_plugin_config)\n\n @property\n def inherited_settings_service(self):\n \"\"\"Return settings service to inherit configuration from.\n\n Returns:\n Settings service to inherit configuration from.\n \"\"\"\n parent_plugin = self.plugin.parent\n if not isinstance(parent_plugin, ProjectPlugin):\n return None\n\n if self._inherited_settings_service is None:\n self._inherited_settings_service = self.__class__(\n self.project,\n parent_plugin,\n env_override=self.env_override,\n plugins_service=self.plugins_service,\n )\n return self._inherited_settings_service\n\n def process_config(self, config):\n \"\"\"Process configuration dictionary to be passed to plugin.\n\n Args:\n config: Configuration dictionary to process.\n\n Returns:\n Processed configuration dictionary.\n \"\"\"\n return self.plugin.process_config(config)\n", "path": "src/meltano/core/plugin/settings_service.py"}]} |
gh_patches_debug_1356 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-1155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure proper sorting by name for output files
### Is your feature request related to a problem? Please describe.
To enhance readability and ensure proper sorting by name, the numeric part of output file names should have a fixed width. This can be achieved by adding leading zeros to the numeric part, with four digits likely being sufficient.
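A minimal illustration of the requested change (editorial addition; `speaker` and `count` are placeholder values): Python's `04d` format spec left-pads the counter to four digits, which is exactly what the patch below applies to the output file name.

```python
# Editorial sketch: fixed-width numbering so lexicographic order matches numeric order.
speaker, count = "SPEAKER_00", 7             # placeholder values
print(f"{speaker}_{count}.wav")              # SPEAKER_00_7.wav    -> sorts after _10, _11, ...
print(f"{speaker}_{count:04d}.wav")          # SPEAKER_00_0007.wav -> sorts correctly up to 9999
```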
### Describe alternatives you've considered
I don't have any.
### Additional context
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
### Are you willing to resolve this issue by submitting a Pull Request?
- [X] Yes, I have the time, and I know how to start.
- [ ] Yes, I have the time, but I don't know how to start. I would need guidance.
- [ ] No, I don't have the time, although I believe I could do it if I had the time...
- [ ] No, I don't have the time and I wouldn't even know how to start.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py`
Content:
```
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from logging import getLogger
5 from pathlib import Path
6
7 import librosa
8 import soundfile as sf
9 import torch
10 from joblib import Parallel, delayed
11 from pyannote.audio import Pipeline
12 from tqdm import tqdm
13 from tqdm_joblib import tqdm_joblib
14
15 LOG = getLogger(__name__)
16
17
18 def _process_one(
19 input_path: Path,
20 output_dir: Path,
21 sr: int,
22 *,
23 min_speakers: int = 1,
24 max_speakers: int = 1,
25 huggingface_token: str | None = None,
26 ) -> None:
27 try:
28 audio, sr = librosa.load(input_path, sr=sr, mono=True)
29 except Exception as e:
30 LOG.warning(f"Failed to read {input_path}: {e}")
31 return
32 pipeline = Pipeline.from_pretrained(
33 "pyannote/speaker-diarization", use_auth_token=huggingface_token
34 )
35 if pipeline is None:
36 raise ValueError("Failed to load pipeline")
37 pipeline = pipeline.to(torch.device("cuda"))
38 LOG.info(f"Processing {input_path}. This may take a while...")
39 diarization = pipeline(
40 input_path, min_speakers=min_speakers, max_speakers=max_speakers
41 )
42
43 LOG.info(f"Found {len(diarization)} tracks, writing to {output_dir}")
44 speaker_count = defaultdict(int)
45
46 output_dir.mkdir(parents=True, exist_ok=True)
47 for segment, track, speaker in tqdm(
48 list(diarization.itertracks(yield_label=True)), desc=f"Writing {input_path}"
49 ):
50 if segment.end - segment.start < 1:
51 continue
52 speaker_count[speaker] += 1
53 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]
54 sf.write(
55 (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"),
56 audio_cut,
57 sr,
58 )
59
60 LOG.info(f"Speaker count: {speaker_count}")
61
62
63 def preprocess_speaker_diarization(
64 input_dir: Path | str,
65 output_dir: Path | str,
66 sr: int,
67 *,
68 min_speakers: int = 1,
69 max_speakers: int = 1,
70 huggingface_token: str | None = None,
71 n_jobs: int = -1,
72 ) -> None:
73 if huggingface_token is not None and not huggingface_token.startswith("hf_"):
74 LOG.warning("Huggingface token probably should start with hf_")
75 if not torch.cuda.is_available():
76 LOG.warning("CUDA is not available. This will be extremely slow.")
77 input_dir = Path(input_dir)
78 output_dir = Path(output_dir)
79 input_dir.mkdir(parents=True, exist_ok=True)
80 output_dir.mkdir(parents=True, exist_ok=True)
81 input_paths = list(input_dir.rglob("*.*"))
82 with tqdm_joblib(desc="Preprocessing speaker diarization", total=len(input_paths)):
83 Parallel(n_jobs=n_jobs)(
84 delayed(_process_one)(
85 input_path,
86 output_dir / input_path.relative_to(input_dir).parent / input_path.stem,
87 sr,
88 max_speakers=max_speakers,
89 min_speakers=min_speakers,
90 huggingface_token=huggingface_token,
91 )
92 for input_path in input_paths
93 )
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
@@ -52,7 +52,7 @@
speaker_count[speaker] += 1
audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]
sf.write(
- (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"),
+ (output_dir / f"{speaker}_{speaker_count[speaker]:04d}.wav"),
audio_cut,
sr,
)
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n@@ -52,7 +52,7 @@\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n- (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n+ (output_dir / f\"{speaker}_{speaker_count[speaker]:04d}.wav\"),\n audio_cut,\n sr,\n )\n", "issue": "Ensure proper sorting by name for output files\n### Is your feature request related to a problem? Please describe.\n\nTo enhance readability and ensure proper sorting by name, the numeric part of output file names should have a fixed width. This can be achieved by adding leading zeros to the numeric part, with four digits likely being sufficient.\n\n### Describe alternatives you've considered\n\nI don't have any.\n\n### Additional context\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n\n### Are you willing to resolve this issue by submitting a Pull Request?\n\n- [X] Yes, I have the time, and I know how to start.\n- [ ] Yes, I have the time, but I don't know how to start. I would need guidance.\n- [ ] No, I don't have the time, although I believe I could do it if I had the time...\n- [ ] No, I don't have the time and I wouldn't even know how to start.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]:04d}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}]} |
gh_patches_debug_1357 | rasdani/github-patches | git_diff | aws__aws-cli-276 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws ec2 modify-instance-attribute seems to fail with --source-dest-check parameter
Hi,
I need to call modify-instance-attribute from within my own instance, in order to automate the bootstrap process of my NAT instances.
Here is what happens (btw: $INSTANCE_ID contains a valid InstanceId):
# aws ec2 modify-instance-attribute --instance-id $INSTANCE_ID --source-dest-check 0 --region sa-east-1 --debug
2013-07-16 16:50:28,041 - botocore.service - DEBUG - Creating service object for: ec2
2013-07-16 16:50:28,042 - botocore.base - DEBUG - Attempting to Load: aws/ec2
2013-07-16 16:50:28,394 - botocore.base - DEBUG - Found data file: /usr/lib/python2.6/site-packages/botocore/data/aws/ec2.json
2013-07-16 16:50:28,394 - botocore.hooks - DEBUG - emit: service-created
2013-07-16 16:50:28,395 - botocore.service - DEBUG - Creating operation objects for: Service(ec2)
2013-07-16 16:50:28,412 - botocore.hooks - DEBUG - emit: parser-created.ec2
2013-07-16 16:50:28,413 - botocore.operation - DEBUG - Creating parameter objects for: Operation:ModifyInstanceAttribute
2013-07-16 16:50:28,417 - botocore.hooks - DEBUG - emit: parser-created.ec2-modify-instance-attribute
2013-07-16 16:50:28,418 - botocore.hooks - DEBUG - emit: process-cli-arg.ec2.modify-instance-attribute
2013-07-16 16:50:28,418 - botocore.hooks - DEBUG - emit: calling <awscli.argprocess.ParamShorthand object at 0x27dad10>
Traceback (most recent call last):
File "/usr/lib/python2.6/site-packages/awscli/clidriver.py", line 168, in _call
self._build_call_parameters(args, params)
File "/usr/lib/python2.6/site-packages/awscli/clidriver.py", line 121, in _build_call_parameters
param_dict[param.py_name] = unpack_cli_arg(param, value)
File "/usr/lib/python2.6/site-packages/awscli/argprocess.py", line 344, in unpack_cli_arg
return unpack_complex_cli_arg(parameter, value)
File "/usr/lib/python2.6/site-packages/awscli/argprocess.py", line 355, in unpack_complex_cli_arg
raise ValueError(msg)
ValueError: Structure option value must be JSON or path to file.
The output is the same if I use --source-dest-check false. If the call isn't supposed to be made this way, then the correct usage needs better documentation.
Thanks a lot
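
Editorial note (not part of the original report): a minimal reproduction of the parse failure, based on the `unpack_complex_cli_arg` function in the file listing below. `FakeParam` is a stand-in for botocore's structure parameter, and the `{"Value": false}` shape is an assumption about EC2's AttributeBooleanValue structure; any value that does not start with `{` falls into the error branch that raises the ValueError quoted in the traceback.

```python
# Editorial sketch of the failing branch in awscli/argprocess.py (see file below).
import json


class FakeParam:                      # stand-in for the botocore structure parameter
    type = 'structure'
    cli_name = '--source-dest-check'


def unpack_complex_cli_arg(parameter, value):   # trimmed copy of the function shown below
    if parameter.type in ('structure', 'map'):
        if value.lstrip()[0] == '{':
            return json.loads(value)
        raise ValueError('Structure option value must be JSON or path to file.')


print(unpack_complex_cli_arg(FakeParam(), '{"Value": false}'))   # {'Value': False} -- assumed shape
try:
    unpack_complex_cli_arg(FakeParam(), '0')                     # reproduces the issue
except ValueError as err:
    print(err)   # Structure option value must be JSON or path to file.
```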
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/argprocess.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """Module for processing CLI args."""
14 import os
15 import json
16 import logging
17 import six
18
19 from awscli import utils
20
21
22 SCALAR_TYPES = set([
23 'string', 'float', 'integer', 'long', 'boolean', 'double',
24 'blob', 'timestamp'
25 ])
26 COMPLEX_TYPES = set(['structure', 'map', 'list'])
27 LOG = logging.getLogger('awscli.argprocess')
28
29
30 class ParamError(Exception):
31 def __init__(self, param, message):
32 full_message = ("Error parsing parameter %s, should be: %s" %
33 (param.cli_name, message))
34 super(ParamError, self).__init__(full_message)
35 self.param = param
36
37
38 class ParamSyntaxError(Exception):
39 pass
40
41
42 class ParamUnknownKeyError(Exception):
43 def __init__(self, param, key, valid_keys):
44 valid_keys = ', '.join(valid_keys)
45 full_message = (
46 "Unknown key '%s' for parameter %s, valid choices "
47 "are: %s" % (key, param.cli_name, valid_keys))
48 super(ParamUnknownKeyError, self).__init__(full_message)
49
50
51 def detect_shape_structure(param):
52 if param.type in SCALAR_TYPES:
53 return 'scalar'
54 elif param.type == 'structure':
55 sub_types = [detect_shape_structure(p)
56 for p in param.members]
57 # We're distinguishing between structure(scalar)
58 # and structure(scalars), because for the case of
59 # a single scalar in a structure we can simplify
60 # more than a structure(scalars).
61 if len(sub_types) == 1 and all(p == 'scalar' for p in sub_types):
62 return 'structure(scalar)'
63 elif len(sub_types) > 1 and all(p == 'scalar' for p in sub_types):
64 return 'structure(scalars)'
65 else:
66 return 'structure(%s)' % ', '.join(sorted(set(sub_types)))
67 elif param.type == 'list':
68 return 'list-%s' % detect_shape_structure(param.members)
69 elif param.type == 'map':
70 if param.members.type in SCALAR_TYPES:
71 return 'map-scalar'
72 else:
73 return 'map-%s' % detect_shape_structure(param.members)
74
75
76 class ParamShorthand(object):
77
78 # To add support for a new shape:
79 #
80 # * Add it to SHORTHAND_SHAPES below, key is the shape structure
81 # value is the name of the method to call.
82 # * Implement parse method.
83 # * Implement _doc_<parse_method_name>. This is used to generate
84 # the docs for this shorthand syntax.
85
86 SHORTHAND_SHAPES = {
87 'structure(scalars)': '_key_value_parse',
88 'map-scalar': '_key_value_parse',
89 'list-structure(scalar)': '_list_scalar_parse',
90 'list-structure(scalars)': '_list_key_value_parse',
91 'list-structure(list-scalar, scalar)': '_list_scalar_list_parse',
92 }
93
94 def __init__(self):
95 pass
96
97 def __call__(self, param, value, **kwargs):
98 """Attempt to parse shorthand syntax for values.
99
100 This is intended to be hooked up as an event handler (hence the
101 **kwargs). Given ``param`` object and its string ``value``,
102 figure out if we can parse it. If we can parse it, we return
103 the parsed value (typically some sort of python dict).
104
105 :type param: :class:`botocore.parameters.Parameter`
106 :param param: The parameter object (includes various metadata
107 about the parameter).
108
109 :type value: str
110 :param value: The value for the parameter type on the command
111 line, e.g ``--foo this_value``, value would be ``"this_value"``.
112
113 :returns: If we can parse the value we return the parsed value.
114 If it looks like JSON, we return None (which tells the event
115 emitter to use the default ``unpack_cli_arg`` provided that
116 no other event handlers can parsed the value). If we
117 run into an error parsing the value, a ``ParamError`` will
118 be raised.
119
120 """
121 parse_method = self.get_parse_method_for_param(param, value)
122 if parse_method is None:
123 return
124 else:
125 try:
126 LOG.debug("Using %s for param %s", parse_method, param)
127 parsed = getattr(self, parse_method)(param, value)
128 except ParamSyntaxError as e:
129 doc_fn = self._get_example_fn(param)
130 # Try to give them a helpful error message.
131 if doc_fn is None:
132 raise e
133 else:
134 raise ParamError(param, doc_fn(param))
135 return parsed
136
137 def get_parse_method_for_param(self, param, value=None):
138 # We first need to make sure this is a parameter that qualifies
139 # for simplification. The first short-circuit case is if it looks
140 # like json we immediately return.
141 if isinstance(value, list):
142 check_val = value[0]
143 else:
144 check_val = value
145 if isinstance(check_val, str) and check_val.startswith(('[', '{')):
146 LOG.debug("Param %s looks like JSON, not considered for "
147 "param shorthand.", param.py_name)
148 return
149 structure = detect_shape_structure(param)
150 parse_method = self.SHORTHAND_SHAPES.get(structure)
151 return parse_method
152
153 def _get_example_fn(self, param):
154 doc_fn = None
155 shape_structure = detect_shape_structure(param)
156 method = self.SHORTHAND_SHAPES.get(shape_structure)
157 if method:
158 doc_fn = getattr(self, '_docs' + method, None)
159 return doc_fn
160
161 def add_example_fn(self, arg_name, help_command, **kwargs):
162 """
163 Adds a callable to the ``example_fn`` attribute of the parameter
164 if the parameter type is supported by shorthand syntax. This
165 callable should return a string containing just the example and
166 not any of the ReST formatting that might be required in the docs.
167 """
168 argument = help_command.arg_table[arg_name]
169 if hasattr(argument, 'argument_object') and argument.argument_object:
170 param = argument.argument_object
171 LOG.debug('Adding example fn for: %s' % param.name)
172 doc_fn = self._get_example_fn(param)
173 param.example_fn = doc_fn
174
175 def _list_scalar_list_parse(self, param, value):
176 # Think something like ec2.DescribeInstances.Filters.
177 # We're looking for key=val1,val2,val3,key2=val1,val2.
178 arg_types = {}
179 for arg in param.members.members:
180 arg_types[arg.name] = arg.type
181 parsed = []
182 for v in value:
183 parts = self._split_on_commas(v)
184 current_parsed = {}
185 current_key = None
186 for part in parts:
187 current = part.split('=', 1)
188 if len(current) == 2:
189 # This is a key/value pair.
190 current_key = current[0].strip()
191 current_value = current[1].strip()
192 if current_key not in arg_types:
193 raise ParamUnknownKeyError(param, current_key,
194 arg_types.keys())
195 elif arg_types[current_key] == 'list':
196 current_parsed[current_key] = [current_value]
197 else:
198 current_parsed[current_key] = current_value
199 elif current_key is not None:
200 # This is a value which we associate with the current_key,
201 # so key1=val1,val2
202 # ^
203 # |
204 # val2 is associated with key1.
205 current_parsed[current_key].append(current[0])
206 else:
207 raise ParamSyntaxError(part)
208 parsed.append(current_parsed)
209 return parsed
210
211 def _list_scalar_parse(self, param, value):
212 single_param = param.members.members[0]
213 parsed = []
214 # We know that value is a list in this case.
215 for v in value:
216 parsed.append({single_param.name: v})
217 return parsed
218
219 def _list_key_value_parse(self, param, value):
220 # param is a list param.
221 # param.member is the struct param.
222 struct_param = param.members
223 parsed = []
224 for v in value:
225 single_struct_param = self._key_value_parse(struct_param, v)
226 parsed.append(single_struct_param)
227 return parsed
228
229 def _key_value_parse(self, param, value):
230 # The expected structure is:
231 # key=value,key2=value
232 # that is, csv key value pairs, where the key and values
233 # are separated by '='. All of this should be whitespace
234 # insensitive.
235 parsed = {}
236 parts = self._split_on_commas(value)
237 valid_names = self._create_name_to_params(param)
238 for part in parts:
239 try:
240 key, value = part.split('=', 1)
241 except ValueError:
242 raise ParamSyntaxError(part)
243 key = key.strip()
244 value = value.strip()
245 if key not in valid_names:
246 raise ParamUnknownKeyError(param, key, valid_names)
247 sub_param = valid_names[key]
248 if sub_param is not None:
249 value = unpack_scalar_cli_arg(sub_param, value)
250 parsed[key] = value
251 return parsed
252
253 def _create_name_to_params(self, param):
254 if param.type == 'structure':
255 return dict([(p.name, p) for p in param.members])
256 elif param.type == 'map':
257 return dict([(v, None) for v in param.keys.enum])
258
259 def _docs_list_scalar_list_parse(self, param):
260 s = 'Key value pairs, where values are separated by commas.\n'
261 s += '%s ' % param.cli_name
262 inner_params = param.members.members
263 scalar_params = [p for p in inner_params if p.type in SCALAR_TYPES]
264 list_params = [p for p in inner_params if p.type == 'list']
265 for param in scalar_params:
266 s += '%s=%s1,' % (param.name, param.type)
267 for param in list_params[:-1]:
268 param_type = param.members.type
269 s += '%s=%s1,%s2,' % (param.name, param_type, param_type)
270 last_param = list_params[-1]
271 param_type = last_param.members.type
272 s += '%s=%s1,%s2' % (last_param.name, param_type, param_type)
273 return s
274
275 def _docs_list_scalar_parse(self, param):
276 name = param.members.members[0].name
277 return '%s %s1 %s2 %s3' % (param.cli_name, name, name, name)
278
279 def _docs_list_key_value_parse(self, param):
280 s = "Key value pairs, with multiple values separated by a space.\n"
281 s += '%s ' % param.cli_name
282 s += ','.join(['%s=%s' % (sub_param.name, sub_param.type)
283 for sub_param in param.members.members])
284 return s
285
286 def _docs_key_value_parse(self, param):
287 s = '%s ' % param.cli_name
288 if param.type == 'structure':
289 s += ','.join(['%s=value' % sub_param.name
290 for sub_param in param.members])
291 elif param.type == 'map':
292 s += 'key_name=string,key_name2=string'
293 if param.keys.type == 'string' and hasattr(param.keys, 'enum'):
294 s += '\nWhere valid key names are:\n'
295 for value in param.keys.enum:
296 s += ' %s\n' % value
297 return s
298
299 def _split_on_commas(self, value):
300 try:
301 return utils.split_on_commas(value)
302 except ValueError as e:
303 raise ParamSyntaxError(str(e))
304
305
306 def unpack_cli_arg(parameter, value):
307 """
308 Parses and unpacks the encoded string command line parameter
309 and returns native Python data structures that can be passed
310 to the Operation.
311
312 :type parameter: :class:`botocore.parameter.Parameter`
313 :param parameter: The parameter object containing metadata about
314 the parameter.
315
316 :param value: The value of the parameter. This can be a number of
317 different python types (str, list, etc). This is the value as
318 it's specified on the command line.
319
320 :return: The "unpacked" argument than can be sent to the `Operation`
321 object in python.
322 """
323 if parameter.type in SCALAR_TYPES:
324 return unpack_scalar_cli_arg(parameter, value)
325 elif parameter.type in COMPLEX_TYPES:
326 return unpack_complex_cli_arg(parameter, value)
327 else:
328 return str(value)
329
330
331 def unpack_complex_cli_arg(parameter, value):
332 if parameter.type == 'structure' or parameter.type == 'map':
333 if value.lstrip()[0] == '{':
334 d = json.loads(value)
335 else:
336 msg = 'Structure option value must be JSON or path to file.'
337 raise ValueError(msg)
338 return d
339 elif parameter.type == 'list':
340 if isinstance(value, six.string_types):
341 if value.lstrip()[0] == '[':
342 return json.loads(value)
343 elif isinstance(value, list) and len(value) == 1:
344 single_value = value[0].strip()
345 if single_value and single_value[0] == '[':
346 return json.loads(value[0])
347 return [unpack_cli_arg(parameter.members, v) for v in value]
348
349
350 def unpack_scalar_cli_arg(parameter, value):
351 if parameter.type == 'integer' or parameter.type == 'long':
352 return int(value)
353 elif parameter.type == 'float' or parameter.type == 'double':
354 # TODO: losing precision on double types
355 return float(value)
356 elif parameter.type == 'blob' and parameter.payload and parameter.streaming:
357 file_path = os.path.expandvars(value)
358 file_path = os.path.expanduser(file_path)
359 if not os.path.isfile(file_path):
360 msg = 'Blob values must be a path to a file.'
361 raise ValueError(msg)
362 return open(file_path, 'rb')
363 elif parameter.type == 'boolean':
364 return bool(value)
365 else:
366 return str(value)
367
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/argprocess.py b/awscli/argprocess.py
--- a/awscli/argprocess.py
+++ b/awscli/argprocess.py
@@ -333,7 +333,8 @@
if value.lstrip()[0] == '{':
d = json.loads(value)
else:
- msg = 'Structure option value must be JSON or path to file.'
+ msg = 'The value for parameter "%s" must be JSON or path to file.' % (
+ parameter.cli_name)
raise ValueError(msg)
return d
elif parameter.type == 'list':
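For readers skimming this record, a minimal sketch of the behavior the patch above aims for: a structure-parse failure should name the offending CLI option. `FakeParam` and the option name below are assumptions standing in for the real botocore parameter object, not awscli code.
```python
# Stand-in illustration of the patched error message; FakeParam is only a
# placeholder for the real botocore parameter object.
import json
from collections import namedtuple

FakeParam = namedtuple("FakeParam", ["cli_name", "type"])

def unpack_structure(parameter, value):
    if value.lstrip()[0] == '{':
        return json.loads(value)
    # After the patch, the message names the parameter that failed to parse.
    raise ValueError(
        'The value for parameter "%s" must be JSON or path to file.'
        % parameter.cli_name)

try:
    unpack_structure(FakeParam("--source-dest-check", "structure"), "0")
except ValueError as err:
    print(err)  # names the parameter instead of a generic "Structure option" message
```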
| {"golden_diff": "diff --git a/awscli/argprocess.py b/awscli/argprocess.py\n--- a/awscli/argprocess.py\n+++ b/awscli/argprocess.py\n@@ -333,7 +333,8 @@\n if value.lstrip()[0] == '{':\n d = json.loads(value)\n else:\n- msg = 'Structure option value must be JSON or path to file.'\n+ msg = 'The value for parameter \"%s\" must be JSON or path to file.' % (\n+ parameter.cli_name)\n raise ValueError(msg)\n return d\n elif parameter.type == 'list':\n", "issue": "aws ec2 modify-instance-attribute seems to fail with --source-dest-check parameter\nHi,\n\nI need to call modify-instance-attribute from within my own instance, in order to automate the bootstrap process of my NAT instances.\n\nHere is what happens (btw: $INSTANCE_ID contains a valid InstanceId):\n\n# aws ec2 modify-instance-attribute --instance-id $INSTANCE_ID --source-dest-check 0 --region sa-east-1 --debug\n\n2013-07-16 16:50:28,041 - botocore.service - DEBUG - Creating service object for: ec2\n2013-07-16 16:50:28,042 - botocore.base - DEBUG - Attempting to Load: aws/ec2\n2013-07-16 16:50:28,394 - botocore.base - DEBUG - Found data file: /usr/lib/python2.6/site-packages/botocore/data/aws/ec2.json\n2013-07-16 16:50:28,394 - botocore.hooks - DEBUG - emit: service-created\n2013-07-16 16:50:28,395 - botocore.service - DEBUG - Creating operation objects for: Service(ec2)\n2013-07-16 16:50:28,412 - botocore.hooks - DEBUG - emit: parser-created.ec2\n2013-07-16 16:50:28,413 - botocore.operation - DEBUG - Creating parameter objects for: Operation:ModifyInstanceAttribute\n2013-07-16 16:50:28,417 - botocore.hooks - DEBUG - emit: parser-created.ec2-modify-instance-attribute\n2013-07-16 16:50:28,418 - botocore.hooks - DEBUG - emit: process-cli-arg.ec2.modify-instance-attribute\n2013-07-16 16:50:28,418 - botocore.hooks - DEBUG - emit: calling <awscli.argprocess.ParamShorthand object at 0x27dad10>\nTraceback (most recent call last):\n File \"/usr/lib/python2.6/site-packages/awscli/clidriver.py\", line 168, in _call\n self._build_call_parameters(args, params)\n File \"/usr/lib/python2.6/site-packages/awscli/clidriver.py\", line 121, in _build_call_parameters\n param_dict[param.py_name] = unpack_cli_arg(param, value)\n File \"/usr/lib/python2.6/site-packages/awscli/argprocess.py\", line 344, in unpack_cli_arg\n return unpack_complex_cli_arg(parameter, value)\n File \"/usr/lib/python2.6/site-packages/awscli/argprocess.py\", line 355, in unpack_complex_cli_arg\n raise ValueError(msg)\nValueError: Structure option value must be JSON or path to file.\n\nThe output is the same if I use --source-dest-check false. If the call isn't supposed to be done like that, then I think it lacks better documentation.\n\nThanks a lot\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Module for processing CLI args.\"\"\"\nimport os\nimport json\nimport logging\nimport six\n\nfrom awscli import utils\n\n\nSCALAR_TYPES = set([\n 'string', 'float', 'integer', 'long', 'boolean', 'double',\n 'blob', 'timestamp'\n])\nCOMPLEX_TYPES = set(['structure', 'map', 'list'])\nLOG = logging.getLogger('awscli.argprocess')\n\n\nclass ParamError(Exception):\n def __init__(self, param, message):\n full_message = (\"Error parsing parameter %s, should be: %s\" %\n (param.cli_name, message))\n super(ParamError, self).__init__(full_message)\n self.param = param\n\n\nclass ParamSyntaxError(Exception):\n pass\n\n\nclass ParamUnknownKeyError(Exception):\n def __init__(self, param, key, valid_keys):\n valid_keys = ', '.join(valid_keys)\n full_message = (\n \"Unknown key '%s' for parameter %s, valid choices \"\n \"are: %s\" % (key, param.cli_name, valid_keys))\n super(ParamUnknownKeyError, self).__init__(full_message)\n\n\ndef detect_shape_structure(param):\n if param.type in SCALAR_TYPES:\n return 'scalar'\n elif param.type == 'structure':\n sub_types = [detect_shape_structure(p)\n for p in param.members]\n # We're distinguishing between structure(scalar)\n # and structure(scalars), because for the case of\n # a single scalar in a structure we can simplify\n # more than a structure(scalars).\n if len(sub_types) == 1 and all(p == 'scalar' for p in sub_types):\n return 'structure(scalar)'\n elif len(sub_types) > 1 and all(p == 'scalar' for p in sub_types):\n return 'structure(scalars)'\n else:\n return 'structure(%s)' % ', '.join(sorted(set(sub_types)))\n elif param.type == 'list':\n return 'list-%s' % detect_shape_structure(param.members)\n elif param.type == 'map':\n if param.members.type in SCALAR_TYPES:\n return 'map-scalar'\n else:\n return 'map-%s' % detect_shape_structure(param.members)\n\n\nclass ParamShorthand(object):\n\n # To add support for a new shape:\n #\n # * Add it to SHORTHAND_SHAPES below, key is the shape structure\n # value is the name of the method to call.\n # * Implement parse method.\n # * Implement _doc_<parse_method_name>. This is used to generate\n # the docs for this shorthand syntax.\n\n SHORTHAND_SHAPES = {\n 'structure(scalars)': '_key_value_parse',\n 'map-scalar': '_key_value_parse',\n 'list-structure(scalar)': '_list_scalar_parse',\n 'list-structure(scalars)': '_list_key_value_parse',\n 'list-structure(list-scalar, scalar)': '_list_scalar_list_parse',\n }\n\n def __init__(self):\n pass\n\n def __call__(self, param, value, **kwargs):\n \"\"\"Attempt to parse shorthand syntax for values.\n\n This is intended to be hooked up as an event handler (hence the\n **kwargs). Given ``param`` object and its string ``value``,\n figure out if we can parse it. If we can parse it, we return\n the parsed value (typically some sort of python dict).\n\n :type param: :class:`botocore.parameters.Parameter`\n :param param: The parameter object (includes various metadata\n about the parameter).\n\n :type value: str\n :param value: The value for the parameter type on the command\n line, e.g ``--foo this_value``, value would be ``\"this_value\"``.\n\n :returns: If we can parse the value we return the parsed value.\n If it looks like JSON, we return None (which tells the event\n emitter to use the default ``unpack_cli_arg`` provided that\n no other event handlers can parsed the value). 
If we\n run into an error parsing the value, a ``ParamError`` will\n be raised.\n\n \"\"\"\n parse_method = self.get_parse_method_for_param(param, value)\n if parse_method is None:\n return\n else:\n try:\n LOG.debug(\"Using %s for param %s\", parse_method, param)\n parsed = getattr(self, parse_method)(param, value)\n except ParamSyntaxError as e:\n doc_fn = self._get_example_fn(param)\n # Try to give them a helpful error message.\n if doc_fn is None:\n raise e\n else:\n raise ParamError(param, doc_fn(param))\n return parsed\n\n def get_parse_method_for_param(self, param, value=None):\n # We first need to make sure this is a parameter that qualifies\n # for simplification. The first short-circuit case is if it looks\n # like json we immediately return.\n if isinstance(value, list):\n check_val = value[0]\n else:\n check_val = value\n if isinstance(check_val, str) and check_val.startswith(('[', '{')):\n LOG.debug(\"Param %s looks like JSON, not considered for \"\n \"param shorthand.\", param.py_name)\n return\n structure = detect_shape_structure(param)\n parse_method = self.SHORTHAND_SHAPES.get(structure)\n return parse_method\n\n def _get_example_fn(self, param):\n doc_fn = None\n shape_structure = detect_shape_structure(param)\n method = self.SHORTHAND_SHAPES.get(shape_structure)\n if method:\n doc_fn = getattr(self, '_docs' + method, None)\n return doc_fn\n\n def add_example_fn(self, arg_name, help_command, **kwargs):\n \"\"\"\n Adds a callable to the ``example_fn`` attribute of the parameter\n if the parameter type is supported by shorthand syntax. This\n callable should return a string containing just the example and\n not any of the ReST formatting that might be required in the docs.\n \"\"\"\n argument = help_command.arg_table[arg_name]\n if hasattr(argument, 'argument_object') and argument.argument_object:\n param = argument.argument_object\n LOG.debug('Adding example fn for: %s' % param.name)\n doc_fn = self._get_example_fn(param)\n param.example_fn = doc_fn\n\n def _list_scalar_list_parse(self, param, value):\n # Think something like ec2.DescribeInstances.Filters.\n # We're looking for key=val1,val2,val3,key2=val1,val2.\n arg_types = {}\n for arg in param.members.members:\n arg_types[arg.name] = arg.type\n parsed = []\n for v in value:\n parts = self._split_on_commas(v)\n current_parsed = {}\n current_key = None\n for part in parts:\n current = part.split('=', 1)\n if len(current) == 2:\n # This is a key/value pair.\n current_key = current[0].strip()\n current_value = current[1].strip()\n if current_key not in arg_types:\n raise ParamUnknownKeyError(param, current_key,\n arg_types.keys())\n elif arg_types[current_key] == 'list':\n current_parsed[current_key] = [current_value]\n else:\n current_parsed[current_key] = current_value\n elif current_key is not None:\n # This is a value which we associate with the current_key,\n # so key1=val1,val2\n # ^\n # |\n # val2 is associated with key1.\n current_parsed[current_key].append(current[0])\n else:\n raise ParamSyntaxError(part)\n parsed.append(current_parsed)\n return parsed\n\n def _list_scalar_parse(self, param, value):\n single_param = param.members.members[0]\n parsed = []\n # We know that value is a list in this case.\n for v in value:\n parsed.append({single_param.name: v})\n return parsed\n\n def _list_key_value_parse(self, param, value):\n # param is a list param.\n # param.member is the struct param.\n struct_param = param.members\n parsed = []\n for v in value:\n single_struct_param = self._key_value_parse(struct_param, v)\n 
parsed.append(single_struct_param)\n return parsed\n\n def _key_value_parse(self, param, value):\n # The expected structure is:\n # key=value,key2=value\n # that is, csv key value pairs, where the key and values\n # are separated by '='. All of this should be whitespace\n # insensitive.\n parsed = {}\n parts = self._split_on_commas(value)\n valid_names = self._create_name_to_params(param)\n for part in parts:\n try:\n key, value = part.split('=', 1)\n except ValueError:\n raise ParamSyntaxError(part)\n key = key.strip()\n value = value.strip()\n if key not in valid_names:\n raise ParamUnknownKeyError(param, key, valid_names)\n sub_param = valid_names[key]\n if sub_param is not None:\n value = unpack_scalar_cli_arg(sub_param, value)\n parsed[key] = value\n return parsed\n\n def _create_name_to_params(self, param):\n if param.type == 'structure':\n return dict([(p.name, p) for p in param.members])\n elif param.type == 'map':\n return dict([(v, None) for v in param.keys.enum])\n\n def _docs_list_scalar_list_parse(self, param):\n s = 'Key value pairs, where values are separated by commas.\\n'\n s += '%s ' % param.cli_name\n inner_params = param.members.members\n scalar_params = [p for p in inner_params if p.type in SCALAR_TYPES]\n list_params = [p for p in inner_params if p.type == 'list']\n for param in scalar_params:\n s += '%s=%s1,' % (param.name, param.type)\n for param in list_params[:-1]:\n param_type = param.members.type\n s += '%s=%s1,%s2,' % (param.name, param_type, param_type)\n last_param = list_params[-1]\n param_type = last_param.members.type\n s += '%s=%s1,%s2' % (last_param.name, param_type, param_type)\n return s\n\n def _docs_list_scalar_parse(self, param):\n name = param.members.members[0].name\n return '%s %s1 %s2 %s3' % (param.cli_name, name, name, name)\n\n def _docs_list_key_value_parse(self, param):\n s = \"Key value pairs, with multiple values separated by a space.\\n\"\n s += '%s ' % param.cli_name\n s += ','.join(['%s=%s' % (sub_param.name, sub_param.type)\n for sub_param in param.members.members])\n return s\n\n def _docs_key_value_parse(self, param):\n s = '%s ' % param.cli_name\n if param.type == 'structure':\n s += ','.join(['%s=value' % sub_param.name\n for sub_param in param.members])\n elif param.type == 'map':\n s += 'key_name=string,key_name2=string'\n if param.keys.type == 'string' and hasattr(param.keys, 'enum'):\n s += '\\nWhere valid key names are:\\n'\n for value in param.keys.enum:\n s += ' %s\\n' % value\n return s\n\n def _split_on_commas(self, value):\n try:\n return utils.split_on_commas(value)\n except ValueError as e:\n raise ParamSyntaxError(str(e))\n\n\ndef unpack_cli_arg(parameter, value):\n \"\"\"\n Parses and unpacks the encoded string command line parameter\n and returns native Python data structures that can be passed\n to the Operation.\n\n :type parameter: :class:`botocore.parameter.Parameter`\n :param parameter: The parameter object containing metadata about\n the parameter.\n\n :param value: The value of the parameter. This can be a number of\n different python types (str, list, etc). 
This is the value as\n it's specified on the command line.\n\n :return: The \"unpacked\" argument than can be sent to the `Operation`\n object in python.\n \"\"\"\n if parameter.type in SCALAR_TYPES:\n return unpack_scalar_cli_arg(parameter, value)\n elif parameter.type in COMPLEX_TYPES:\n return unpack_complex_cli_arg(parameter, value)\n else:\n return str(value)\n\n\ndef unpack_complex_cli_arg(parameter, value):\n if parameter.type == 'structure' or parameter.type == 'map':\n if value.lstrip()[0] == '{':\n d = json.loads(value)\n else:\n msg = 'Structure option value must be JSON or path to file.'\n raise ValueError(msg)\n return d\n elif parameter.type == 'list':\n if isinstance(value, six.string_types):\n if value.lstrip()[0] == '[':\n return json.loads(value)\n elif isinstance(value, list) and len(value) == 1:\n single_value = value[0].strip()\n if single_value and single_value[0] == '[':\n return json.loads(value[0])\n return [unpack_cli_arg(parameter.members, v) for v in value]\n\n\ndef unpack_scalar_cli_arg(parameter, value):\n if parameter.type == 'integer' or parameter.type == 'long':\n return int(value)\n elif parameter.type == 'float' or parameter.type == 'double':\n # TODO: losing precision on double types\n return float(value)\n elif parameter.type == 'blob' and parameter.payload and parameter.streaming:\n file_path = os.path.expandvars(value)\n file_path = os.path.expanduser(file_path)\n if not os.path.isfile(file_path):\n msg = 'Blob values must be a path to a file.'\n raise ValueError(msg)\n return open(file_path, 'rb')\n elif parameter.type == 'boolean':\n return bool(value)\n else:\n return str(value)\n", "path": "awscli/argprocess.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Module for processing CLI args.\"\"\"\nimport os\nimport json\nimport logging\nimport six\n\nfrom awscli import utils\n\n\nSCALAR_TYPES = set([\n 'string', 'float', 'integer', 'long', 'boolean', 'double',\n 'blob', 'timestamp'\n])\nCOMPLEX_TYPES = set(['structure', 'map', 'list'])\nLOG = logging.getLogger('awscli.argprocess')\n\n\nclass ParamError(Exception):\n def __init__(self, param, message):\n full_message = (\"Error parsing parameter %s, should be: %s\" %\n (param.cli_name, message))\n super(ParamError, self).__init__(full_message)\n self.param = param\n\n\nclass ParamSyntaxError(Exception):\n pass\n\n\nclass ParamUnknownKeyError(Exception):\n def __init__(self, param, key, valid_keys):\n valid_keys = ', '.join(valid_keys)\n full_message = (\n \"Unknown key '%s' for parameter %s, valid choices \"\n \"are: %s\" % (key, param.cli_name, valid_keys))\n super(ParamUnknownKeyError, self).__init__(full_message)\n\n\ndef detect_shape_structure(param):\n if param.type in SCALAR_TYPES:\n return 'scalar'\n elif param.type == 'structure':\n sub_types = [detect_shape_structure(p)\n for p in param.members]\n # We're distinguishing between structure(scalar)\n # and structure(scalars), because for the case of\n # a single scalar in a structure we can simplify\n # more than a structure(scalars).\n if len(sub_types) == 1 and all(p == 'scalar' for p in sub_types):\n return 'structure(scalar)'\n elif len(sub_types) > 1 and all(p == 'scalar' for p in sub_types):\n return 'structure(scalars)'\n else:\n return 'structure(%s)' % ', '.join(sorted(set(sub_types)))\n elif param.type == 'list':\n return 'list-%s' % detect_shape_structure(param.members)\n elif param.type == 'map':\n if param.members.type in SCALAR_TYPES:\n return 'map-scalar'\n else:\n return 'map-%s' % detect_shape_structure(param.members)\n\n\nclass ParamShorthand(object):\n\n # To add support for a new shape:\n #\n # * Add it to SHORTHAND_SHAPES below, key is the shape structure\n # value is the name of the method to call.\n # * Implement parse method.\n # * Implement _doc_<parse_method_name>. This is used to generate\n # the docs for this shorthand syntax.\n\n SHORTHAND_SHAPES = {\n 'structure(scalars)': '_key_value_parse',\n 'map-scalar': '_key_value_parse',\n 'list-structure(scalar)': '_list_scalar_parse',\n 'list-structure(scalars)': '_list_key_value_parse',\n 'list-structure(list-scalar, scalar)': '_list_scalar_list_parse',\n }\n\n def __init__(self):\n pass\n\n def __call__(self, param, value, **kwargs):\n \"\"\"Attempt to parse shorthand syntax for values.\n\n This is intended to be hooked up as an event handler (hence the\n **kwargs). Given ``param`` object and its string ``value``,\n figure out if we can parse it. If we can parse it, we return\n the parsed value (typically some sort of python dict).\n\n :type param: :class:`botocore.parameters.Parameter`\n :param param: The parameter object (includes various metadata\n about the parameter).\n\n :type value: str\n :param value: The value for the parameter type on the command\n line, e.g ``--foo this_value``, value would be ``\"this_value\"``.\n\n :returns: If we can parse the value we return the parsed value.\n If it looks like JSON, we return None (which tells the event\n emitter to use the default ``unpack_cli_arg`` provided that\n no other event handlers can parsed the value). 
If we\n run into an error parsing the value, a ``ParamError`` will\n be raised.\n\n \"\"\"\n parse_method = self.get_parse_method_for_param(param, value)\n if parse_method is None:\n return\n else:\n try:\n LOG.debug(\"Using %s for param %s\", parse_method, param)\n parsed = getattr(self, parse_method)(param, value)\n except ParamSyntaxError as e:\n doc_fn = self._get_example_fn(param)\n # Try to give them a helpful error message.\n if doc_fn is None:\n raise e\n else:\n raise ParamError(param, doc_fn(param))\n return parsed\n\n def get_parse_method_for_param(self, param, value=None):\n # We first need to make sure this is a parameter that qualifies\n # for simplification. The first short-circuit case is if it looks\n # like json we immediately return.\n if isinstance(value, list):\n check_val = value[0]\n else:\n check_val = value\n if isinstance(check_val, str) and check_val.startswith(('[', '{')):\n LOG.debug(\"Param %s looks like JSON, not considered for \"\n \"param shorthand.\", param.py_name)\n return\n structure = detect_shape_structure(param)\n parse_method = self.SHORTHAND_SHAPES.get(structure)\n return parse_method\n\n def _get_example_fn(self, param):\n doc_fn = None\n shape_structure = detect_shape_structure(param)\n method = self.SHORTHAND_SHAPES.get(shape_structure)\n if method:\n doc_fn = getattr(self, '_docs' + method, None)\n return doc_fn\n\n def add_example_fn(self, arg_name, help_command, **kwargs):\n \"\"\"\n Adds a callable to the ``example_fn`` attribute of the parameter\n if the parameter type is supported by shorthand syntax. This\n callable should return a string containing just the example and\n not any of the ReST formatting that might be required in the docs.\n \"\"\"\n argument = help_command.arg_table[arg_name]\n if hasattr(argument, 'argument_object') and argument.argument_object:\n param = argument.argument_object\n LOG.debug('Adding example fn for: %s' % param.name)\n doc_fn = self._get_example_fn(param)\n param.example_fn = doc_fn\n\n def _list_scalar_list_parse(self, param, value):\n # Think something like ec2.DescribeInstances.Filters.\n # We're looking for key=val1,val2,val3,key2=val1,val2.\n arg_types = {}\n for arg in param.members.members:\n arg_types[arg.name] = arg.type\n parsed = []\n for v in value:\n parts = self._split_on_commas(v)\n current_parsed = {}\n current_key = None\n for part in parts:\n current = part.split('=', 1)\n if len(current) == 2:\n # This is a key/value pair.\n current_key = current[0].strip()\n current_value = current[1].strip()\n if current_key not in arg_types:\n raise ParamUnknownKeyError(param, current_key,\n arg_types.keys())\n elif arg_types[current_key] == 'list':\n current_parsed[current_key] = [current_value]\n else:\n current_parsed[current_key] = current_value\n elif current_key is not None:\n # This is a value which we associate with the current_key,\n # so key1=val1,val2\n # ^\n # |\n # val2 is associated with key1.\n current_parsed[current_key].append(current[0])\n else:\n raise ParamSyntaxError(part)\n parsed.append(current_parsed)\n return parsed\n\n def _list_scalar_parse(self, param, value):\n single_param = param.members.members[0]\n parsed = []\n # We know that value is a list in this case.\n for v in value:\n parsed.append({single_param.name: v})\n return parsed\n\n def _list_key_value_parse(self, param, value):\n # param is a list param.\n # param.member is the struct param.\n struct_param = param.members\n parsed = []\n for v in value:\n single_struct_param = self._key_value_parse(struct_param, v)\n 
parsed.append(single_struct_param)\n return parsed\n\n def _key_value_parse(self, param, value):\n # The expected structure is:\n # key=value,key2=value\n # that is, csv key value pairs, where the key and values\n # are separated by '='. All of this should be whitespace\n # insensitive.\n parsed = {}\n parts = self._split_on_commas(value)\n valid_names = self._create_name_to_params(param)\n for part in parts:\n try:\n key, value = part.split('=', 1)\n except ValueError:\n raise ParamSyntaxError(part)\n key = key.strip()\n value = value.strip()\n if key not in valid_names:\n raise ParamUnknownKeyError(param, key, valid_names)\n sub_param = valid_names[key]\n if sub_param is not None:\n value = unpack_scalar_cli_arg(sub_param, value)\n parsed[key] = value\n return parsed\n\n def _create_name_to_params(self, param):\n if param.type == 'structure':\n return dict([(p.name, p) for p in param.members])\n elif param.type == 'map':\n return dict([(v, None) for v in param.keys.enum])\n\n def _docs_list_scalar_list_parse(self, param):\n s = 'Key value pairs, where values are separated by commas.\\n'\n s += '%s ' % param.cli_name\n inner_params = param.members.members\n scalar_params = [p for p in inner_params if p.type in SCALAR_TYPES]\n list_params = [p for p in inner_params if p.type == 'list']\n for param in scalar_params:\n s += '%s=%s1,' % (param.name, param.type)\n for param in list_params[:-1]:\n param_type = param.members.type\n s += '%s=%s1,%s2,' % (param.name, param_type, param_type)\n last_param = list_params[-1]\n param_type = last_param.members.type\n s += '%s=%s1,%s2' % (last_param.name, param_type, param_type)\n return s\n\n def _docs_list_scalar_parse(self, param):\n name = param.members.members[0].name\n return '%s %s1 %s2 %s3' % (param.cli_name, name, name, name)\n\n def _docs_list_key_value_parse(self, param):\n s = \"Key value pairs, with multiple values separated by a space.\\n\"\n s += '%s ' % param.cli_name\n s += ','.join(['%s=%s' % (sub_param.name, sub_param.type)\n for sub_param in param.members.members])\n return s\n\n def _docs_key_value_parse(self, param):\n s = '%s ' % param.cli_name\n if param.type == 'structure':\n s += ','.join(['%s=value' % sub_param.name\n for sub_param in param.members])\n elif param.type == 'map':\n s += 'key_name=string,key_name2=string'\n if param.keys.type == 'string' and hasattr(param.keys, 'enum'):\n s += '\\nWhere valid key names are:\\n'\n for value in param.keys.enum:\n s += ' %s\\n' % value\n return s\n\n def _split_on_commas(self, value):\n try:\n return utils.split_on_commas(value)\n except ValueError as e:\n raise ParamSyntaxError(str(e))\n\n\ndef unpack_cli_arg(parameter, value):\n \"\"\"\n Parses and unpacks the encoded string command line parameter\n and returns native Python data structures that can be passed\n to the Operation.\n\n :type parameter: :class:`botocore.parameter.Parameter`\n :param parameter: The parameter object containing metadata about\n the parameter.\n\n :param value: The value of the parameter. This can be a number of\n different python types (str, list, etc). 
This is the value as\n it's specified on the command line.\n\n :return: The \"unpacked\" argument than can be sent to the `Operation`\n object in python.\n \"\"\"\n if parameter.type in SCALAR_TYPES:\n return unpack_scalar_cli_arg(parameter, value)\n elif parameter.type in COMPLEX_TYPES:\n return unpack_complex_cli_arg(parameter, value)\n else:\n return str(value)\n\n\ndef unpack_complex_cli_arg(parameter, value):\n if parameter.type == 'structure' or parameter.type == 'map':\n if value.lstrip()[0] == '{':\n d = json.loads(value)\n else:\n msg = 'The value for parameter \"%s\" must be JSON or path to file.' % (\n parameter.cli_name)\n raise ValueError(msg)\n return d\n elif parameter.type == 'list':\n if isinstance(value, six.string_types):\n if value.lstrip()[0] == '[':\n return json.loads(value)\n elif isinstance(value, list) and len(value) == 1:\n single_value = value[0].strip()\n if single_value and single_value[0] == '[':\n return json.loads(value[0])\n return [unpack_cli_arg(parameter.members, v) for v in value]\n\n\ndef unpack_scalar_cli_arg(parameter, value):\n if parameter.type == 'integer' or parameter.type == 'long':\n return int(value)\n elif parameter.type == 'float' or parameter.type == 'double':\n # TODO: losing precision on double types\n return float(value)\n elif parameter.type == 'blob' and parameter.payload and parameter.streaming:\n file_path = os.path.expandvars(value)\n file_path = os.path.expanduser(file_path)\n if not os.path.isfile(file_path):\n msg = 'Blob values must be a path to a file.'\n raise ValueError(msg)\n return open(file_path, 'rb')\n elif parameter.type == 'boolean':\n return bool(value)\n else:\n return str(value)\n", "path": "awscli/argprocess.py"}]} |
gh_patches_debug_1358 | rasdani/github-patches | git_diff | pennersr__django-allauth-3084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add python 3.10 support
Would maintainers be willing to add Python 3.10 to the tox tests and official support? You can add this with the `py310` tag, according to tox's [changelog](https://tox.wiki/en/latest/changelog.html#id158).
--- END ISSUE ---
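As a rough illustration of the tox side of such a change (the envlist below is an assumption, not django-allauth's actual tox.ini), extending an envlist with `py310` could look like this:
```python
# Illustrative only: read a tox-style envlist and append py310 if missing.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[tox]
envlist = py37,py38,py39
""")

envs = [env.strip() for env in config["tox"]["envlist"].split(",")]
if "py310" not in envs:
    envs.append("py310")
config["tox"]["envlist"] = ",".join(envs)
print(config["tox"]["envlist"])  # py37,py38,py39,py310
```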
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import io
5 import os
6 import sys
7 from distutils.util import convert_path
8 from fnmatch import fnmatchcase
9
10 from setuptools import find_packages, setup
11
12
13 # Provided as an attribute, so you can append to these instead
14 # of replicating them:
15 standard_exclude = ["*.py", "*.pyc", "*~", ".*", "*.bak", "Makefile"]
16 standard_exclude_directories = [
17 ".*",
18 "CVS",
19 "_darcs",
20 "./build",
21 "./dist",
22 "EGG-INFO",
23 "*.egg-info",
24 "./example",
25 ]
26
27
28 # Copied from paste/util/finddata.py
29 def find_package_data(
30 where=".",
31 package="",
32 exclude=standard_exclude,
33 exclude_directories=standard_exclude_directories,
34 only_in_packages=True,
35 show_ignored=False,
36 ):
37 """
38 Return a dictionary suitable for use in ``package_data``
39 in a distutils ``setup.py`` file.
40
41 The dictionary looks like::
42
43 {"package": [files]}
44
45 Where ``files`` is a list of all the files in that package that
46 don't match anything in ``exclude``.
47
48 If ``only_in_packages`` is true, then top-level directories that
49 are not packages won't be included (but directories under packages
50 will).
51
52 Directories matching any pattern in ``exclude_directories`` will
53 be ignored; by default directories with leading ``.``, ``CVS``,
54 and ``_darcs`` will be ignored.
55
56 If ``show_ignored`` is true, then all the files that aren't
57 included in package data are shown on stderr (for debugging
58 purposes).
59
60 Note patterns use wildcards, or can be exact paths (including
61 leading ``./``), and all searching is case-insensitive.
62 """
63
64 out = {}
65 stack = [(convert_path(where), "", package, only_in_packages)]
66 while stack:
67 where, prefix, package, only_in_packages = stack.pop(0)
68 for name in os.listdir(where):
69 fn = os.path.join(where, name)
70 if os.path.isdir(fn):
71 bad_name = False
72 for pattern in exclude_directories:
73 if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
74 bad_name = True
75 if show_ignored:
76 print(
77 "Directory %s ignored by pattern %s" % (fn, pattern),
78 file=sys.stderr,
79 )
80 break
81 if bad_name:
82 continue
83 if os.path.isfile(os.path.join(fn, "__init__.py")) and not prefix:
84 if not package:
85 new_package = name
86 else:
87 new_package = package + "." + name
88 stack.append((fn, "", new_package, False))
89 else:
90 stack.append((fn, prefix + name + "/", package, only_in_packages))
91 elif package or not only_in_packages:
92 # is a file
93 bad_name = False
94 for pattern in exclude:
95 if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
96 bad_name = True
97 if show_ignored:
98 print(
99 "File %s ignored by pattern %s" % (fn, pattern),
100 file=sys.stderr,
101 )
102 break
103 if bad_name:
104 continue
105 out.setdefault(package, []).append(prefix + name)
106 return out
107
108
109 excluded_directories = standard_exclude_directories
110
111 package_data = find_package_data(exclude_directories=excluded_directories)
112
113 long_description = io.open("README.rst", encoding="utf-8").read()
114
115 # Dynamically calculate the version based on allauth.VERSION.
116 version = __import__("allauth").__version__
117
118 METADATA = dict(
119 name="django-allauth",
120 version=version,
121 author="Raymond Penners",
122 author_email="[email protected]",
123 description="Integrated set of Django applications addressing"
124 " authentication, registration, account management as well as"
125 " 3rd party (social) account authentication.",
126 long_description=long_description,
127 url="http://www.intenct.nl/projects/django-allauth/",
128 keywords="django auth account social openid twitter facebook oauth registration",
129 project_urls={
130 "Documentation": "https://django-allauth.readthedocs.io/en/latest/",
131 "Changelog": "https://github.com/pennersr/django-allauth/blob/master/ChangeLog.rst",
132 "Source": "http://github.com/pennersr/django-allauth",
133 "Tracker": "https://github.com/pennersr/django-allauth/issues",
134 "Donate": "https://github.com/sponsors/pennersr",
135 },
136 tests_require=[],
137 install_requires=[
138 "Django >= 2.0",
139 "python3-openid >= 3.0.8",
140 "requests-oauthlib >= 0.3.0",
141 "requests",
142 "pyjwt[crypto] >= 1.7",
143 ],
144 include_package_data=True,
145 classifiers=[
146 "Development Status :: 4 - Beta",
147 "Intended Audience :: Developers",
148 "Topic :: Software Development :: Libraries :: Python Modules",
149 "Environment :: Web Environment",
150 "Topic :: Internet",
151 "License :: OSI Approved :: MIT License",
152 "Operating System :: OS Independent",
153 "Programming Language :: Python",
154 "Programming Language :: Python :: 3",
155 "Programming Language :: Python :: 3.5",
156 "Programming Language :: Python :: 3.6",
157 "Programming Language :: Python :: 3.7",
158 "Programming Language :: Python :: 3.8",
159 "Programming Language :: Python :: 3.9",
160 "Framework :: Django",
161 "Framework :: Django :: 2.0",
162 "Framework :: Django :: 2.1",
163 "Framework :: Django :: 2.2",
164 "Framework :: Django :: 3.0",
165 "Framework :: Django :: 3.1",
166 "Framework :: Django :: 3.2",
167 "Framework :: Django :: 4.0",
168 ],
169 packages=find_packages(exclude=["example"]),
170 package_data=package_data,
171 )
172
173 if __name__ == "__main__":
174 setup(**METADATA)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -157,6 +157,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Framework :: Django",
"Framework :: Django :: 2.0",
"Framework :: Django :: 2.1",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -157,6 +157,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n", "issue": "Add python 3.10 support\nWould maintainers be willing to add python 3.10 to tox tests and official support? You can add this with the `py310` tag according the tox's [changelog](https://tox.wiki/en/latest/changelog.html#id158). \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport io\nimport os\nimport sys\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom setuptools import find_packages, setup\n\n\n# Provided as an attribute, so you can append to these instead\n# of replicating them:\nstandard_exclude = [\"*.py\", \"*.pyc\", \"*~\", \".*\", \"*.bak\", \"Makefile\"]\nstandard_exclude_directories = [\n \".*\",\n \"CVS\",\n \"_darcs\",\n \"./build\",\n \"./dist\",\n \"EGG-INFO\",\n \"*.egg-info\",\n \"./example\",\n]\n\n\n# Copied from paste/util/finddata.py\ndef find_package_data(\n where=\".\",\n package=\"\",\n exclude=standard_exclude,\n exclude_directories=standard_exclude_directories,\n only_in_packages=True,\n show_ignored=False,\n):\n \"\"\"\n Return a dictionary suitable for use in ``package_data``\n in a distutils ``setup.py`` file.\n\n The dictionary looks like::\n\n {\"package\": [files]}\n\n Where ``files`` is a list of all the files in that package that\n don't match anything in ``exclude``.\n\n If ``only_in_packages`` is true, then top-level directories that\n are not packages won't be included (but directories under packages\n will).\n\n Directories matching any pattern in ``exclude_directories`` will\n be ignored; by default directories with leading ``.``, ``CVS``,\n and ``_darcs`` will be ignored.\n\n If ``show_ignored`` is true, then all the files that aren't\n included in package data are shown on stderr (for debugging\n purposes).\n\n Note patterns use wildcards, or can be exact paths (including\n leading ``./``), and all searching is case-insensitive.\n \"\"\"\n\n out = {}\n stack = [(convert_path(where), \"\", package, only_in_packages)]\n while stack:\n where, prefix, package, only_in_packages = stack.pop(0)\n for name in os.listdir(where):\n fn = os.path.join(where, name)\n if os.path.isdir(fn):\n bad_name = False\n for pattern in exclude_directories:\n if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():\n bad_name = True\n if show_ignored:\n print(\n \"Directory %s ignored by pattern %s\" % (fn, pattern),\n file=sys.stderr,\n )\n break\n if bad_name:\n continue\n if os.path.isfile(os.path.join(fn, \"__init__.py\")) and not prefix:\n if not package:\n new_package = name\n else:\n new_package = package + \".\" + name\n stack.append((fn, \"\", new_package, False))\n else:\n stack.append((fn, prefix + name + \"/\", package, only_in_packages))\n elif package or not only_in_packages:\n # is a file\n bad_name = False\n for pattern in exclude:\n if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():\n bad_name = True\n if show_ignored:\n print(\n \"File %s ignored by pattern %s\" % (fn, pattern),\n file=sys.stderr,\n )\n break\n if bad_name:\n continue\n out.setdefault(package, []).append(prefix + name)\n return out\n\n\nexcluded_directories = 
standard_exclude_directories\n\npackage_data = find_package_data(exclude_directories=excluded_directories)\n\nlong_description = io.open(\"README.rst\", encoding=\"utf-8\").read()\n\n# Dynamically calculate the version based on allauth.VERSION.\nversion = __import__(\"allauth\").__version__\n\nMETADATA = dict(\n name=\"django-allauth\",\n version=version,\n author=\"Raymond Penners\",\n author_email=\"[email protected]\",\n description=\"Integrated set of Django applications addressing\"\n \" authentication, registration, account management as well as\"\n \" 3rd party (social) account authentication.\",\n long_description=long_description,\n url=\"http://www.intenct.nl/projects/django-allauth/\",\n keywords=\"django auth account social openid twitter facebook oauth registration\",\n project_urls={\n \"Documentation\": \"https://django-allauth.readthedocs.io/en/latest/\",\n \"Changelog\": \"https://github.com/pennersr/django-allauth/blob/master/ChangeLog.rst\",\n \"Source\": \"http://github.com/pennersr/django-allauth\",\n \"Tracker\": \"https://github.com/pennersr/django-allauth/issues\",\n \"Donate\": \"https://github.com/sponsors/pennersr\",\n },\n tests_require=[],\n install_requires=[\n \"Django >= 2.0\",\n \"python3-openid >= 3.0.8\",\n \"requests-oauthlib >= 0.3.0\",\n \"requests\",\n \"pyjwt[crypto] >= 1.7\",\n ],\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Environment :: Web Environment\",\n \"Topic :: Internet\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n ],\n packages=find_packages(exclude=[\"example\"]),\n package_data=package_data,\n)\n\nif __name__ == \"__main__\":\n setup(**METADATA)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport io\nimport os\nimport sys\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom setuptools import find_packages, setup\n\n\n# Provided as an attribute, so you can append to these instead\n# of replicating them:\nstandard_exclude = [\"*.py\", \"*.pyc\", \"*~\", \".*\", \"*.bak\", \"Makefile\"]\nstandard_exclude_directories = [\n \".*\",\n \"CVS\",\n \"_darcs\",\n \"./build\",\n \"./dist\",\n \"EGG-INFO\",\n \"*.egg-info\",\n \"./example\",\n]\n\n\n# Copied from paste/util/finddata.py\ndef find_package_data(\n where=\".\",\n package=\"\",\n exclude=standard_exclude,\n exclude_directories=standard_exclude_directories,\n only_in_packages=True,\n show_ignored=False,\n):\n \"\"\"\n Return a dictionary suitable for use in ``package_data``\n in a distutils ``setup.py`` file.\n\n The dictionary looks like::\n\n {\"package\": [files]}\n\n Where ``files`` is a list of all the files in that package that\n don't match anything in ``exclude``.\n\n If ``only_in_packages`` is true, then top-level directories 
that\n are not packages won't be included (but directories under packages\n will).\n\n Directories matching any pattern in ``exclude_directories`` will\n be ignored; by default directories with leading ``.``, ``CVS``,\n and ``_darcs`` will be ignored.\n\n If ``show_ignored`` is true, then all the files that aren't\n included in package data are shown on stderr (for debugging\n purposes).\n\n Note patterns use wildcards, or can be exact paths (including\n leading ``./``), and all searching is case-insensitive.\n \"\"\"\n\n out = {}\n stack = [(convert_path(where), \"\", package, only_in_packages)]\n while stack:\n where, prefix, package, only_in_packages = stack.pop(0)\n for name in os.listdir(where):\n fn = os.path.join(where, name)\n if os.path.isdir(fn):\n bad_name = False\n for pattern in exclude_directories:\n if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():\n bad_name = True\n if show_ignored:\n print(\n \"Directory %s ignored by pattern %s\" % (fn, pattern),\n file=sys.stderr,\n )\n break\n if bad_name:\n continue\n if os.path.isfile(os.path.join(fn, \"__init__.py\")) and not prefix:\n if not package:\n new_package = name\n else:\n new_package = package + \".\" + name\n stack.append((fn, \"\", new_package, False))\n else:\n stack.append((fn, prefix + name + \"/\", package, only_in_packages))\n elif package or not only_in_packages:\n # is a file\n bad_name = False\n for pattern in exclude:\n if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():\n bad_name = True\n if show_ignored:\n print(\n \"File %s ignored by pattern %s\" % (fn, pattern),\n file=sys.stderr,\n )\n break\n if bad_name:\n continue\n out.setdefault(package, []).append(prefix + name)\n return out\n\n\nexcluded_directories = standard_exclude_directories\n\npackage_data = find_package_data(exclude_directories=excluded_directories)\n\nlong_description = io.open(\"README.rst\", encoding=\"utf-8\").read()\n\n# Dynamically calculate the version based on allauth.VERSION.\nversion = __import__(\"allauth\").__version__\n\nMETADATA = dict(\n name=\"django-allauth\",\n version=version,\n author=\"Raymond Penners\",\n author_email=\"[email protected]\",\n description=\"Integrated set of Django applications addressing\"\n \" authentication, registration, account management as well as\"\n \" 3rd party (social) account authentication.\",\n long_description=long_description,\n url=\"http://www.intenct.nl/projects/django-allauth/\",\n keywords=\"django auth account social openid twitter facebook oauth registration\",\n project_urls={\n \"Documentation\": \"https://django-allauth.readthedocs.io/en/latest/\",\n \"Changelog\": \"https://github.com/pennersr/django-allauth/blob/master/ChangeLog.rst\",\n \"Source\": \"http://github.com/pennersr/django-allauth\",\n \"Tracker\": \"https://github.com/pennersr/django-allauth/issues\",\n \"Donate\": \"https://github.com/sponsors/pennersr\",\n },\n tests_require=[],\n install_requires=[\n \"Django >= 2.0\",\n \"python3-openid >= 3.0.8\",\n \"requests-oauthlib >= 0.3.0\",\n \"requests\",\n \"pyjwt[crypto] >= 1.7\",\n ],\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Environment :: Web Environment\",\n \"Topic :: Internet\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 
3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n ],\n packages=find_packages(exclude=[\"example\"]),\n package_data=package_data,\n)\n\nif __name__ == \"__main__\":\n setup(**METADATA)\n", "path": "setup.py"}]} |
gh_patches_debug_1359 | rasdani/github-patches | git_diff | ESMCI__cime-699 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
code_checker return code is wrong
It needs to return a non-zero error code if any of the checked files had problems.
--- END ISSUE ---
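A minimal sketch, under stated assumptions, of the exit-status behavior the issue asks for: a checker driver that exits non-zero when any file had problems. This is not the actual CIME `code_checker` code; the `check_file` helper is hypothetical.
```python
# Hypothetical driver: run a per-file check and fold the results into the
# process exit status so callers and CI can detect failures.
import sys

def check_file(path):
    """Placeholder for the real per-file check; returns a list of problems."""
    return []

def main(paths):
    num_problem_files = sum(1 for p in paths if check_file(p))
    # Non-zero exit status when any checked file reported problems.
    return 1 if num_problem_files else 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
```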
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/case.py`
Content:
```
1 """
2 Wrapper around all env XML for a case.
3
4 All interaction with and between the module files in XML/ takes place
5 through the Case module.
6 """
7 from copy import deepcopy
8 import glob, os, shutil, traceback
9 from CIME.XML.standard_module_setup import *
10
11 from CIME.utils import expect, get_cime_root, append_status
12 from CIME.utils import convert_to_type, get_model, get_project
13 from CIME.utils import get_build_threaded, get_current_commit
14 from CIME.XML.build import Build
15 from CIME.XML.machines import Machines
16 from CIME.XML.pes import Pes
17 from CIME.XML.files import Files
18 from CIME.XML.component import Component
19 from CIME.XML.compsets import Compsets
20 from CIME.XML.grids import Grids
21 from CIME.XML.batch import Batch
22 from CIME.XML.pio import PIO
23
24 from CIME.XML.env_test import EnvTest
25 from CIME.XML.env_mach_specific import EnvMachSpecific
26 from CIME.XML.env_case import EnvCase
27 from CIME.XML.env_mach_pes import EnvMachPes
28 from CIME.XML.env_build import EnvBuild
29 from CIME.XML.env_run import EnvRun
30 from CIME.XML.env_archive import EnvArchive
31 from CIME.XML.env_batch import EnvBatch
32
33 from CIME.user_mod_support import apply_user_mods
34 from CIME.case_setup import case_setup
35
36 logger = logging.getLogger(__name__)
37
38 class Case(object):
39 """
40 https://github.com/ESMCI/cime/wiki/Developers-Introduction
41 The Case class is the heart of the CIME Case Control system. All
42 interactions with a Case take part through this class. All of the
43 variables used to create and manipulate a case are defined in xml
44 files and for every xml file there is a python class to interact
45 with that file.
46
47 XML files which are part of the CIME distribution and are meant to
48 be readonly with respect to a case are typically named
49 config_something.xml and the corresponding python Class is
50 Something and can be found in file CIME.XML.something.py. I'll
51 refer to these as the CIME config classes.
52
53 XML files which are part of a case and thus are read/write to a
54 case are typically named env_whatever.xml and the cooresponding
55 python modules are CIME.XML.env_whatever.py and classes are
56 EnvWhatever. I'll refer to these as the Case env classes.
57
58 The Case Class includes an array of the Case env classes, in the
59 configure function and it's supporting functions defined below
60 the case object creates and manipulates the Case env classes
61 by reading and interpreting the CIME config classes.
62
63 """
64 def __init__(self, case_root=None, read_only=True):
65
66 if case_root is None:
67 case_root = os.getcwd()
68 self._caseroot = case_root
69 logger.debug("Initializing Case.")
70 self._env_files_that_need_rewrite = set()
71 self._read_only_mode = True
72 self._force_read_only = read_only
73
74 self._env_entryid_files = []
75 self._env_generic_files = []
76 self._files = []
77
78 self.read_xml()
79
80 # Hold arbitary values. In create_newcase we may set values
81 # for xml files that haven't been created yet. We need a place
82 # to store them until we are ready to create the file. At file
83 # creation we get the values for those fields from this lookup
84 # table and then remove the entry.
85 self.lookups = {}
86 self.set_lookup_value('CIMEROOT',os.path.abspath(get_cime_root()))
87
88 self._compsetname = None
89 self._gridname = None
90 self._compsetsfile = None
91 self._pesfile = None
92 self._gridfile = None
93 self._components = []
94 self._component_classes = []
95
96 # Define __enter__ and __exit__ so that we can use this as a context manager
97 # and force a flush on exit.
98 def __enter__(self):
99 if not self._force_read_only:
100 self._read_only_mode = False
101 return self
102
103 def __exit__(self, *_):
104 self.flush()
105 self._read_only_mode = True
106 return False
107
108 def schedule_rewrite(self, env_file):
109 assert not self._read_only_mode, \
110 "case.py scripts error: attempted to modify an env file while in " \
111 "read-only mode"
112 self._env_files_that_need_rewrite.add(env_file)
113
114 def read_xml(self):
115 if(len(self._env_files_that_need_rewrite)>0):
116 files = ""
117 for env_file in self._env_files_that_need_rewrite:
118 files += " "+env_file.filename
119 expect(False,"Object(s) %s seem to have newer data than the corresponding case file"%files)
120
121 self._env_entryid_files = []
122 self._env_entryid_files.append(EnvRun(self._caseroot))
123 self._env_entryid_files.append(EnvBuild(self._caseroot))
124 self._env_entryid_files.append(EnvMachPes(self._caseroot))
125 self._env_entryid_files.append(EnvCase(self._caseroot))
126 self._env_entryid_files.append(EnvBatch(self._caseroot))
127 if os.path.isfile(os.path.join(self._caseroot,"env_test.xml")):
128 self._env_entryid_files.append(EnvTest(self._caseroot))
129 self._env_generic_files = []
130 self._env_generic_files.append(EnvMachSpecific(self._caseroot))
131 self._env_generic_files.append(EnvArchive(self._caseroot))
132 self._files = self._env_entryid_files + self._env_generic_files
133
134 def get_case_root(self):
135 """Returns the root directory for this case."""
136 return self._caseroot
137
138 def get_env(self, short_name):
139 full_name = "env_%s.xml" % (short_name)
140 for env_file in self._files:
141 if os.path.basename(env_file.filename) == full_name:
142 return env_file
143
144 expect(False, "Could not find object for %s in case"%full_name)
145
146 def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None):
147 newcase = deepcopy(self)
148 for env_file in newcase._files: # pylint: disable=protected-access
149 basename = os.path.basename(env_file.filename)
150 env_file.filename = os.path.join(newcaseroot,basename)
151
152 if newcimeroot is not None:
153 newcase.set_value("CIMEROOT", newcimeroot)
154
155 if newsrcroot is not None:
156 newcase.set_value("SRCROOT", newsrcroot)
157
158 newcase.set_value("CASE",newcasename)
159 newcase.set_value("CASEROOT",newcaseroot)
160 newcase.set_value("CONTINUE_RUN","FALSE")
161 newcase.set_value("RESUBMIT",0)
162 return newcase
163
164 def flush(self, flushall=False):
165 if not os.path.isdir(self._caseroot):
166 # do not flush if caseroot wasnt created
167 return
168 if flushall:
169 for env_file in self._files:
170 self.schedule_rewrite(env_file)
171 for env_file in self._env_files_that_need_rewrite:
172 env_file.write()
173 self._env_files_that_need_rewrite = set()
174
175 def get_values(self, item, attribute=None, resolved=True, subgroup=None):
176 results = []
177 for env_file in self._env_entryid_files:
178 # Wait and resolve in self rather than in env_file
179 results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)
180
181 if len(results) > 0:
182 new_results = []
183 vtype = env_file.get_type_info(item)
184 if resolved:
185 for result in results:
186 if type(result) is str:
187 result = self.get_resolved_value(result)
188 new_results.append(convert_to_type(result, vtype, item))
189 else:
190 new_results.append(result)
191 else:
192 new_results = results
193 return new_results
194
195 for env_file in self._env_generic_files:
196 results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)
197 if len(results) > 0:
198 if resolved:
199 for result in results:
200 if type(result) is str:
201 new_results.append(self.get_resolved_value(result))
202 else:
203 new_results.append(result)
204 else:
205 new_results = results
206 return new_results
207 # Return empty result
208 return results
209
210 def get_value(self, item, attribute=None, resolved=True, subgroup=None):
211 result = None
212 for env_file in self._env_entryid_files:
213 # Wait and resolve in self rather than in env_file
214 result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)
215
216 if result is not None:
217 if resolved and type(result) is str:
218 result = self.get_resolved_value(result)
219 vtype = env_file.get_type_info(item)
220 result = convert_to_type(result, vtype, item)
221 return result
222
223 for env_file in self._env_generic_files:
224
225 result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)
226
227 if result is not None:
228 if resolved and type(result) is str:
229 return self.get_resolved_value(result)
230 return result
231
232 # Return empty result
233 return result
234
235
236 def get_full_records(self, item=None, attribute=None, resolved=True, subgroup=None):
237
238 """
239 Return info object for given item, return all info for all item if item is empty.
240 """
241
242 logger.debug("(get_full_records) Input values: %s , %s , %s , %s , %s" , self.__class__.__name__ , item, attribute, resolved, subgroup)
243
244 # Empty result list
245 results = []
246
247 for env_file in self._env_entryid_files:
248 # Wait and resolve in self rather than in env_file
249 logger.debug("(get_full_records) Searching in %s" , env_file.__class__.__name__)
250 result = None
251
252 try:
253 # env_batch has its own implementation of get_full_records otherwise in entry_id
254 result = env_file.get_full_records(item, attribute, resolved=False, subgroup=subgroup)
255 # Method exists, and was used.
256 except AttributeError:
257 # Method does not exist. What now?
258 traceback.print_exc()
259 logger.debug("(get_full_records) No get_full_records method for class %s (%s)" , env_file.__class__.__name__ , AttributeError)
260
261 if result is not None and (len(result) >= 1):
262
263 if resolved :
264 for r in result :
265 if type(r['value']) is str:
266 logger.debug("(get_full_records) Resolving %s" , r['value'])
267 r['value'] = self.get_resolved_value(r['value'])
268
269 if subgroup :
270 found = []
271 for r in result :
272 if r['group'] == subgroup :
273 found.append(r)
274 results += found
275 else:
276 results = results + result
277
278 logger.debug("(get_full_records) Return value: %s" , results )
279 return results
280
281 def get_type_info(self, item):
282 result = None
283 for env_file in self._env_entryid_files:
284 result = env_file.get_type_info(item)
285 if result is not None:
286 return result
287
288         logging.debug("Not able to retrieve type for item '%s'" % item)
289
290 def get_resolved_value(self, item, recurse=0):
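            # Resolve embedded "$VAR" references by querying each entry-id env file,
            # recursing until nothing is left to expand or recurse_limit is hit, at
            # which point env_batch is tried as a last resort.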
291 num_unresolved = item.count("$")
292 recurse_limit = 10
293 if (num_unresolved > 0 and recurse < recurse_limit ):
294 for env_file in self._env_entryid_files:
295 item = env_file.get_resolved_value(item)
296 if ("$" not in item):
297 return item
298 else:
299 item = self.get_resolved_value(item,recurse=recurse+1)
300
301 if recurse >= 2*recurse_limit:
302 logging.warning("Not able to fully resolve item '%s'" % item)
303 elif recurse >= recurse_limit:
304 #try env_batch first
305 env_batch = self.get_env("batch")
306 item = env_batch.get_resolved_value(item)
307 logger.debug("item is %s, checking env_batch"%item)
308 if item is not None:
309 if ("$" not in item):
310 return item
311 else:
312 item = self.get_resolved_value(item,recurse=recurse+1)
313 else:
314 logging.warning("Not able to fully resolve item '%s'" % item)
315
316 return item
317
318 def set_value(self, item, value, subgroup=None, ignore_type=False):
319 """
320 If a file has been defined, and the variable is in the file,
321 then that value will be set in the file object and the file
322 name is returned
323 """
324 if item == "CASEROOT":
325 self._caseroot = value
326 result = None
327 for env_file in self._env_entryid_files:
328 result = env_file.set_value(item, value, subgroup, ignore_type)
329 if (result is not None):
330 logger.debug("Will rewrite file %s %s",env_file.filename, item)
331 self._env_files_that_need_rewrite.add(env_file)
332 return result
333
334 def set_valid_values(self, item, valid_values):
335 """
336 Update or create a valid_values entry for item and populate it
337 """
338 if item == "CASEROOT":
339 self._caseroot = value
340 result = None
341 for env_file in self._env_entryid_files:
342 result = env_file.set_valid_values(item, valid_values)
343 if (result is not None):
344 logger.debug("Will rewrite file %s %s",env_file.filename, item)
345 self._env_files_that_need_rewrite.add(env_file)
346 return result
347
348 def set_lookup_value(self, item, value):
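            # Park values for xml files that may not exist yet; they are applied to the
            # env files later (see _get_component_config_data) and removed from lookups.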
349 if item in self.lookups.keys() and self.lookups[item] is not None:
350 logger.warn("Item %s already in lookups with value %s"%(item,self.lookups[item]))
351 else:
352 self.lookups[item] = value
353
354
355 def _set_compset_and_pesfile(self, compset_name, user_compset=False, pesfile=None):
356 """
357         Loop through all the compset files and find the compset
358         specification file that matches the input 'compset_name'.
359         Note that the input compset name (i.e. compset_name) can be
360         either a longname or an alias. This will also set the
361         compsets and pes specification files.
362 """
363 files = Files()
364 components = files.get_components("COMPSETS_SPEC_FILE")
365 logger.debug(" Possible components for COMPSETS_SPEC_FILE are %s" % components)
366
367 # Loop through all of the files listed in COMPSETS_SPEC_FILE and find the file
368 # that has a match for either the alias or the longname in that order
369 for component in components:
370
371 # Determine the compsets file for this component
372 compsets_filename = files.get_value("COMPSETS_SPEC_FILE", {"component":component})
373
374 # If the file exists, read it and see if there is a match for the compset alias or longname
375 if (os.path.isfile(compsets_filename)):
376 compsets = Compsets(compsets_filename)
377 match = compsets.get_compset_match(name=compset_name)
378 pesfile = files.get_value("PES_SPEC_FILE" , {"component":component})
379 if match is not None:
380 self._pesfile = pesfile
381 self._compsetsfile = compsets_filename
382 self._compsetname = match
383 tests_filename = files.get_value("TESTS_SPEC_FILE" , {"component":component}, resolved=False)
384 tests_mods_dir = files.get_value("TESTS_MODS_DIR" , {"component":component}, resolved=False)
385 user_mods_dir = files.get_value("USER_MODS_DIR" , {"component":component}, resolved=False)
386 self.set_lookup_value("COMPSETS_SPEC_FILE" ,
387 files.get_value("COMPSETS_SPEC_FILE", {"component":component}, resolved=False))
388 self.set_lookup_value("TESTS_SPEC_FILE" , tests_filename)
389 self.set_lookup_value("TESTS_MODS_DIR" , tests_mods_dir)
390 self.set_lookup_value("USER_MODS_DIR" , user_mods_dir)
391 self.set_lookup_value("PES_SPEC_FILE" ,
392 files.get_value("PES_SPEC_FILE" , {"component":component}, resolved=False))
393 logger.info("Compset longname is %s " %(match))
394 logger.info("Compset specification file is %s" %(compsets_filename))
395 logger.info("Pes specification file is %s" %(pesfile))
396 return
397
398 if user_compset is True:
399 #Do not error out for user_compset
400 logger.warn("Could not find a compset match for either alias or longname in %s" %(compset_name))
401 self._compsetname = compset_name
402 self._pesfile = pesfile
403 self.set_lookup_value("PES_SPEC_FILE", pesfile)
404 else:
405 expect(False,
406 "Could not find a compset match for either alias or longname in %s" %(compset_name))
407
408
409 def get_compset_components(self):
410         # If we are doing a create_clone, then self._compsetname is not set yet
411 components = []
412 compset = self.get_value("COMPSET")
413 if compset is None:
414 compset = self._compsetname
415 expect(compset is not None,
416 "ERROR: compset is not set")
417 # the first element is always the date operator - skip it
418 elements = compset.split('_')[1:] # pylint: disable=maybe-no-member
419 for element in elements:
420 # ignore the possible BGC or TEST modifier
421 if element.startswith("BGC%") or element.startswith("TEST"):
422 continue
423 else:
424 element_component = element.split('%')[0].lower()
425 element_component = re.sub(r'[0-9]*',"",element_component)
426 components.append(element_component)
427 return components
428
429
430 def __iter__(self):
431 for entryid_file in self._env_entryid_files:
432 for key, val in entryid_file:
433 if type(val) is str and '$' in val:
434 yield key, self.get_resolved_value(val)
435 else:
436 yield key, val
437
438
439 def _get_component_config_data(self):
440 # attributes used for multi valued defaults ($attlist is a hash reference)
441 attlist = {"compset":self._compsetname, "grid":self._gridname}
442
443 # Determine list of component classes that this coupler/driver knows how
444 # to deal with. This list follows the same order as compset longnames follow.
445 files = Files()
446 # Add the group and elements for the config_files.xml
447 for env_file in self._env_entryid_files:
448 env_file.add_elements_by_group(files, attlist)
449
450 drv_config_file = files.get_value("CONFIG_DRV_FILE")
451 drv_comp = Component(drv_config_file)
452 for env_file in self._env_entryid_files:
453 env_file.add_elements_by_group(drv_comp, attributes=attlist)
454
455 # loop over all elements of both component_classes and components - and get config_component_file for
456 # for each component
457 self._component_classes =drv_comp.get_valid_model_components()
458 if len(self._component_classes) > len(self._components):
459 self._components.append('sesp')
460
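            # self._component_classes has the driver first, so entry i in that list
            # pairs with entry i-1 of self._components below.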
461 for i in xrange(1,len(self._component_classes)):
462 comp_class = self._component_classes[i]
463 comp_name = self._components[i-1]
464 node_name = 'CONFIG_' + comp_class + '_FILE'
465 # Add the group and elements for the config_files.xml
466 comp_config_file = files.get_value(node_name, {"component":comp_name}, resolved=False)
467 self.set_value(node_name, comp_config_file)
468 comp_config_file = self.get_resolved_value(comp_config_file)
469 expect(comp_config_file is not None,"No config file for component %s"%comp_name)
470 compobj = Component(comp_config_file)
471 for env_file in self._env_entryid_files:
472 env_file.add_elements_by_group(compobj, attributes=attlist)
473
474
475 for key,value in self.lookups.items():
476 result = self.set_value(key,value)
477 if result is not None:
478 del self.lookups[key]
479
480 def get_components(self):
481 """
482 return dictionary of the form [component_class:component],
483 e.g. [atm:cam], for all compset components
484 """
485
486 files = Files()
487 drv_comp = Component(files.get_value("CONFIG_DRV_FILE"))
488
489 # Determine list of component classes that this coupler/driver knows how
490 # to deal with. This list follows the same order as compset longnames follow.
491 component_classes = drv_comp.get_valid_model_components()
492 components = self.get_compset_components()
493
494 # Note that component classes can have a bigger range than
495         # components since stub esp (sesp) is an optional component - so
496 # need to take the min of the two below
497 comp_dict = {}
498 for i in xrange(0,len(components)):
499 comp_name = components[i]
500 comp_class = component_classes[i+1]
501 comp_dict[comp_class] = comp_name
502 return comp_dict
503
504 def configure(self, compset_name, grid_name, machine_name=None,
505 project=None, pecount=None, compiler=None, mpilib=None,
506 user_compset=False, pesfile=None,
507 user_grid=False, gridfile=None, ninst=1, test=False,
508 walltime=None, queue=None):
509
510 #--------------------------------------------
511 # compset, pesfile, and compset components
512 #--------------------------------------------
513 self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)
514
515 self._components = self.get_compset_components()
516 #FIXME - if --user-compset is True then need to determine that
517 #all of the compset settings are valid
518
519 #--------------------------------------------
520 # grid
521 #--------------------------------------------
522 if user_grid is True and gridfile is not None:
523 self.set_value("GRIDS_SPEC_FILE", gridfile)
524 grids = Grids(gridfile)
525
526 gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)
527
528 self._gridname = gridinfo["GRID"]
529 for key,value in gridinfo.items():
530 logger.debug("Set grid %s %s"%(key,value))
531 self.set_lookup_value(key,value)
532
533 #--------------------------------------------
534 # component config data
535 #--------------------------------------------
536 self._get_component_config_data()
537
538 self.get_compset_var_settings()
539
540 #--------------------------------------------
541 # machine
542 #--------------------------------------------
543 # set machine values in env_xxx files
544 machobj = Machines(machine=machine_name)
545 machine_name = machobj.get_machine_name()
546 self.set_value("MACH",machine_name)
547 nodenames = machobj.get_node_names()
548 nodenames = [x for x in nodenames if
549 '_system' not in x and '_variables' not in x and 'mpirun' not in x and\
550 'COMPILER' not in x and 'MPILIB' not in x]
551
552 for nodename in nodenames:
553 value = machobj.get_value(nodename, resolved=False)
554 type_str = self.get_type_info(nodename)
555 if type_str is not None:
556                 logger.debug("machine nodename %s value %s"%(nodename, value))
557 self.set_value(nodename, convert_to_type(value, type_str, nodename))
558
559 if compiler is None:
560 compiler = machobj.get_default_compiler()
561 else:
562 expect(machobj.is_valid_compiler(compiler),
563 "compiler %s is not supported on machine %s" %(compiler, machine_name))
564
565 self.set_value("COMPILER",compiler)
566
567 if mpilib is None:
568 mpilib = machobj.get_default_MPIlib({"compiler":compiler})
569 else:
570 expect(machobj.is_valid_MPIlib(mpilib, {"compiler":compiler}),
571 "MPIlib %s is not supported on machine %s" %(mpilib, machine_name))
572 self.set_value("MPILIB",mpilib)
573
574 machdir = machobj.get_machines_dir()
575 self.set_value("MACHDIR", machdir)
576
577 # Create env_mach_specific settings from machine info.
578 env_mach_specific_obj = self.get_env("mach_specific")
579 env_mach_specific_obj.populate(machobj)
580 self.schedule_rewrite(env_mach_specific_obj)
581
582 #--------------------------------------------
583         # pe layout
584 #--------------------------------------------
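            # pecount may be "TASKSxTHREADS" (e.g. "128x2") or a plain task count;
            # anything else falls through to the layout defined in the PES spec file.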
585 match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
586 match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
587 pes_ntasks = {}
588 pes_nthrds = {}
589 pes_rootpe = {}
590 if match1:
591 opti_tasks = match1.group(1)
592 opti_thrds = match1.group(2)
593 elif match2:
594 opti_tasks = match2.group(1)
595 opti_thrds = 1
596
597 other = {}
598 if match1 or match2:
599 for component_class in self._component_classes:
600 if component_class == "DRV":
601 component_class = "CPL"
602 string = "NTASKS_" + component_class
603 pes_ntasks[string] = opti_tasks
604 string = "NTHRDS_" + component_class
605 pes_nthrds[string] = opti_thrds
606 string = "ROOTPE_" + component_class
607 pes_rootpe[string] = 0
608 else:
609 pesobj = Pes(self._pesfile)
610
611 pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(self._gridname, self._compsetname,
612 machine_name, pesize_opts=pecount)
613
614 mach_pes_obj = self.get_env("mach_pes")
615 totaltasks = {}
616 # Since other items may include PES_PER_NODE we need to do this first
617 # we can get rid of this code when all of the perl is removed
618 for key, value in other.items():
619 self.set_value(key, value)
620 pes_per_node = self.get_value("PES_PER_NODE")
621 for key, value in pes_ntasks.items():
622 totaltasks[key[-3:]] = int(value)
623 mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)
624 for key, value in pes_rootpe.items():
625 totaltasks[key[-3:]] += int(value)
626 mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)
627 for key, value in pes_nthrds.items():
628 totaltasks[key[-3:]] *= int(value)
629 mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)
630
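            # Track the largest total PE count across components (negative task counts
            # are interpreted as whole nodes and scaled by pes_per_node); maxval is
            # later used to choose batch job defaults.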
631 maxval = 1
632 if mpilib != "mpi-serial":
633 for key, val in totaltasks.items():
634 if val < 0:
635 val = -1*val*pes_per_node
636 if val > maxval:
637 maxval = val
638
639 # Make sure that every component has been accounted for
640         # and set nthrds and ntasks to 1 otherwise. Also set the ninst values here.
641 for compclass in self._component_classes:
642 if compclass == "DRV":
643 continue
644 key = "NINST_%s"%compclass
645 mach_pes_obj.set_value(key, ninst)
646 key = "NTASKS_%s"%compclass
647 if key not in pes_ntasks.keys():
648 mach_pes_obj.set_value(key,1)
649 key = "NTHRDS_%s"%compclass
650 if compclass not in pes_nthrds.keys():
651 mach_pes_obj.set_value(compclass,1)
652
653 # FIXME - this is a short term fix for dealing with the restriction that
654 # CISM1 cannot run on multiple cores
655 if "CISM1" in self._compsetname:
656 mach_pes_obj.set_value("NTASKS_GLC",1)
657 mach_pes_obj.set_value("NTHRDS_GLC",1)
658
659 #--------------------------------------------
660 # batch system
661 #--------------------------------------------
662 batch_system_type = machobj.get_value("BATCH_SYSTEM")
663 batch = Batch(batch_system=batch_system_type, machine=machine_name)
664 bjobs = batch.get_batch_jobs()
665 env_batch = self.get_env("batch")
666 env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
667 env_batch.create_job_groups(bjobs)
668 env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue)
669 self.schedule_rewrite(env_batch)
670
671 self.set_value("COMPSET",self._compsetname)
672
673 self._set_pio_xml()
674 logger.info(" Compset is: %s " %self._compsetname)
675 logger.info(" Grid is: %s " %self._gridname )
676 logger.info(" Components in compset are: %s " %self._components)
677
678 # Set project id
679 if project is None:
680 project = get_project(machobj)
681 if project is not None:
682 self.set_value("PROJECT", project)
683 elif machobj.get_value("PROJECT_REQUIRED"):
684 expect(project is not None, "PROJECT_REQUIRED is true but no project found")
685
686 # Overwriting an existing exeroot or rundir can cause problems
687 exeroot = self.get_value("EXEROOT")
688 rundir = self.get_value("RUNDIR")
689 for wdir in (exeroot, rundir):
690 logging.debug("wdir is %s"%wdir)
691 if os.path.exists(wdir):
692 expect(not test, "Directory %s already exists, aborting test"% wdir)
693 response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?"% wdir)
694 if response.startswith("r"):
695 shutil.rmtree(wdir)
696 else:
697 expect(response.startswith("u"), "Aborting by user request")
698
699 # miscellaneous settings
700 if self.get_value("RUN_TYPE") == 'hybrid':
701 self.set_value("GET_REFCASE", True)
702
703 # Turn on short term archiving as cesm default setting
704 model = get_model()
705 self.set_model_version(model)
706 if model == "cesm" and not test:
707 self.set_value("DOUT_S",True)
708 if test:
709 self.set_value("TEST",True)
710
711
712 def get_compset_var_settings(self):
713 compset_obj = Compsets(infile=self.get_value("COMPSETS_SPEC_FILE"))
714 matches = compset_obj.get_compset_var_settings(self._compsetname, self._gridname)
715 for name, value in matches:
716 if len(value) > 0:
717 logger.debug("Compset specific settings: name is %s and value is %s"%(name,value))
718 self.set_value(name, value)
719
720 def set_initial_test_values(self):
721 testobj = self.get_env("test")
722 testobj.set_initial_values(self)
723
724 def get_batch_jobs(self):
725 batchobj = self.get_env("batch")
726 return batchobj.get_jobs()
727
728 def _set_pio_xml(self):
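            # Fill in PIO-related xml variables with defaults that match this case's
            # grid, compset, machine, compiler and mpilib.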
729 pioobj = PIO()
730 grid = self.get_value("GRID")
731 compiler = self.get_value("COMPILER")
732 mach = self.get_value("MACH")
733 compset = self.get_value("COMPSET")
734 mpilib = self.get_value("MPILIB")
735 defaults = pioobj.get_defaults(grid=grid,compset=compset,mach=mach,compiler=compiler, mpilib=mpilib)
736 for vid, value in defaults.items():
737 self.set_value(vid,value)
738
739 def _create_caseroot_tools(self):
740 machines_dir = os.path.abspath(self.get_value("MACHDIR"))
741 toolsdir = os.path.join(self.get_value("CIMEROOT"),"scripts","Tools")
742 # setup executable files in caseroot/
743 exefiles = (os.path.join(toolsdir, "case.setup"),
744 os.path.join(toolsdir, "case.build"),
745 os.path.join(toolsdir, "case.submit"),
746 os.path.join(toolsdir, "preview_namelists"),
747 os.path.join(toolsdir, "check_input_data"),
748 os.path.join(toolsdir, "check_case"),
749 os.path.join(toolsdir, "archive_metadata.sh"),
750 os.path.join(toolsdir, "xmlchange"),
751 os.path.join(toolsdir, "xmlquery"))
752 try:
753 for exefile in exefiles:
754 destfile = os.path.join(self._caseroot,os.path.basename(exefile))
755 os.symlink(exefile, destfile)
756 except Exception as e:
757 logger.warning("FAILED to set up exefiles: %s" % str(e))
758
759 # set up utility files in caseroot/Tools/
760 toolfiles = (os.path.join(toolsdir, "check_lockedfiles"),
761 os.path.join(toolsdir, "lt_archive.sh"),
762 os.path.join(toolsdir, "getTiming"),
763 os.path.join(toolsdir, "save_provenance"),
764 os.path.join(machines_dir,"Makefile"),
765 os.path.join(machines_dir,"mkSrcfiles"),
766 os.path.join(machines_dir,"mkDepends"))
767
768 for toolfile in toolfiles:
769 destfile = os.path.join(self._caseroot,"Tools",os.path.basename(toolfile))
770 expect(os.path.isfile(toolfile)," File %s does not exist"%toolfile)
771 try:
772 os.symlink(toolfile, destfile)
773 except Exception as e:
774 logger.warning("FAILED to set up toolfiles: %s %s %s" % (str(e), toolfile, destfile))
775
776 # Create Macros file.
777 machine = self.get_value("MACH")
778 files = Files()
779 # Use config_build if the environment variable is set, or if there is no
780 # config_compilers file.
781 if os.getenv("CIME_USE_CONFIG_BUILD") == "TRUE" or \
782 files.get_value("COMPILERS_SPEC_FILE") is None:
783 build_file = files.get_value("BUILD_SPEC_FILE")
784 machobj = Machines(machine=machine, files=files)
785 macro_maker = Build(machobj)
786 macros_path = os.path.join(self._caseroot, "Macros")
787 with open(macros_path, "w") as macros_file:
788 macro_maker.write_macros('Makefile', build_file, macros_file)
789
790 # Copy any system or compiler Depends files to the case.
791 compiler = self.get_value("COMPILER")
792 for dep in (machine, compiler):
793 dfile = "Depends.%s"%dep
794 if os.path.isfile(os.path.join(machines_dir,dfile)):
795 shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot,dfile))
796 dfile = "Depends.%s.%s"%(machine,compiler)
797 if os.path.isfile(os.path.join(machines_dir,dfile)):
798 shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot, dfile))
799         # set up info files
800 # infofiles = os.path.join(os.path.join(toolsdir, README.post_process")
801 #FIXME - the following does not work
802 # print "DEBUG: infofiles are ",infofiles
803 # try:
804 # for infofile in infofiles:
805 # print "DEBUG: infofile is %s, %s" %(infofile, os.path.basename(infofile))
806 # dst_file = caseroot + "/" + os.path.basename(infofile)
807 # shutil.copyfile(infofile, dst_file)
808 # os.chmod(dst_file, os.stat(dst_file).st_mode | stat.S_IXUSR | stat.S_IXGRP)
809 # except Exception as e:
810 # logger.warning("FAILED to set up infofiles: %s" % str(e))
811
812 def _create_caseroot_sourcemods(self):
813 components = self.get_compset_components()
814 for component in components:
815 directory = os.path.join(self._caseroot,"SourceMods","src.%s"%component)
816 if not os.path.exists(directory):
817 os.makedirs(directory)
818
819 directory = os.path.join(self._caseroot, "SourceMods", "src.share")
820 if not os.path.exists(directory):
821 os.makedirs(directory)
822
823 directory = os.path.join(self._caseroot,"SourceMods","src.drv")
824 if not os.path.exists(directory):
825 os.makedirs(directory)
826
827 if get_model() == "cesm":
828             # Note: this is CESM specific, given that we are referencing cism explicitly
829 if "cism" in components:
830 directory = os.path.join(self._caseroot, "SourceMods", "src.cism", "glimmer-cism")
831 if not os.path.exists(directory):
832 os.makedirs(directory)
833 readme_file = os.path.join(directory, "README")
834
835 str_to_write = """
836 Put source mods for the glimmer-cism library in the glimmer-cism subdirectory
837 This includes any files that are in the glimmer-cism subdirectory of $cimeroot/../components/cism
838 Anything else (e.g., mods to source_glc or drivers) goes in this directory, NOT in glimmer-cism/"""
839
840 with open(readme_file, "w") as fd:
841 fd.write(str_to_write)
842
843 def create_caseroot(self, clone=False):
844 if not os.path.exists(self._caseroot):
845 # Make the case directory
846 logger.info(" Creating Case directory %s" %self._caseroot)
847 os.makedirs(self._caseroot)
848 os.chdir(self._caseroot)
849
850 # Create relevant directories in $self._caseroot
851 if clone:
852 newdirs = ("LockedFiles", "Tools")
853 else:
854 newdirs = ("SourceMods", "LockedFiles", "Buildconf", "Tools")
855 for newdir in newdirs:
856 os.makedirs(newdir)
857 # Open a new README.case file in $self._caseroot
858
859 append_status(" ".join(sys.argv), caseroot=self._caseroot, sfile="README.case")
860 append_status("Compset longname is %s"%self.get_value("COMPSET"),
861 caseroot=self._caseroot, sfile="README.case")
862 append_status("Compset specification file is %s" %
863 (self.get_value("COMPSETS_SPEC_FILE")),
864 caseroot=self._caseroot, sfile="README.case")
865 append_status("Pes specification file is %s" %
866 (self.get_value("PES_SPEC_FILE")),
867 caseroot=self._caseroot, sfile="README.case")
868 for component_class in self._component_classes:
869 if component_class == "DRV":
870 continue
871 comp_grid = "%s_GRID"%component_class
872 append_status("%s is %s"%(comp_grid,self.get_value(comp_grid)),
873 caseroot=self._caseroot, sfile="README.case")
874 if not clone:
875 self._create_caseroot_sourcemods()
876 self._create_caseroot_tools()
877
878 def apply_user_mods(self, user_mods_dir=None):
879 if user_mods_dir is not None:
880 if os.path.isabs(user_mods_dir):
881 user_mods_path = user_mods_dir
882 else:
883 user_mods_path = self.get_value('USER_MODS_DIR')
884 user_mods_path = os.path.join(user_mods_path, user_mods_dir)
885 self.set_value("USER_MODS_FULLPATH",user_mods_path)
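                # Collect instance counts greater than one for each component so they
                # can be passed to apply_user_mods below.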
886 ninst_vals = {}
887 for i in xrange(1,len(self._component_classes)):
888 comp_class = self._component_classes[i]
889 comp_name = self._components[i-1]
890 if comp_class == "DRV":
891 continue
892 ninst_comp = self.get_value("NINST_%s"%comp_class)
893 if ninst_comp > 1:
894 ninst_vals[comp_name] = ninst_comp
895 apply_user_mods(self._caseroot, user_mods_path, ninst_vals)
896
897 def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None):
898
899 newcaseroot = os.path.abspath(newcase)
900 expect(not os.path.isdir(newcaseroot),
901 "New caseroot directory %s already exists" % newcaseroot)
902 newcasename = os.path.basename(newcaseroot)
903 newcase_cimeroot = os.path.abspath(get_cime_root())
904
905 # create clone from self to case
906 clone_cimeroot = self.get_value("CIMEROOT")
907 if newcase_cimeroot != clone_cimeroot:
908 logger.warning(" case CIMEROOT is %s " %newcase_cimeroot)
909 logger.warning(" clone CIMEROOT is %s " %clone_cimeroot)
910 logger.warning(" It is NOT recommended to clone cases from different versions of CIME.")
911
912
913 # *** create case object as deepcopy of clone object ***
914 srcroot = os.path.join(newcase_cimeroot,"..")
915 newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot)
916 newcase.set_value("CIMEROOT", newcase_cimeroot)
917
918         # determine whether the clone's executable will be reused
919 if keepexe:
920 orig_exeroot = self.get_value("EXEROOT")
921 newcase.set_value("EXEROOT", orig_exeroot)
922 newcase.set_value("BUILD_COMPLETE","TRUE")
923 else:
924 newcase.set_value("BUILD_COMPLETE","FALSE")
925
926 # set machdir
927 if mach_dir is not None:
928 newcase.set_value("MACHDIR", mach_dir)
929
930 # Set project id
931 # Note: we do not just copy this from the clone because it seems likely that
932 # users will want to change this sometimes, especially when cloning another
933 # user's case. However, note that, if a project is not given, the fallback will
934 # be to copy it from the clone, just like other xml variables are copied.
935 if project is None:
936 project = self.get_value("PROJECT", subgroup="case.run")
937 if project is not None:
938 newcase.set_value("PROJECT", project)
939
940 # create caseroot
941 newcase.create_caseroot(clone=True)
942 newcase.flush(flushall=True)
943
944 # copy user_nl_files
945 cloneroot = self._caseroot
946 files = glob.glob(cloneroot + '/user_nl_*')
947 for item in files:
948 shutil.copy(item, newcaseroot)
949
950 # copy SourceMod and Buildconf files
951 for casesub in ("SourceMods", "Buildconf"):
952 shutil.copytree(os.path.join(cloneroot, casesub), os.path.join(newcaseroot, casesub))
953
954 # copy env_case.xml to LockedFiles
955 shutil.copy(os.path.join(newcaseroot,"env_case.xml"), os.path.join(newcaseroot,"LockedFiles"))
956
957 # Update README.case
958 fclone = open(cloneroot + "/README.case", "r")
959 fnewcase = open(newcaseroot + "/README.case", "a")
960 fnewcase.write("\n *** original clone README follows ****")
961 fnewcase.write("\n " + fclone.read())
962
963 clonename = self.get_value("CASE")
964 logger.info(" Successfully created new case %s from clone case %s " %(newcasename, clonename))
965
966 case_setup(newcase, clean=False, test_mode=False)
967
968 return newcase
969
970 def submit_jobs(self, no_batch=False, job=None):
971 env_batch = self.get_env('batch')
972 env_batch.submit_jobs(self, no_batch=no_batch, job=job)
973
974 def get_mpirun_cmd(self, job="case.run"):
975 env_mach_specific = self.get_env('mach_specific')
976 run_exe = env_mach_specific.get_value("run_exe")
977 run_misc_suffix = env_mach_specific.get_value("run_misc_suffix")
978 run_misc_suffix = "" if run_misc_suffix is None else run_misc_suffix
979 run_suffix = run_exe + run_misc_suffix
980
981 # Things that will have to be matched against mpirun element attributes
982 mpi_attribs = {
983 "compiler" : self.get_value("COMPILER"),
984 "mpilib" : self.get_value("MPILIB"),
985 "threaded" : get_build_threaded(self)
986 }
987
988 executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job)
989
990 mpi_arg_string = " ".join(args.values())
991
992
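            # For the cobalt batch system an extra ":" separator is appended between
            # the launcher arguments and the run command.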
993 if self.get_value("BATCH_SYSTEM") == "cobalt":
994 mpi_arg_string += " : "
995
996 return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix)
997
998
999 def set_model_version(self, model):
1000 version = "unknown"
1001 srcroot = self.get_value("SRCROOT")
1002 if model == "cesm":
1003 changelog = os.path.join(srcroot,"ChangeLog")
1004 if os.path.isfile(changelog):
1005 for line in open(changelog, "r"):
1006 m = re.search("Tag name: (cesm.*)$", line)
1007 if m is not None:
1008 version = m.group(1)
1009 break
1010 elif model == "acme":
1011 version = get_current_commit(True, srcroot)
1012 self.set_value("MODEL_VERSION", version)
1013 if version != "unknown":
1014 logger.info("%s model version found: %s"%(model, version))
1015 else:
1016 logger.warn("WARNING: No %s Model version found."%(model))
1017
1018
1019
1020
1021
1022
1023
1024
1025
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/python/CIME/case.py b/utils/python/CIME/case.py
--- a/utils/python/CIME/case.py
+++ b/utils/python/CIME/case.py
@@ -335,8 +335,6 @@
"""
Update or create a valid_values entry for item and populate it
"""
- if item == "CASEROOT":
- self._caseroot = value
result = None
for env_file in self._env_entryid_files:
result = env_file.set_valid_values(item, valid_values)
| {"golden_diff": "diff --git a/utils/python/CIME/case.py b/utils/python/CIME/case.py\n--- a/utils/python/CIME/case.py\n+++ b/utils/python/CIME/case.py\n@@ -335,8 +335,6 @@\n \"\"\"\n Update or create a valid_values entry for item and populate it\n \"\"\"\n- if item == \"CASEROOT\":\n- self._caseroot = value\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.set_valid_values(item, valid_values)\n", "issue": "code_checker return code is wrong\nNeeds to return non-zero error code if files had problems.\n\n", "before_files": [{"content": "\"\"\"\nWrapper around all env XML for a case.\n\nAll interaction with and between the module files in XML/ takes place\nthrough the Case module.\n\"\"\"\nfrom copy import deepcopy\nimport glob, os, shutil, traceback\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import expect, get_cime_root, append_status\nfrom CIME.utils import convert_to_type, get_model, get_project\nfrom CIME.utils import get_build_threaded, get_current_commit\nfrom CIME.XML.build import Build\nfrom CIME.XML.machines import Machines\nfrom CIME.XML.pes import Pes\nfrom CIME.XML.files import Files\nfrom CIME.XML.component import Component\nfrom CIME.XML.compsets import Compsets\nfrom CIME.XML.grids import Grids\nfrom CIME.XML.batch import Batch\nfrom CIME.XML.pio import PIO\n\nfrom CIME.XML.env_test import EnvTest\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\nfrom CIME.XML.env_case import EnvCase\nfrom CIME.XML.env_mach_pes import EnvMachPes\nfrom CIME.XML.env_build import EnvBuild\nfrom CIME.XML.env_run import EnvRun\nfrom CIME.XML.env_archive import EnvArchive\nfrom CIME.XML.env_batch import EnvBatch\n\nfrom CIME.user_mod_support import apply_user_mods\nfrom CIME.case_setup import case_setup\n\nlogger = logging.getLogger(__name__)\n\nclass Case(object):\n \"\"\"\n https://github.com/ESMCI/cime/wiki/Developers-Introduction\n The Case class is the heart of the CIME Case Control system. All\n interactions with a Case take part through this class. All of the\n variables used to create and manipulate a case are defined in xml\n files and for every xml file there is a python class to interact\n with that file.\n\n XML files which are part of the CIME distribution and are meant to\n be readonly with respect to a case are typically named\n config_something.xml and the corresponding python Class is\n Something and can be found in file CIME.XML.something.py. I'll\n refer to these as the CIME config classes.\n\n XML files which are part of a case and thus are read/write to a\n case are typically named env_whatever.xml and the cooresponding\n python modules are CIME.XML.env_whatever.py and classes are\n EnvWhatever. I'll refer to these as the Case env classes.\n\n The Case Class includes an array of the Case env classes, in the\n configure function and it's supporting functions defined below\n the case object creates and manipulates the Case env classes\n by reading and interpreting the CIME config classes.\n\n \"\"\"\n def __init__(self, case_root=None, read_only=True):\n\n if case_root is None:\n case_root = os.getcwd()\n self._caseroot = case_root\n logger.debug(\"Initializing Case.\")\n self._env_files_that_need_rewrite = set()\n self._read_only_mode = True\n self._force_read_only = read_only\n\n self._env_entryid_files = []\n self._env_generic_files = []\n self._files = []\n\n self.read_xml()\n\n # Hold arbitary values. In create_newcase we may set values\n # for xml files that haven't been created yet. 
We need a place\n # to store them until we are ready to create the file. At file\n # creation we get the values for those fields from this lookup\n # table and then remove the entry.\n self.lookups = {}\n self.set_lookup_value('CIMEROOT',os.path.abspath(get_cime_root()))\n\n self._compsetname = None\n self._gridname = None\n self._compsetsfile = None\n self._pesfile = None\n self._gridfile = None\n self._components = []\n self._component_classes = []\n\n # Define __enter__ and __exit__ so that we can use this as a context manager\n # and force a flush on exit.\n def __enter__(self):\n if not self._force_read_only:\n self._read_only_mode = False\n return self\n\n def __exit__(self, *_):\n self.flush()\n self._read_only_mode = True\n return False\n\n def schedule_rewrite(self, env_file):\n assert not self._read_only_mode, \\\n \"case.py scripts error: attempted to modify an env file while in \" \\\n \"read-only mode\"\n self._env_files_that_need_rewrite.add(env_file)\n\n def read_xml(self):\n if(len(self._env_files_that_need_rewrite)>0):\n files = \"\"\n for env_file in self._env_files_that_need_rewrite:\n files += \" \"+env_file.filename\n expect(False,\"Object(s) %s seem to have newer data than the corresponding case file\"%files)\n\n self._env_entryid_files = []\n self._env_entryid_files.append(EnvRun(self._caseroot))\n self._env_entryid_files.append(EnvBuild(self._caseroot))\n self._env_entryid_files.append(EnvMachPes(self._caseroot))\n self._env_entryid_files.append(EnvCase(self._caseroot))\n self._env_entryid_files.append(EnvBatch(self._caseroot))\n if os.path.isfile(os.path.join(self._caseroot,\"env_test.xml\")):\n self._env_entryid_files.append(EnvTest(self._caseroot))\n self._env_generic_files = []\n self._env_generic_files.append(EnvMachSpecific(self._caseroot))\n self._env_generic_files.append(EnvArchive(self._caseroot))\n self._files = self._env_entryid_files + self._env_generic_files\n\n def get_case_root(self):\n \"\"\"Returns the root directory for this case.\"\"\"\n return self._caseroot\n\n def get_env(self, short_name):\n full_name = \"env_%s.xml\" % (short_name)\n for env_file in self._files:\n if os.path.basename(env_file.filename) == full_name:\n return env_file\n\n expect(False, \"Could not find object for %s in case\"%full_name)\n\n def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None):\n newcase = deepcopy(self)\n for env_file in newcase._files: # pylint: disable=protected-access\n basename = os.path.basename(env_file.filename)\n env_file.filename = os.path.join(newcaseroot,basename)\n\n if newcimeroot is not None:\n newcase.set_value(\"CIMEROOT\", newcimeroot)\n\n if newsrcroot is not None:\n newcase.set_value(\"SRCROOT\", newsrcroot)\n\n newcase.set_value(\"CASE\",newcasename)\n newcase.set_value(\"CASEROOT\",newcaseroot)\n newcase.set_value(\"CONTINUE_RUN\",\"FALSE\")\n newcase.set_value(\"RESUBMIT\",0)\n return newcase\n\n def flush(self, flushall=False):\n if not os.path.isdir(self._caseroot):\n # do not flush if caseroot wasnt created\n return\n if flushall:\n for env_file in self._files:\n self.schedule_rewrite(env_file)\n for env_file in self._env_files_that_need_rewrite:\n env_file.write()\n self._env_files_that_need_rewrite = set()\n\n def get_values(self, item, attribute=None, resolved=True, subgroup=None):\n results = []\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)\n\n if len(results) > 0:\n 
new_results = []\n vtype = env_file.get_type_info(item)\n if resolved:\n for result in results:\n if type(result) is str:\n result = self.get_resolved_value(result)\n new_results.append(convert_to_type(result, vtype, item))\n else:\n new_results.append(result)\n else:\n new_results = results\n return new_results\n\n for env_file in self._env_generic_files:\n results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)\n if len(results) > 0:\n if resolved:\n for result in results:\n if type(result) is str:\n new_results.append(self.get_resolved_value(result))\n else:\n new_results.append(result)\n else:\n new_results = results\n return new_results\n # Return empty result\n return results\n\n def get_value(self, item, attribute=None, resolved=True, subgroup=None):\n result = None\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)\n\n if result is not None:\n if resolved and type(result) is str:\n result = self.get_resolved_value(result)\n vtype = env_file.get_type_info(item)\n result = convert_to_type(result, vtype, item)\n return result\n\n for env_file in self._env_generic_files:\n\n result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)\n\n if result is not None:\n if resolved and type(result) is str:\n return self.get_resolved_value(result)\n return result\n\n # Return empty result\n return result\n\n\n def get_full_records(self, item=None, attribute=None, resolved=True, subgroup=None):\n\n \"\"\"\n Return info object for given item, return all info for all item if item is empty.\n \"\"\"\n\n logger.debug(\"(get_full_records) Input values: %s , %s , %s , %s , %s\" , self.__class__.__name__ , item, attribute, resolved, subgroup)\n\n # Empty result list\n results = []\n\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n logger.debug(\"(get_full_records) Searching in %s\" , env_file.__class__.__name__)\n result = None\n\n try:\n # env_batch has its own implementation of get_full_records otherwise in entry_id\n result = env_file.get_full_records(item, attribute, resolved=False, subgroup=subgroup)\n # Method exists, and was used.\n except AttributeError:\n # Method does not exist. 
What now?\n traceback.print_exc()\n logger.debug(\"(get_full_records) No get_full_records method for class %s (%s)\" , env_file.__class__.__name__ , AttributeError)\n\n if result is not None and (len(result) >= 1):\n\n if resolved :\n for r in result :\n if type(r['value']) is str:\n logger.debug(\"(get_full_records) Resolving %s\" , r['value'])\n r['value'] = self.get_resolved_value(r['value'])\n\n if subgroup :\n found = []\n for r in result :\n if r['group'] == subgroup :\n found.append(r)\n results += found\n else:\n results = results + result\n\n logger.debug(\"(get_full_records) Return value: %s\" , results )\n return results\n\n def get_type_info(self, item):\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.get_type_info(item)\n if result is not None:\n return result\n\n logging.debug(\"Not able to retreive type for item '%s'\" % item)\n\n def get_resolved_value(self, item, recurse=0):\n num_unresolved = item.count(\"$\")\n recurse_limit = 10\n if (num_unresolved > 0 and recurse < recurse_limit ):\n for env_file in self._env_entryid_files:\n item = env_file.get_resolved_value(item)\n if (\"$\" not in item):\n return item\n else:\n item = self.get_resolved_value(item,recurse=recurse+1)\n\n if recurse >= 2*recurse_limit:\n logging.warning(\"Not able to fully resolve item '%s'\" % item)\n elif recurse >= recurse_limit:\n #try env_batch first\n env_batch = self.get_env(\"batch\")\n item = env_batch.get_resolved_value(item)\n logger.debug(\"item is %s, checking env_batch\"%item)\n if item is not None:\n if (\"$\" not in item):\n return item\n else:\n item = self.get_resolved_value(item,recurse=recurse+1)\n else:\n logging.warning(\"Not able to fully resolve item '%s'\" % item)\n\n return item\n\n def set_value(self, item, value, subgroup=None, ignore_type=False):\n \"\"\"\n If a file has been defined, and the variable is in the file,\n then that value will be set in the file object and the file\n name is returned\n \"\"\"\n if item == \"CASEROOT\":\n self._caseroot = value\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.set_value(item, value, subgroup, ignore_type)\n if (result is not None):\n logger.debug(\"Will rewrite file %s %s\",env_file.filename, item)\n self._env_files_that_need_rewrite.add(env_file)\n return result\n\n def set_valid_values(self, item, valid_values):\n \"\"\"\n Update or create a valid_values entry for item and populate it\n \"\"\"\n if item == \"CASEROOT\":\n self._caseroot = value\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.set_valid_values(item, valid_values)\n if (result is not None):\n logger.debug(\"Will rewrite file %s %s\",env_file.filename, item)\n self._env_files_that_need_rewrite.add(env_file)\n return result\n\n def set_lookup_value(self, item, value):\n if item in self.lookups.keys() and self.lookups[item] is not None:\n logger.warn(\"Item %s already in lookups with value %s\"%(item,self.lookups[item]))\n else:\n self.lookups[item] = value\n\n\n def _set_compset_and_pesfile(self, compset_name, user_compset=False, pesfile=None):\n \"\"\"\n Loop through all the compset files and find the compset\n specifation file that matches either the input 'compset_name'.\n Note that the input compset name (i.e. compset_name) can be\n either a longname or an alias. 
This will also set the\n compsets and pes specfication files.\n \"\"\"\n files = Files()\n components = files.get_components(\"COMPSETS_SPEC_FILE\")\n logger.debug(\" Possible components for COMPSETS_SPEC_FILE are %s\" % components)\n\n # Loop through all of the files listed in COMPSETS_SPEC_FILE and find the file\n # that has a match for either the alias or the longname in that order\n for component in components:\n\n # Determine the compsets file for this component\n compsets_filename = files.get_value(\"COMPSETS_SPEC_FILE\", {\"component\":component})\n\n # If the file exists, read it and see if there is a match for the compset alias or longname\n if (os.path.isfile(compsets_filename)):\n compsets = Compsets(compsets_filename)\n match = compsets.get_compset_match(name=compset_name)\n pesfile = files.get_value(\"PES_SPEC_FILE\" , {\"component\":component})\n if match is not None:\n self._pesfile = pesfile\n self._compsetsfile = compsets_filename\n self._compsetname = match\n tests_filename = files.get_value(\"TESTS_SPEC_FILE\" , {\"component\":component}, resolved=False)\n tests_mods_dir = files.get_value(\"TESTS_MODS_DIR\" , {\"component\":component}, resolved=False)\n user_mods_dir = files.get_value(\"USER_MODS_DIR\" , {\"component\":component}, resolved=False)\n self.set_lookup_value(\"COMPSETS_SPEC_FILE\" ,\n files.get_value(\"COMPSETS_SPEC_FILE\", {\"component\":component}, resolved=False))\n self.set_lookup_value(\"TESTS_SPEC_FILE\" , tests_filename)\n self.set_lookup_value(\"TESTS_MODS_DIR\" , tests_mods_dir)\n self.set_lookup_value(\"USER_MODS_DIR\" , user_mods_dir)\n self.set_lookup_value(\"PES_SPEC_FILE\" ,\n files.get_value(\"PES_SPEC_FILE\" , {\"component\":component}, resolved=False))\n logger.info(\"Compset longname is %s \" %(match))\n logger.info(\"Compset specification file is %s\" %(compsets_filename))\n logger.info(\"Pes specification file is %s\" %(pesfile))\n return\n\n if user_compset is True:\n #Do not error out for user_compset\n logger.warn(\"Could not find a compset match for either alias or longname in %s\" %(compset_name))\n self._compsetname = compset_name\n self._pesfile = pesfile\n self.set_lookup_value(\"PES_SPEC_FILE\", pesfile)\n else:\n expect(False,\n \"Could not find a compset match for either alias or longname in %s\" %(compset_name))\n\n\n def get_compset_components(self):\n #If are doing a create_clone then, self._compsetname is not set yet\n components = []\n compset = self.get_value(\"COMPSET\")\n if compset is None:\n compset = self._compsetname\n expect(compset is not None,\n \"ERROR: compset is not set\")\n # the first element is always the date operator - skip it\n elements = compset.split('_')[1:] # pylint: disable=maybe-no-member\n for element in elements:\n # ignore the possible BGC or TEST modifier\n if element.startswith(\"BGC%\") or element.startswith(\"TEST\"):\n continue\n else:\n element_component = element.split('%')[0].lower()\n element_component = re.sub(r'[0-9]*',\"\",element_component)\n components.append(element_component)\n return components\n\n\n def __iter__(self):\n for entryid_file in self._env_entryid_files:\n for key, val in entryid_file:\n if type(val) is str and '$' in val:\n yield key, self.get_resolved_value(val)\n else:\n yield key, val\n\n\n def _get_component_config_data(self):\n # attributes used for multi valued defaults ($attlist is a hash reference)\n attlist = {\"compset\":self._compsetname, \"grid\":self._gridname}\n\n # Determine list of component classes that this coupler/driver knows how\n # to deal with. 
This list follows the same order as compset longnames follow.\n files = Files()\n # Add the group and elements for the config_files.xml\n for env_file in self._env_entryid_files:\n env_file.add_elements_by_group(files, attlist)\n\n drv_config_file = files.get_value(\"CONFIG_DRV_FILE\")\n drv_comp = Component(drv_config_file)\n for env_file in self._env_entryid_files:\n env_file.add_elements_by_group(drv_comp, attributes=attlist)\n\n # loop over all elements of both component_classes and components - and get config_component_file for\n # for each component\n self._component_classes =drv_comp.get_valid_model_components()\n if len(self._component_classes) > len(self._components):\n self._components.append('sesp')\n\n for i in xrange(1,len(self._component_classes)):\n comp_class = self._component_classes[i]\n comp_name = self._components[i-1]\n node_name = 'CONFIG_' + comp_class + '_FILE'\n # Add the group and elements for the config_files.xml\n comp_config_file = files.get_value(node_name, {\"component\":comp_name}, resolved=False)\n self.set_value(node_name, comp_config_file)\n comp_config_file = self.get_resolved_value(comp_config_file)\n expect(comp_config_file is not None,\"No config file for component %s\"%comp_name)\n compobj = Component(comp_config_file)\n for env_file in self._env_entryid_files:\n env_file.add_elements_by_group(compobj, attributes=attlist)\n\n\n for key,value in self.lookups.items():\n result = self.set_value(key,value)\n if result is not None:\n del self.lookups[key]\n\n def get_components(self):\n \"\"\"\n return dictionary of the form [component_class:component],\n e.g. [atm:cam], for all compset components\n \"\"\"\n\n files = Files()\n drv_comp = Component(files.get_value(\"CONFIG_DRV_FILE\"))\n\n # Determine list of component classes that this coupler/driver knows how\n # to deal with. 
This list follows the same order as compset longnames follow.\n component_classes = drv_comp.get_valid_model_components()\n components = self.get_compset_components()\n\n # Note that component classes can have a bigger range than\n # compents since stub esp (sesp) is an optional component - so\n # need to take the min of the two below\n comp_dict = {}\n for i in xrange(0,len(components)):\n comp_name = components[i]\n comp_class = component_classes[i+1]\n comp_dict[comp_class] = comp_name\n return comp_dict\n\n def configure(self, compset_name, grid_name, machine_name=None,\n project=None, pecount=None, compiler=None, mpilib=None,\n user_compset=False, pesfile=None,\n user_grid=False, gridfile=None, ninst=1, test=False,\n walltime=None, queue=None):\n\n #--------------------------------------------\n # compset, pesfile, and compset components\n #--------------------------------------------\n self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)\n\n self._components = self.get_compset_components()\n #FIXME - if --user-compset is True then need to determine that\n #all of the compset settings are valid\n\n #--------------------------------------------\n # grid\n #--------------------------------------------\n if user_grid is True and gridfile is not None:\n self.set_value(\"GRIDS_SPEC_FILE\", gridfile)\n grids = Grids(gridfile)\n\n gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)\n\n self._gridname = gridinfo[\"GRID\"]\n for key,value in gridinfo.items():\n logger.debug(\"Set grid %s %s\"%(key,value))\n self.set_lookup_value(key,value)\n\n #--------------------------------------------\n # component config data\n #--------------------------------------------\n self._get_component_config_data()\n\n self.get_compset_var_settings()\n\n #--------------------------------------------\n # machine\n #--------------------------------------------\n # set machine values in env_xxx files\n machobj = Machines(machine=machine_name)\n machine_name = machobj.get_machine_name()\n self.set_value(\"MACH\",machine_name)\n nodenames = machobj.get_node_names()\n nodenames = [x for x in nodenames if\n '_system' not in x and '_variables' not in x and 'mpirun' not in x and\\\n 'COMPILER' not in x and 'MPILIB' not in x]\n\n for nodename in nodenames:\n value = machobj.get_value(nodename, resolved=False)\n type_str = self.get_type_info(nodename)\n if type_str is not None:\n logger.debug(\"machine nodname %s value %s\"%(nodename, value))\n self.set_value(nodename, convert_to_type(value, type_str, nodename))\n\n if compiler is None:\n compiler = machobj.get_default_compiler()\n else:\n expect(machobj.is_valid_compiler(compiler),\n \"compiler %s is not supported on machine %s\" %(compiler, machine_name))\n\n self.set_value(\"COMPILER\",compiler)\n\n if mpilib is None:\n mpilib = machobj.get_default_MPIlib({\"compiler\":compiler})\n else:\n expect(machobj.is_valid_MPIlib(mpilib, {\"compiler\":compiler}),\n \"MPIlib %s is not supported on machine %s\" %(mpilib, machine_name))\n self.set_value(\"MPILIB\",mpilib)\n\n machdir = machobj.get_machines_dir()\n self.set_value(\"MACHDIR\", machdir)\n\n # Create env_mach_specific settings from machine info.\n env_mach_specific_obj = self.get_env(\"mach_specific\")\n env_mach_specific_obj.populate(machobj)\n self.schedule_rewrite(env_mach_specific_obj)\n\n #--------------------------------------------\n # pe payout\n #--------------------------------------------\n match1 = re.match('([0-9]+)x([0-9]+)', \"\" if pecount is None 
else pecount)\n match2 = re.match('([0-9]+)', \"\" if pecount is None else pecount)\n pes_ntasks = {}\n pes_nthrds = {}\n pes_rootpe = {}\n if match1:\n opti_tasks = match1.group(1)\n opti_thrds = match1.group(2)\n elif match2:\n opti_tasks = match2.group(1)\n opti_thrds = 1\n\n other = {}\n if match1 or match2:\n for component_class in self._component_classes:\n if component_class == \"DRV\":\n component_class = \"CPL\"\n string = \"NTASKS_\" + component_class\n pes_ntasks[string] = opti_tasks\n string = \"NTHRDS_\" + component_class\n pes_nthrds[string] = opti_thrds\n string = \"ROOTPE_\" + component_class\n pes_rootpe[string] = 0\n else:\n pesobj = Pes(self._pesfile)\n\n pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(self._gridname, self._compsetname,\n machine_name, pesize_opts=pecount)\n\n mach_pes_obj = self.get_env(\"mach_pes\")\n totaltasks = {}\n # Since other items may include PES_PER_NODE we need to do this first\n # we can get rid of this code when all of the perl is removed\n for key, value in other.items():\n self.set_value(key, value)\n pes_per_node = self.get_value(\"PES_PER_NODE\")\n for key, value in pes_ntasks.items():\n totaltasks[key[-3:]] = int(value)\n mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)\n for key, value in pes_rootpe.items():\n totaltasks[key[-3:]] += int(value)\n mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)\n for key, value in pes_nthrds.items():\n totaltasks[key[-3:]] *= int(value)\n mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)\n\n maxval = 1\n if mpilib != \"mpi-serial\":\n for key, val in totaltasks.items():\n if val < 0:\n val = -1*val*pes_per_node\n if val > maxval:\n maxval = val\n\n # Make sure that every component has been accounted for\n # set, nthrds and ntasks to 1 otherwise. 
Also set the ninst values here.\n for compclass in self._component_classes:\n if compclass == \"DRV\":\n continue\n key = \"NINST_%s\"%compclass\n mach_pes_obj.set_value(key, ninst)\n key = \"NTASKS_%s\"%compclass\n if key not in pes_ntasks.keys():\n mach_pes_obj.set_value(key,1)\n key = \"NTHRDS_%s\"%compclass\n if compclass not in pes_nthrds.keys():\n mach_pes_obj.set_value(compclass,1)\n\n # FIXME - this is a short term fix for dealing with the restriction that\n # CISM1 cannot run on multiple cores\n if \"CISM1\" in self._compsetname:\n mach_pes_obj.set_value(\"NTASKS_GLC\",1)\n mach_pes_obj.set_value(\"NTHRDS_GLC\",1)\n\n #--------------------------------------------\n # batch system\n #--------------------------------------------\n batch_system_type = machobj.get_value(\"BATCH_SYSTEM\")\n batch = Batch(batch_system=batch_system_type, machine=machine_name)\n bjobs = batch.get_batch_jobs()\n env_batch = self.get_env(\"batch\")\n env_batch.set_batch_system(batch, batch_system_type=batch_system_type)\n env_batch.create_job_groups(bjobs)\n env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue)\n self.schedule_rewrite(env_batch)\n\n self.set_value(\"COMPSET\",self._compsetname)\n\n self._set_pio_xml()\n logger.info(\" Compset is: %s \" %self._compsetname)\n logger.info(\" Grid is: %s \" %self._gridname )\n logger.info(\" Components in compset are: %s \" %self._components)\n\n # Set project id\n if project is None:\n project = get_project(machobj)\n if project is not None:\n self.set_value(\"PROJECT\", project)\n elif machobj.get_value(\"PROJECT_REQUIRED\"):\n expect(project is not None, \"PROJECT_REQUIRED is true but no project found\")\n\n # Overwriting an existing exeroot or rundir can cause problems\n exeroot = self.get_value(\"EXEROOT\")\n rundir = self.get_value(\"RUNDIR\")\n for wdir in (exeroot, rundir):\n logging.debug(\"wdir is %s\"%wdir)\n if os.path.exists(wdir):\n expect(not test, \"Directory %s already exists, aborting test\"% wdir)\n response = raw_input(\"\\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?\"% wdir)\n if response.startswith(\"r\"):\n shutil.rmtree(wdir)\n else:\n expect(response.startswith(\"u\"), \"Aborting by user request\")\n\n # miscellaneous settings\n if self.get_value(\"RUN_TYPE\") == 'hybrid':\n self.set_value(\"GET_REFCASE\", True)\n\n # Turn on short term archiving as cesm default setting\n model = get_model()\n self.set_model_version(model)\n if model == \"cesm\" and not test:\n self.set_value(\"DOUT_S\",True)\n if test:\n self.set_value(\"TEST\",True)\n\n\n def get_compset_var_settings(self):\n compset_obj = Compsets(infile=self.get_value(\"COMPSETS_SPEC_FILE\"))\n matches = compset_obj.get_compset_var_settings(self._compsetname, self._gridname)\n for name, value in matches:\n if len(value) > 0:\n logger.debug(\"Compset specific settings: name is %s and value is %s\"%(name,value))\n self.set_value(name, value)\n\n def set_initial_test_values(self):\n testobj = self.get_env(\"test\")\n testobj.set_initial_values(self)\n\n def get_batch_jobs(self):\n batchobj = self.get_env(\"batch\")\n return batchobj.get_jobs()\n\n def _set_pio_xml(self):\n pioobj = PIO()\n grid = self.get_value(\"GRID\")\n compiler = self.get_value(\"COMPILER\")\n mach = self.get_value(\"MACH\")\n compset = self.get_value(\"COMPSET\")\n mpilib = self.get_value(\"MPILIB\")\n defaults = pioobj.get_defaults(grid=grid,compset=compset,mach=mach,compiler=compiler, mpilib=mpilib)\n for vid, value in defaults.items():\n 
self.set_value(vid,value)\n\n def _create_caseroot_tools(self):\n machines_dir = os.path.abspath(self.get_value(\"MACHDIR\"))\n toolsdir = os.path.join(self.get_value(\"CIMEROOT\"),\"scripts\",\"Tools\")\n # setup executable files in caseroot/\n exefiles = (os.path.join(toolsdir, \"case.setup\"),\n os.path.join(toolsdir, \"case.build\"),\n os.path.join(toolsdir, \"case.submit\"),\n os.path.join(toolsdir, \"preview_namelists\"),\n os.path.join(toolsdir, \"check_input_data\"),\n os.path.join(toolsdir, \"check_case\"),\n os.path.join(toolsdir, \"archive_metadata.sh\"),\n os.path.join(toolsdir, \"xmlchange\"),\n os.path.join(toolsdir, \"xmlquery\"))\n try:\n for exefile in exefiles:\n destfile = os.path.join(self._caseroot,os.path.basename(exefile))\n os.symlink(exefile, destfile)\n except Exception as e:\n logger.warning(\"FAILED to set up exefiles: %s\" % str(e))\n\n # set up utility files in caseroot/Tools/\n toolfiles = (os.path.join(toolsdir, \"check_lockedfiles\"),\n os.path.join(toolsdir, \"lt_archive.sh\"),\n os.path.join(toolsdir, \"getTiming\"),\n os.path.join(toolsdir, \"save_provenance\"),\n os.path.join(machines_dir,\"Makefile\"),\n os.path.join(machines_dir,\"mkSrcfiles\"),\n os.path.join(machines_dir,\"mkDepends\"))\n\n for toolfile in toolfiles:\n destfile = os.path.join(self._caseroot,\"Tools\",os.path.basename(toolfile))\n expect(os.path.isfile(toolfile),\" File %s does not exist\"%toolfile)\n try:\n os.symlink(toolfile, destfile)\n except Exception as e:\n logger.warning(\"FAILED to set up toolfiles: %s %s %s\" % (str(e), toolfile, destfile))\n\n # Create Macros file.\n machine = self.get_value(\"MACH\")\n files = Files()\n # Use config_build if the environment variable is set, or if there is no\n # config_compilers file.\n if os.getenv(\"CIME_USE_CONFIG_BUILD\") == \"TRUE\" or \\\n files.get_value(\"COMPILERS_SPEC_FILE\") is None:\n build_file = files.get_value(\"BUILD_SPEC_FILE\")\n machobj = Machines(machine=machine, files=files)\n macro_maker = Build(machobj)\n macros_path = os.path.join(self._caseroot, \"Macros\")\n with open(macros_path, \"w\") as macros_file:\n macro_maker.write_macros('Makefile', build_file, macros_file)\n\n # Copy any system or compiler Depends files to the case.\n compiler = self.get_value(\"COMPILER\")\n for dep in (machine, compiler):\n dfile = \"Depends.%s\"%dep\n if os.path.isfile(os.path.join(machines_dir,dfile)):\n shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot,dfile))\n dfile = \"Depends.%s.%s\"%(machine,compiler)\n if os.path.isfile(os.path.join(machines_dir,dfile)):\n shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot, dfile))\n # set up infon files\n # infofiles = os.path.join(os.path.join(toolsdir, README.post_process\")\n #FIXME - the following does not work\n # print \"DEBUG: infofiles are \",infofiles\n # try:\n # for infofile in infofiles:\n # print \"DEBUG: infofile is %s, %s\" %(infofile, os.path.basename(infofile))\n # dst_file = caseroot + \"/\" + os.path.basename(infofile)\n # shutil.copyfile(infofile, dst_file)\n # os.chmod(dst_file, os.stat(dst_file).st_mode | stat.S_IXUSR | stat.S_IXGRP)\n # except Exception as e:\n # logger.warning(\"FAILED to set up infofiles: %s\" % str(e))\n\n def _create_caseroot_sourcemods(self):\n components = self.get_compset_components()\n for component in components:\n directory = os.path.join(self._caseroot,\"SourceMods\",\"src.%s\"%component)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n directory = 
os.path.join(self._caseroot, \"SourceMods\", \"src.share\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n directory = os.path.join(self._caseroot,\"SourceMods\",\"src.drv\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if get_model() == \"cesm\":\n # Note: this is CESM specific, given that we are referencing cism explitly\n if \"cism\" in components:\n directory = os.path.join(self._caseroot, \"SourceMods\", \"src.cism\", \"glimmer-cism\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n readme_file = os.path.join(directory, \"README\")\n\n str_to_write = \"\"\"\n Put source mods for the glimmer-cism library in the glimmer-cism subdirectory\n This includes any files that are in the glimmer-cism subdirectory of $cimeroot/../components/cism\n Anything else (e.g., mods to source_glc or drivers) goes in this directory, NOT in glimmer-cism/\"\"\"\n\n with open(readme_file, \"w\") as fd:\n fd.write(str_to_write)\n\n def create_caseroot(self, clone=False):\n if not os.path.exists(self._caseroot):\n # Make the case directory\n logger.info(\" Creating Case directory %s\" %self._caseroot)\n os.makedirs(self._caseroot)\n os.chdir(self._caseroot)\n\n # Create relevant directories in $self._caseroot\n if clone:\n newdirs = (\"LockedFiles\", \"Tools\")\n else:\n newdirs = (\"SourceMods\", \"LockedFiles\", \"Buildconf\", \"Tools\")\n for newdir in newdirs:\n os.makedirs(newdir)\n # Open a new README.case file in $self._caseroot\n\n append_status(\" \".join(sys.argv), caseroot=self._caseroot, sfile=\"README.case\")\n append_status(\"Compset longname is %s\"%self.get_value(\"COMPSET\"),\n caseroot=self._caseroot, sfile=\"README.case\")\n append_status(\"Compset specification file is %s\" %\n (self.get_value(\"COMPSETS_SPEC_FILE\")),\n caseroot=self._caseroot, sfile=\"README.case\")\n append_status(\"Pes specification file is %s\" %\n (self.get_value(\"PES_SPEC_FILE\")),\n caseroot=self._caseroot, sfile=\"README.case\")\n for component_class in self._component_classes:\n if component_class == \"DRV\":\n continue\n comp_grid = \"%s_GRID\"%component_class\n append_status(\"%s is %s\"%(comp_grid,self.get_value(comp_grid)),\n caseroot=self._caseroot, sfile=\"README.case\")\n if not clone:\n self._create_caseroot_sourcemods()\n self._create_caseroot_tools()\n\n def apply_user_mods(self, user_mods_dir=None):\n if user_mods_dir is not None:\n if os.path.isabs(user_mods_dir):\n user_mods_path = user_mods_dir\n else:\n user_mods_path = self.get_value('USER_MODS_DIR')\n user_mods_path = os.path.join(user_mods_path, user_mods_dir)\n self.set_value(\"USER_MODS_FULLPATH\",user_mods_path)\n ninst_vals = {}\n for i in xrange(1,len(self._component_classes)):\n comp_class = self._component_classes[i]\n comp_name = self._components[i-1]\n if comp_class == \"DRV\":\n continue\n ninst_comp = self.get_value(\"NINST_%s\"%comp_class)\n if ninst_comp > 1:\n ninst_vals[comp_name] = ninst_comp\n apply_user_mods(self._caseroot, user_mods_path, ninst_vals)\n\n def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None):\n\n newcaseroot = os.path.abspath(newcase)\n expect(not os.path.isdir(newcaseroot),\n \"New caseroot directory %s already exists\" % newcaseroot)\n newcasename = os.path.basename(newcaseroot)\n newcase_cimeroot = os.path.abspath(get_cime_root())\n\n # create clone from self to case\n clone_cimeroot = self.get_value(\"CIMEROOT\")\n if newcase_cimeroot != clone_cimeroot:\n logger.warning(\" case CIMEROOT is %s \" %newcase_cimeroot)\n logger.warning(\" 
clone CIMEROOT is %s \" %clone_cimeroot)\n logger.warning(\" It is NOT recommended to clone cases from different versions of CIME.\")\n\n\n # *** create case object as deepcopy of clone object ***\n srcroot = os.path.join(newcase_cimeroot,\"..\")\n newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot)\n newcase.set_value(\"CIMEROOT\", newcase_cimeroot)\n\n # determine if will use clone executable or not\n if keepexe:\n orig_exeroot = self.get_value(\"EXEROOT\")\n newcase.set_value(\"EXEROOT\", orig_exeroot)\n newcase.set_value(\"BUILD_COMPLETE\",\"TRUE\")\n else:\n newcase.set_value(\"BUILD_COMPLETE\",\"FALSE\")\n\n # set machdir\n if mach_dir is not None:\n newcase.set_value(\"MACHDIR\", mach_dir)\n\n # Set project id\n # Note: we do not just copy this from the clone because it seems likely that\n # users will want to change this sometimes, especially when cloning another\n # user's case. However, note that, if a project is not given, the fallback will\n # be to copy it from the clone, just like other xml variables are copied.\n if project is None:\n project = self.get_value(\"PROJECT\", subgroup=\"case.run\")\n if project is not None:\n newcase.set_value(\"PROJECT\", project)\n\n # create caseroot\n newcase.create_caseroot(clone=True)\n newcase.flush(flushall=True)\n\n # copy user_nl_files\n cloneroot = self._caseroot\n files = glob.glob(cloneroot + '/user_nl_*')\n for item in files:\n shutil.copy(item, newcaseroot)\n\n # copy SourceMod and Buildconf files\n for casesub in (\"SourceMods\", \"Buildconf\"):\n shutil.copytree(os.path.join(cloneroot, casesub), os.path.join(newcaseroot, casesub))\n\n # copy env_case.xml to LockedFiles\n shutil.copy(os.path.join(newcaseroot,\"env_case.xml\"), os.path.join(newcaseroot,\"LockedFiles\"))\n\n # Update README.case\n fclone = open(cloneroot + \"/README.case\", \"r\")\n fnewcase = open(newcaseroot + \"/README.case\", \"a\")\n fnewcase.write(\"\\n *** original clone README follows ****\")\n fnewcase.write(\"\\n \" + fclone.read())\n\n clonename = self.get_value(\"CASE\")\n logger.info(\" Successfully created new case %s from clone case %s \" %(newcasename, clonename))\n\n case_setup(newcase, clean=False, test_mode=False)\n\n return newcase\n\n def submit_jobs(self, no_batch=False, job=None):\n env_batch = self.get_env('batch')\n env_batch.submit_jobs(self, no_batch=no_batch, job=job)\n\n def get_mpirun_cmd(self, job=\"case.run\"):\n env_mach_specific = self.get_env('mach_specific')\n run_exe = env_mach_specific.get_value(\"run_exe\")\n run_misc_suffix = env_mach_specific.get_value(\"run_misc_suffix\")\n run_misc_suffix = \"\" if run_misc_suffix is None else run_misc_suffix\n run_suffix = run_exe + run_misc_suffix\n\n # Things that will have to be matched against mpirun element attributes\n mpi_attribs = {\n \"compiler\" : self.get_value(\"COMPILER\"),\n \"mpilib\" : self.get_value(\"MPILIB\"),\n \"threaded\" : get_build_threaded(self)\n }\n\n executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job)\n\n mpi_arg_string = \" \".join(args.values())\n\n\n if self.get_value(\"BATCH_SYSTEM\") == \"cobalt\":\n mpi_arg_string += \" : \"\n\n return \"%s %s %s\" % (executable if executable is not None else \"\", mpi_arg_string, run_suffix)\n\n\n def set_model_version(self, model):\n version = \"unknown\"\n srcroot = self.get_value(\"SRCROOT\")\n if model == \"cesm\":\n changelog = os.path.join(srcroot,\"ChangeLog\")\n if os.path.isfile(changelog):\n for line in open(changelog, \"r\"):\n m = re.search(\"Tag name: (cesm.*)$\", 
line)\n if m is not None:\n version = m.group(1)\n break\n elif model == \"acme\":\n version = get_current_commit(True, srcroot)\n self.set_value(\"MODEL_VERSION\", version)\n if version != \"unknown\":\n logger.info(\"%s model version found: %s\"%(model, version))\n else:\n logger.warn(\"WARNING: No %s Model version found.\"%(model))\n\n\n\n\n\n\n\n\n", "path": "utils/python/CIME/case.py"}], "after_files": [{"content": "\"\"\"\nWrapper around all env XML for a case.\n\nAll interaction with and between the module files in XML/ takes place\nthrough the Case module.\n\"\"\"\nfrom copy import deepcopy\nimport glob, os, shutil, traceback\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import expect, get_cime_root, append_status\nfrom CIME.utils import convert_to_type, get_model, get_project\nfrom CIME.utils import get_build_threaded, get_current_commit\nfrom CIME.XML.build import Build\nfrom CIME.XML.machines import Machines\nfrom CIME.XML.pes import Pes\nfrom CIME.XML.files import Files\nfrom CIME.XML.component import Component\nfrom CIME.XML.compsets import Compsets\nfrom CIME.XML.grids import Grids\nfrom CIME.XML.batch import Batch\nfrom CIME.XML.pio import PIO\n\nfrom CIME.XML.env_test import EnvTest\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\nfrom CIME.XML.env_case import EnvCase\nfrom CIME.XML.env_mach_pes import EnvMachPes\nfrom CIME.XML.env_build import EnvBuild\nfrom CIME.XML.env_run import EnvRun\nfrom CIME.XML.env_archive import EnvArchive\nfrom CIME.XML.env_batch import EnvBatch\n\nfrom CIME.user_mod_support import apply_user_mods\nfrom CIME.case_setup import case_setup\n\nlogger = logging.getLogger(__name__)\n\nclass Case(object):\n \"\"\"\n https://github.com/ESMCI/cime/wiki/Developers-Introduction\n The Case class is the heart of the CIME Case Control system. All\n interactions with a Case take part through this class. All of the\n variables used to create and manipulate a case are defined in xml\n files and for every xml file there is a python class to interact\n with that file.\n\n XML files which are part of the CIME distribution and are meant to\n be readonly with respect to a case are typically named\n config_something.xml and the corresponding python Class is\n Something and can be found in file CIME.XML.something.py. I'll\n refer to these as the CIME config classes.\n\n XML files which are part of a case and thus are read/write to a\n case are typically named env_whatever.xml and the cooresponding\n python modules are CIME.XML.env_whatever.py and classes are\n EnvWhatever. I'll refer to these as the Case env classes.\n\n The Case Class includes an array of the Case env classes, in the\n configure function and it's supporting functions defined below\n the case object creates and manipulates the Case env classes\n by reading and interpreting the CIME config classes.\n\n \"\"\"\n def __init__(self, case_root=None, read_only=True):\n\n if case_root is None:\n case_root = os.getcwd()\n self._caseroot = case_root\n logger.debug(\"Initializing Case.\")\n self._env_files_that_need_rewrite = set()\n self._read_only_mode = True\n self._force_read_only = read_only\n\n self._env_entryid_files = []\n self._env_generic_files = []\n self._files = []\n\n self.read_xml()\n\n # Hold arbitary values. In create_newcase we may set values\n # for xml files that haven't been created yet. We need a place\n # to store them until we are ready to create the file. 
At file\n # creation we get the values for those fields from this lookup\n # table and then remove the entry.\n self.lookups = {}\n self.set_lookup_value('CIMEROOT',os.path.abspath(get_cime_root()))\n\n self._compsetname = None\n self._gridname = None\n self._compsetsfile = None\n self._pesfile = None\n self._gridfile = None\n self._components = []\n self._component_classes = []\n\n # Define __enter__ and __exit__ so that we can use this as a context manager\n # and force a flush on exit.\n def __enter__(self):\n if not self._force_read_only:\n self._read_only_mode = False\n return self\n\n def __exit__(self, *_):\n self.flush()\n self._read_only_mode = True\n return False\n\n def schedule_rewrite(self, env_file):\n assert not self._read_only_mode, \\\n \"case.py scripts error: attempted to modify an env file while in \" \\\n \"read-only mode\"\n self._env_files_that_need_rewrite.add(env_file)\n\n def read_xml(self):\n if(len(self._env_files_that_need_rewrite)>0):\n files = \"\"\n for env_file in self._env_files_that_need_rewrite:\n files += \" \"+env_file.filename\n expect(False,\"Object(s) %s seem to have newer data than the corresponding case file\"%files)\n\n self._env_entryid_files = []\n self._env_entryid_files.append(EnvRun(self._caseroot))\n self._env_entryid_files.append(EnvBuild(self._caseroot))\n self._env_entryid_files.append(EnvMachPes(self._caseroot))\n self._env_entryid_files.append(EnvCase(self._caseroot))\n self._env_entryid_files.append(EnvBatch(self._caseroot))\n if os.path.isfile(os.path.join(self._caseroot,\"env_test.xml\")):\n self._env_entryid_files.append(EnvTest(self._caseroot))\n self._env_generic_files = []\n self._env_generic_files.append(EnvMachSpecific(self._caseroot))\n self._env_generic_files.append(EnvArchive(self._caseroot))\n self._files = self._env_entryid_files + self._env_generic_files\n\n def get_case_root(self):\n \"\"\"Returns the root directory for this case.\"\"\"\n return self._caseroot\n\n def get_env(self, short_name):\n full_name = \"env_%s.xml\" % (short_name)\n for env_file in self._files:\n if os.path.basename(env_file.filename) == full_name:\n return env_file\n\n expect(False, \"Could not find object for %s in case\"%full_name)\n\n def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None):\n newcase = deepcopy(self)\n for env_file in newcase._files: # pylint: disable=protected-access\n basename = os.path.basename(env_file.filename)\n env_file.filename = os.path.join(newcaseroot,basename)\n\n if newcimeroot is not None:\n newcase.set_value(\"CIMEROOT\", newcimeroot)\n\n if newsrcroot is not None:\n newcase.set_value(\"SRCROOT\", newsrcroot)\n\n newcase.set_value(\"CASE\",newcasename)\n newcase.set_value(\"CASEROOT\",newcaseroot)\n newcase.set_value(\"CONTINUE_RUN\",\"FALSE\")\n newcase.set_value(\"RESUBMIT\",0)\n return newcase\n\n def flush(self, flushall=False):\n if not os.path.isdir(self._caseroot):\n # do not flush if caseroot wasnt created\n return\n if flushall:\n for env_file in self._files:\n self.schedule_rewrite(env_file)\n for env_file in self._env_files_that_need_rewrite:\n env_file.write()\n self._env_files_that_need_rewrite = set()\n\n def get_values(self, item, attribute=None, resolved=True, subgroup=None):\n results = []\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)\n\n if len(results) > 0:\n new_results = []\n vtype = env_file.get_type_info(item)\n if resolved:\n for result 
in results:\n if type(result) is str:\n result = self.get_resolved_value(result)\n new_results.append(convert_to_type(result, vtype, item))\n else:\n new_results.append(result)\n else:\n new_results = results\n return new_results\n\n for env_file in self._env_generic_files:\n results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)\n if len(results) > 0:\n if resolved:\n for result in results:\n if type(result) is str:\n new_results.append(self.get_resolved_value(result))\n else:\n new_results.append(result)\n else:\n new_results = results\n return new_results\n # Return empty result\n return results\n\n def get_value(self, item, attribute=None, resolved=True, subgroup=None):\n result = None\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)\n\n if result is not None:\n if resolved and type(result) is str:\n result = self.get_resolved_value(result)\n vtype = env_file.get_type_info(item)\n result = convert_to_type(result, vtype, item)\n return result\n\n for env_file in self._env_generic_files:\n\n result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)\n\n if result is not None:\n if resolved and type(result) is str:\n return self.get_resolved_value(result)\n return result\n\n # Return empty result\n return result\n\n\n def get_full_records(self, item=None, attribute=None, resolved=True, subgroup=None):\n\n \"\"\"\n Return info object for given item, return all info for all item if item is empty.\n \"\"\"\n\n logger.debug(\"(get_full_records) Input values: %s , %s , %s , %s , %s\" , self.__class__.__name__ , item, attribute, resolved, subgroup)\n\n # Empty result list\n results = []\n\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n logger.debug(\"(get_full_records) Searching in %s\" , env_file.__class__.__name__)\n result = None\n\n try:\n # env_batch has its own implementation of get_full_records otherwise in entry_id\n result = env_file.get_full_records(item, attribute, resolved=False, subgroup=subgroup)\n # Method exists, and was used.\n except AttributeError:\n # Method does not exist. 
What now?\n traceback.print_exc()\n logger.debug(\"(get_full_records) No get_full_records method for class %s (%s)\" , env_file.__class__.__name__ , AttributeError)\n\n if result is not None and (len(result) >= 1):\n\n if resolved :\n for r in result :\n if type(r['value']) is str:\n logger.debug(\"(get_full_records) Resolving %s\" , r['value'])\n r['value'] = self.get_resolved_value(r['value'])\n\n if subgroup :\n found = []\n for r in result :\n if r['group'] == subgroup :\n found.append(r)\n results += found\n else:\n results = results + result\n\n logger.debug(\"(get_full_records) Return value: %s\" , results )\n return results\n\n def get_type_info(self, item):\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.get_type_info(item)\n if result is not None:\n return result\n\n logging.debug(\"Not able to retreive type for item '%s'\" % item)\n\n def get_resolved_value(self, item, recurse=0):\n num_unresolved = item.count(\"$\")\n recurse_limit = 10\n if (num_unresolved > 0 and recurse < recurse_limit ):\n for env_file in self._env_entryid_files:\n item = env_file.get_resolved_value(item)\n if (\"$\" not in item):\n return item\n else:\n item = self.get_resolved_value(item,recurse=recurse+1)\n\n if recurse >= 2*recurse_limit:\n logging.warning(\"Not able to fully resolve item '%s'\" % item)\n elif recurse >= recurse_limit:\n #try env_batch first\n env_batch = self.get_env(\"batch\")\n item = env_batch.get_resolved_value(item)\n logger.debug(\"item is %s, checking env_batch\"%item)\n if item is not None:\n if (\"$\" not in item):\n return item\n else:\n item = self.get_resolved_value(item,recurse=recurse+1)\n else:\n logging.warning(\"Not able to fully resolve item '%s'\" % item)\n\n return item\n\n def set_value(self, item, value, subgroup=None, ignore_type=False):\n \"\"\"\n If a file has been defined, and the variable is in the file,\n then that value will be set in the file object and the file\n name is returned\n \"\"\"\n if item == \"CASEROOT\":\n self._caseroot = value\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.set_value(item, value, subgroup, ignore_type)\n if (result is not None):\n logger.debug(\"Will rewrite file %s %s\",env_file.filename, item)\n self._env_files_that_need_rewrite.add(env_file)\n return result\n\n def set_valid_values(self, item, valid_values):\n \"\"\"\n Update or create a valid_values entry for item and populate it\n \"\"\"\n result = None\n for env_file in self._env_entryid_files:\n result = env_file.set_valid_values(item, valid_values)\n if (result is not None):\n logger.debug(\"Will rewrite file %s %s\",env_file.filename, item)\n self._env_files_that_need_rewrite.add(env_file)\n return result\n\n def set_lookup_value(self, item, value):\n if item in self.lookups.keys() and self.lookups[item] is not None:\n logger.warn(\"Item %s already in lookups with value %s\"%(item,self.lookups[item]))\n else:\n self.lookups[item] = value\n\n\n def _set_compset_and_pesfile(self, compset_name, user_compset=False, pesfile=None):\n \"\"\"\n Loop through all the compset files and find the compset\n specifation file that matches either the input 'compset_name'.\n Note that the input compset name (i.e. compset_name) can be\n either a longname or an alias. 
This will also set the\n compsets and pes specfication files.\n \"\"\"\n files = Files()\n components = files.get_components(\"COMPSETS_SPEC_FILE\")\n logger.debug(\" Possible components for COMPSETS_SPEC_FILE are %s\" % components)\n\n # Loop through all of the files listed in COMPSETS_SPEC_FILE and find the file\n # that has a match for either the alias or the longname in that order\n for component in components:\n\n # Determine the compsets file for this component\n compsets_filename = files.get_value(\"COMPSETS_SPEC_FILE\", {\"component\":component})\n\n # If the file exists, read it and see if there is a match for the compset alias or longname\n if (os.path.isfile(compsets_filename)):\n compsets = Compsets(compsets_filename)\n match = compsets.get_compset_match(name=compset_name)\n pesfile = files.get_value(\"PES_SPEC_FILE\" , {\"component\":component})\n if match is not None:\n self._pesfile = pesfile\n self._compsetsfile = compsets_filename\n self._compsetname = match\n tests_filename = files.get_value(\"TESTS_SPEC_FILE\" , {\"component\":component}, resolved=False)\n tests_mods_dir = files.get_value(\"TESTS_MODS_DIR\" , {\"component\":component}, resolved=False)\n user_mods_dir = files.get_value(\"USER_MODS_DIR\" , {\"component\":component}, resolved=False)\n self.set_lookup_value(\"COMPSETS_SPEC_FILE\" ,\n files.get_value(\"COMPSETS_SPEC_FILE\", {\"component\":component}, resolved=False))\n self.set_lookup_value(\"TESTS_SPEC_FILE\" , tests_filename)\n self.set_lookup_value(\"TESTS_MODS_DIR\" , tests_mods_dir)\n self.set_lookup_value(\"USER_MODS_DIR\" , user_mods_dir)\n self.set_lookup_value(\"PES_SPEC_FILE\" ,\n files.get_value(\"PES_SPEC_FILE\" , {\"component\":component}, resolved=False))\n logger.info(\"Compset longname is %s \" %(match))\n logger.info(\"Compset specification file is %s\" %(compsets_filename))\n logger.info(\"Pes specification file is %s\" %(pesfile))\n return\n\n if user_compset is True:\n #Do not error out for user_compset\n logger.warn(\"Could not find a compset match for either alias or longname in %s\" %(compset_name))\n self._compsetname = compset_name\n self._pesfile = pesfile\n self.set_lookup_value(\"PES_SPEC_FILE\", pesfile)\n else:\n expect(False,\n \"Could not find a compset match for either alias or longname in %s\" %(compset_name))\n\n\n def get_compset_components(self):\n #If are doing a create_clone then, self._compsetname is not set yet\n components = []\n compset = self.get_value(\"COMPSET\")\n if compset is None:\n compset = self._compsetname\n expect(compset is not None,\n \"ERROR: compset is not set\")\n # the first element is always the date operator - skip it\n elements = compset.split('_')[1:] # pylint: disable=maybe-no-member\n for element in elements:\n # ignore the possible BGC or TEST modifier\n if element.startswith(\"BGC%\") or element.startswith(\"TEST\"):\n continue\n else:\n element_component = element.split('%')[0].lower()\n element_component = re.sub(r'[0-9]*',\"\",element_component)\n components.append(element_component)\n return components\n\n\n def __iter__(self):\n for entryid_file in self._env_entryid_files:\n for key, val in entryid_file:\n if type(val) is str and '$' in val:\n yield key, self.get_resolved_value(val)\n else:\n yield key, val\n\n\n def _get_component_config_data(self):\n # attributes used for multi valued defaults ($attlist is a hash reference)\n attlist = {\"compset\":self._compsetname, \"grid\":self._gridname}\n\n # Determine list of component classes that this coupler/driver knows how\n # to deal with. 
This list follows the same order as compset longnames follow.\n files = Files()\n # Add the group and elements for the config_files.xml\n for env_file in self._env_entryid_files:\n env_file.add_elements_by_group(files, attlist)\n\n drv_config_file = files.get_value(\"CONFIG_DRV_FILE\")\n drv_comp = Component(drv_config_file)\n for env_file in self._env_entryid_files:\n env_file.add_elements_by_group(drv_comp, attributes=attlist)\n\n # loop over all elements of both component_classes and components - and get config_component_file for\n # for each component\n self._component_classes =drv_comp.get_valid_model_components()\n if len(self._component_classes) > len(self._components):\n self._components.append('sesp')\n\n for i in xrange(1,len(self._component_classes)):\n comp_class = self._component_classes[i]\n comp_name = self._components[i-1]\n node_name = 'CONFIG_' + comp_class + '_FILE'\n # Add the group and elements for the config_files.xml\n comp_config_file = files.get_value(node_name, {\"component\":comp_name}, resolved=False)\n self.set_value(node_name, comp_config_file)\n comp_config_file = self.get_resolved_value(comp_config_file)\n expect(comp_config_file is not None,\"No config file for component %s\"%comp_name)\n compobj = Component(comp_config_file)\n for env_file in self._env_entryid_files:\n env_file.add_elements_by_group(compobj, attributes=attlist)\n\n\n for key,value in self.lookups.items():\n result = self.set_value(key,value)\n if result is not None:\n del self.lookups[key]\n\n def get_components(self):\n \"\"\"\n return dictionary of the form [component_class:component],\n e.g. [atm:cam], for all compset components\n \"\"\"\n\n files = Files()\n drv_comp = Component(files.get_value(\"CONFIG_DRV_FILE\"))\n\n # Determine list of component classes that this coupler/driver knows how\n # to deal with. 
This list follows the same order as compset longnames follow.\n component_classes = drv_comp.get_valid_model_components()\n components = self.get_compset_components()\n\n # Note that component classes can have a bigger range than\n # compents since stub esp (sesp) is an optional component - so\n # need to take the min of the two below\n comp_dict = {}\n for i in xrange(0,len(components)):\n comp_name = components[i]\n comp_class = component_classes[i+1]\n comp_dict[comp_class] = comp_name\n return comp_dict\n\n def configure(self, compset_name, grid_name, machine_name=None,\n project=None, pecount=None, compiler=None, mpilib=None,\n user_compset=False, pesfile=None,\n user_grid=False, gridfile=None, ninst=1, test=False,\n walltime=None, queue=None):\n\n #--------------------------------------------\n # compset, pesfile, and compset components\n #--------------------------------------------\n self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)\n\n self._components = self.get_compset_components()\n #FIXME - if --user-compset is True then need to determine that\n #all of the compset settings are valid\n\n #--------------------------------------------\n # grid\n #--------------------------------------------\n if user_grid is True and gridfile is not None:\n self.set_value(\"GRIDS_SPEC_FILE\", gridfile)\n grids = Grids(gridfile)\n\n gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)\n\n self._gridname = gridinfo[\"GRID\"]\n for key,value in gridinfo.items():\n logger.debug(\"Set grid %s %s\"%(key,value))\n self.set_lookup_value(key,value)\n\n #--------------------------------------------\n # component config data\n #--------------------------------------------\n self._get_component_config_data()\n\n self.get_compset_var_settings()\n\n #--------------------------------------------\n # machine\n #--------------------------------------------\n # set machine values in env_xxx files\n machobj = Machines(machine=machine_name)\n machine_name = machobj.get_machine_name()\n self.set_value(\"MACH\",machine_name)\n nodenames = machobj.get_node_names()\n nodenames = [x for x in nodenames if\n '_system' not in x and '_variables' not in x and 'mpirun' not in x and\\\n 'COMPILER' not in x and 'MPILIB' not in x]\n\n for nodename in nodenames:\n value = machobj.get_value(nodename, resolved=False)\n type_str = self.get_type_info(nodename)\n if type_str is not None:\n logger.debug(\"machine nodname %s value %s\"%(nodename, value))\n self.set_value(nodename, convert_to_type(value, type_str, nodename))\n\n if compiler is None:\n compiler = machobj.get_default_compiler()\n else:\n expect(machobj.is_valid_compiler(compiler),\n \"compiler %s is not supported on machine %s\" %(compiler, machine_name))\n\n self.set_value(\"COMPILER\",compiler)\n\n if mpilib is None:\n mpilib = machobj.get_default_MPIlib({\"compiler\":compiler})\n else:\n expect(machobj.is_valid_MPIlib(mpilib, {\"compiler\":compiler}),\n \"MPIlib %s is not supported on machine %s\" %(mpilib, machine_name))\n self.set_value(\"MPILIB\",mpilib)\n\n machdir = machobj.get_machines_dir()\n self.set_value(\"MACHDIR\", machdir)\n\n # Create env_mach_specific settings from machine info.\n env_mach_specific_obj = self.get_env(\"mach_specific\")\n env_mach_specific_obj.populate(machobj)\n self.schedule_rewrite(env_mach_specific_obj)\n\n #--------------------------------------------\n # pe payout\n #--------------------------------------------\n match1 = re.match('([0-9]+)x([0-9]+)', \"\" if pecount is None 
else pecount)\n match2 = re.match('([0-9]+)', \"\" if pecount is None else pecount)\n pes_ntasks = {}\n pes_nthrds = {}\n pes_rootpe = {}\n if match1:\n opti_tasks = match1.group(1)\n opti_thrds = match1.group(2)\n elif match2:\n opti_tasks = match2.group(1)\n opti_thrds = 1\n\n other = {}\n if match1 or match2:\n for component_class in self._component_classes:\n if component_class == \"DRV\":\n component_class = \"CPL\"\n string = \"NTASKS_\" + component_class\n pes_ntasks[string] = opti_tasks\n string = \"NTHRDS_\" + component_class\n pes_nthrds[string] = opti_thrds\n string = \"ROOTPE_\" + component_class\n pes_rootpe[string] = 0\n else:\n pesobj = Pes(self._pesfile)\n\n pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(self._gridname, self._compsetname,\n machine_name, pesize_opts=pecount)\n\n mach_pes_obj = self.get_env(\"mach_pes\")\n totaltasks = {}\n # Since other items may include PES_PER_NODE we need to do this first\n # we can get rid of this code when all of the perl is removed\n for key, value in other.items():\n self.set_value(key, value)\n pes_per_node = self.get_value(\"PES_PER_NODE\")\n for key, value in pes_ntasks.items():\n totaltasks[key[-3:]] = int(value)\n mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)\n for key, value in pes_rootpe.items():\n totaltasks[key[-3:]] += int(value)\n mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)\n for key, value in pes_nthrds.items():\n totaltasks[key[-3:]] *= int(value)\n mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)\n\n maxval = 1\n if mpilib != \"mpi-serial\":\n for key, val in totaltasks.items():\n if val < 0:\n val = -1*val*pes_per_node\n if val > maxval:\n maxval = val\n\n # Make sure that every component has been accounted for\n # set, nthrds and ntasks to 1 otherwise. 
Also set the ninst values here.\n for compclass in self._component_classes:\n if compclass == \"DRV\":\n continue\n key = \"NINST_%s\"%compclass\n mach_pes_obj.set_value(key, ninst)\n key = \"NTASKS_%s\"%compclass\n if key not in pes_ntasks.keys():\n mach_pes_obj.set_value(key,1)\n key = \"NTHRDS_%s\"%compclass\n if compclass not in pes_nthrds.keys():\n mach_pes_obj.set_value(compclass,1)\n\n # FIXME - this is a short term fix for dealing with the restriction that\n # CISM1 cannot run on multiple cores\n if \"CISM1\" in self._compsetname:\n mach_pes_obj.set_value(\"NTASKS_GLC\",1)\n mach_pes_obj.set_value(\"NTHRDS_GLC\",1)\n\n #--------------------------------------------\n # batch system\n #--------------------------------------------\n batch_system_type = machobj.get_value(\"BATCH_SYSTEM\")\n batch = Batch(batch_system=batch_system_type, machine=machine_name)\n bjobs = batch.get_batch_jobs()\n env_batch = self.get_env(\"batch\")\n env_batch.set_batch_system(batch, batch_system_type=batch_system_type)\n env_batch.create_job_groups(bjobs)\n env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue)\n self.schedule_rewrite(env_batch)\n\n self.set_value(\"COMPSET\",self._compsetname)\n\n self._set_pio_xml()\n logger.info(\" Compset is: %s \" %self._compsetname)\n logger.info(\" Grid is: %s \" %self._gridname )\n logger.info(\" Components in compset are: %s \" %self._components)\n\n # Set project id\n if project is None:\n project = get_project(machobj)\n if project is not None:\n self.set_value(\"PROJECT\", project)\n elif machobj.get_value(\"PROJECT_REQUIRED\"):\n expect(project is not None, \"PROJECT_REQUIRED is true but no project found\")\n\n # Overwriting an existing exeroot or rundir can cause problems\n exeroot = self.get_value(\"EXEROOT\")\n rundir = self.get_value(\"RUNDIR\")\n for wdir in (exeroot, rundir):\n logging.debug(\"wdir is %s\"%wdir)\n if os.path.exists(wdir):\n expect(not test, \"Directory %s already exists, aborting test\"% wdir)\n response = raw_input(\"\\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?\"% wdir)\n if response.startswith(\"r\"):\n shutil.rmtree(wdir)\n else:\n expect(response.startswith(\"u\"), \"Aborting by user request\")\n\n # miscellaneous settings\n if self.get_value(\"RUN_TYPE\") == 'hybrid':\n self.set_value(\"GET_REFCASE\", True)\n\n # Turn on short term archiving as cesm default setting\n model = get_model()\n self.set_model_version(model)\n if model == \"cesm\" and not test:\n self.set_value(\"DOUT_S\",True)\n if test:\n self.set_value(\"TEST\",True)\n\n\n def get_compset_var_settings(self):\n compset_obj = Compsets(infile=self.get_value(\"COMPSETS_SPEC_FILE\"))\n matches = compset_obj.get_compset_var_settings(self._compsetname, self._gridname)\n for name, value in matches:\n if len(value) > 0:\n logger.debug(\"Compset specific settings: name is %s and value is %s\"%(name,value))\n self.set_value(name, value)\n\n def set_initial_test_values(self):\n testobj = self.get_env(\"test\")\n testobj.set_initial_values(self)\n\n def get_batch_jobs(self):\n batchobj = self.get_env(\"batch\")\n return batchobj.get_jobs()\n\n def _set_pio_xml(self):\n pioobj = PIO()\n grid = self.get_value(\"GRID\")\n compiler = self.get_value(\"COMPILER\")\n mach = self.get_value(\"MACH\")\n compset = self.get_value(\"COMPSET\")\n mpilib = self.get_value(\"MPILIB\")\n defaults = pioobj.get_defaults(grid=grid,compset=compset,mach=mach,compiler=compiler, mpilib=mpilib)\n for vid, value in defaults.items():\n 
self.set_value(vid,value)\n\n def _create_caseroot_tools(self):\n machines_dir = os.path.abspath(self.get_value(\"MACHDIR\"))\n toolsdir = os.path.join(self.get_value(\"CIMEROOT\"),\"scripts\",\"Tools\")\n # setup executable files in caseroot/\n exefiles = (os.path.join(toolsdir, \"case.setup\"),\n os.path.join(toolsdir, \"case.build\"),\n os.path.join(toolsdir, \"case.submit\"),\n os.path.join(toolsdir, \"preview_namelists\"),\n os.path.join(toolsdir, \"check_input_data\"),\n os.path.join(toolsdir, \"check_case\"),\n os.path.join(toolsdir, \"archive_metadata.sh\"),\n os.path.join(toolsdir, \"xmlchange\"),\n os.path.join(toolsdir, \"xmlquery\"))\n try:\n for exefile in exefiles:\n destfile = os.path.join(self._caseroot,os.path.basename(exefile))\n os.symlink(exefile, destfile)\n except Exception as e:\n logger.warning(\"FAILED to set up exefiles: %s\" % str(e))\n\n # set up utility files in caseroot/Tools/\n toolfiles = (os.path.join(toolsdir, \"check_lockedfiles\"),\n os.path.join(toolsdir, \"lt_archive.sh\"),\n os.path.join(toolsdir, \"getTiming\"),\n os.path.join(toolsdir, \"save_provenance\"),\n os.path.join(machines_dir,\"Makefile\"),\n os.path.join(machines_dir,\"mkSrcfiles\"),\n os.path.join(machines_dir,\"mkDepends\"))\n\n for toolfile in toolfiles:\n destfile = os.path.join(self._caseroot,\"Tools\",os.path.basename(toolfile))\n expect(os.path.isfile(toolfile),\" File %s does not exist\"%toolfile)\n try:\n os.symlink(toolfile, destfile)\n except Exception as e:\n logger.warning(\"FAILED to set up toolfiles: %s %s %s\" % (str(e), toolfile, destfile))\n\n # Create Macros file.\n machine = self.get_value(\"MACH\")\n files = Files()\n # Use config_build if the environment variable is set, or if there is no\n # config_compilers file.\n if os.getenv(\"CIME_USE_CONFIG_BUILD\") == \"TRUE\" or \\\n files.get_value(\"COMPILERS_SPEC_FILE\") is None:\n build_file = files.get_value(\"BUILD_SPEC_FILE\")\n machobj = Machines(machine=machine, files=files)\n macro_maker = Build(machobj)\n macros_path = os.path.join(self._caseroot, \"Macros\")\n with open(macros_path, \"w\") as macros_file:\n macro_maker.write_macros('Makefile', build_file, macros_file)\n\n # Copy any system or compiler Depends files to the case.\n compiler = self.get_value(\"COMPILER\")\n for dep in (machine, compiler):\n dfile = \"Depends.%s\"%dep\n if os.path.isfile(os.path.join(machines_dir,dfile)):\n shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot,dfile))\n dfile = \"Depends.%s.%s\"%(machine,compiler)\n if os.path.isfile(os.path.join(machines_dir,dfile)):\n shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot, dfile))\n # set up infon files\n # infofiles = os.path.join(os.path.join(toolsdir, README.post_process\")\n #FIXME - the following does not work\n # print \"DEBUG: infofiles are \",infofiles\n # try:\n # for infofile in infofiles:\n # print \"DEBUG: infofile is %s, %s\" %(infofile, os.path.basename(infofile))\n # dst_file = caseroot + \"/\" + os.path.basename(infofile)\n # shutil.copyfile(infofile, dst_file)\n # os.chmod(dst_file, os.stat(dst_file).st_mode | stat.S_IXUSR | stat.S_IXGRP)\n # except Exception as e:\n # logger.warning(\"FAILED to set up infofiles: %s\" % str(e))\n\n def _create_caseroot_sourcemods(self):\n components = self.get_compset_components()\n for component in components:\n directory = os.path.join(self._caseroot,\"SourceMods\",\"src.%s\"%component)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n directory = 
os.path.join(self._caseroot, \"SourceMods\", \"src.share\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n directory = os.path.join(self._caseroot,\"SourceMods\",\"src.drv\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if get_model() == \"cesm\":\n # Note: this is CESM specific, given that we are referencing cism explitly\n if \"cism\" in components:\n directory = os.path.join(self._caseroot, \"SourceMods\", \"src.cism\", \"glimmer-cism\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n readme_file = os.path.join(directory, \"README\")\n\n str_to_write = \"\"\"\n Put source mods for the glimmer-cism library in the glimmer-cism subdirectory\n This includes any files that are in the glimmer-cism subdirectory of $cimeroot/../components/cism\n Anything else (e.g., mods to source_glc or drivers) goes in this directory, NOT in glimmer-cism/\"\"\"\n\n with open(readme_file, \"w\") as fd:\n fd.write(str_to_write)\n\n def create_caseroot(self, clone=False):\n if not os.path.exists(self._caseroot):\n # Make the case directory\n logger.info(\" Creating Case directory %s\" %self._caseroot)\n os.makedirs(self._caseroot)\n os.chdir(self._caseroot)\n\n # Create relevant directories in $self._caseroot\n if clone:\n newdirs = (\"LockedFiles\", \"Tools\")\n else:\n newdirs = (\"SourceMods\", \"LockedFiles\", \"Buildconf\", \"Tools\")\n for newdir in newdirs:\n os.makedirs(newdir)\n # Open a new README.case file in $self._caseroot\n\n append_status(\" \".join(sys.argv), caseroot=self._caseroot, sfile=\"README.case\")\n append_status(\"Compset longname is %s\"%self.get_value(\"COMPSET\"),\n caseroot=self._caseroot, sfile=\"README.case\")\n append_status(\"Compset specification file is %s\" %\n (self.get_value(\"COMPSETS_SPEC_FILE\")),\n caseroot=self._caseroot, sfile=\"README.case\")\n append_status(\"Pes specification file is %s\" %\n (self.get_value(\"PES_SPEC_FILE\")),\n caseroot=self._caseroot, sfile=\"README.case\")\n for component_class in self._component_classes:\n if component_class == \"DRV\":\n continue\n comp_grid = \"%s_GRID\"%component_class\n append_status(\"%s is %s\"%(comp_grid,self.get_value(comp_grid)),\n caseroot=self._caseroot, sfile=\"README.case\")\n if not clone:\n self._create_caseroot_sourcemods()\n self._create_caseroot_tools()\n\n def apply_user_mods(self, user_mods_dir=None):\n if user_mods_dir is not None:\n if os.path.isabs(user_mods_dir):\n user_mods_path = user_mods_dir\n else:\n user_mods_path = self.get_value('USER_MODS_DIR')\n user_mods_path = os.path.join(user_mods_path, user_mods_dir)\n self.set_value(\"USER_MODS_FULLPATH\",user_mods_path)\n ninst_vals = {}\n for i in xrange(1,len(self._component_classes)):\n comp_class = self._component_classes[i]\n comp_name = self._components[i-1]\n if comp_class == \"DRV\":\n continue\n ninst_comp = self.get_value(\"NINST_%s\"%comp_class)\n if ninst_comp > 1:\n ninst_vals[comp_name] = ninst_comp\n apply_user_mods(self._caseroot, user_mods_path, ninst_vals)\n\n def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None):\n\n newcaseroot = os.path.abspath(newcase)\n expect(not os.path.isdir(newcaseroot),\n \"New caseroot directory %s already exists\" % newcaseroot)\n newcasename = os.path.basename(newcaseroot)\n newcase_cimeroot = os.path.abspath(get_cime_root())\n\n # create clone from self to case\n clone_cimeroot = self.get_value(\"CIMEROOT\")\n if newcase_cimeroot != clone_cimeroot:\n logger.warning(\" case CIMEROOT is %s \" %newcase_cimeroot)\n logger.warning(\" 
clone CIMEROOT is %s \" %clone_cimeroot)\n logger.warning(\" It is NOT recommended to clone cases from different versions of CIME.\")\n\n\n # *** create case object as deepcopy of clone object ***\n srcroot = os.path.join(newcase_cimeroot,\"..\")\n newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot)\n newcase.set_value(\"CIMEROOT\", newcase_cimeroot)\n\n # determine if will use clone executable or not\n if keepexe:\n orig_exeroot = self.get_value(\"EXEROOT\")\n newcase.set_value(\"EXEROOT\", orig_exeroot)\n newcase.set_value(\"BUILD_COMPLETE\",\"TRUE\")\n else:\n newcase.set_value(\"BUILD_COMPLETE\",\"FALSE\")\n\n # set machdir\n if mach_dir is not None:\n newcase.set_value(\"MACHDIR\", mach_dir)\n\n # Set project id\n # Note: we do not just copy this from the clone because it seems likely that\n # users will want to change this sometimes, especially when cloning another\n # user's case. However, note that, if a project is not given, the fallback will\n # be to copy it from the clone, just like other xml variables are copied.\n if project is None:\n project = self.get_value(\"PROJECT\", subgroup=\"case.run\")\n if project is not None:\n newcase.set_value(\"PROJECT\", project)\n\n # create caseroot\n newcase.create_caseroot(clone=True)\n newcase.flush(flushall=True)\n\n # copy user_nl_files\n cloneroot = self._caseroot\n files = glob.glob(cloneroot + '/user_nl_*')\n for item in files:\n shutil.copy(item, newcaseroot)\n\n # copy SourceMod and Buildconf files\n for casesub in (\"SourceMods\", \"Buildconf\"):\n shutil.copytree(os.path.join(cloneroot, casesub), os.path.join(newcaseroot, casesub))\n\n # copy env_case.xml to LockedFiles\n shutil.copy(os.path.join(newcaseroot,\"env_case.xml\"), os.path.join(newcaseroot,\"LockedFiles\"))\n\n # Update README.case\n fclone = open(cloneroot + \"/README.case\", \"r\")\n fnewcase = open(newcaseroot + \"/README.case\", \"a\")\n fnewcase.write(\"\\n *** original clone README follows ****\")\n fnewcase.write(\"\\n \" + fclone.read())\n\n clonename = self.get_value(\"CASE\")\n logger.info(\" Successfully created new case %s from clone case %s \" %(newcasename, clonename))\n\n case_setup(newcase, clean=False, test_mode=False)\n\n return newcase\n\n def submit_jobs(self, no_batch=False, job=None):\n env_batch = self.get_env('batch')\n env_batch.submit_jobs(self, no_batch=no_batch, job=job)\n\n def get_mpirun_cmd(self, job=\"case.run\"):\n env_mach_specific = self.get_env('mach_specific')\n run_exe = env_mach_specific.get_value(\"run_exe\")\n run_misc_suffix = env_mach_specific.get_value(\"run_misc_suffix\")\n run_misc_suffix = \"\" if run_misc_suffix is None else run_misc_suffix\n run_suffix = run_exe + run_misc_suffix\n\n # Things that will have to be matched against mpirun element attributes\n mpi_attribs = {\n \"compiler\" : self.get_value(\"COMPILER\"),\n \"mpilib\" : self.get_value(\"MPILIB\"),\n \"threaded\" : get_build_threaded(self)\n }\n\n executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job)\n\n mpi_arg_string = \" \".join(args.values())\n\n\n if self.get_value(\"BATCH_SYSTEM\") == \"cobalt\":\n mpi_arg_string += \" : \"\n\n return \"%s %s %s\" % (executable if executable is not None else \"\", mpi_arg_string, run_suffix)\n\n\n def set_model_version(self, model):\n version = \"unknown\"\n srcroot = self.get_value(\"SRCROOT\")\n if model == \"cesm\":\n changelog = os.path.join(srcroot,\"ChangeLog\")\n if os.path.isfile(changelog):\n for line in open(changelog, \"r\"):\n m = re.search(\"Tag name: (cesm.*)$\", 
line)\n if m is not None:\n version = m.group(1)\n break\n elif model == \"acme\":\n version = get_current_commit(True, srcroot)\n self.set_value(\"MODEL_VERSION\", version)\n if version != \"unknown\":\n logger.info(\"%s model version found: %s\"%(model, version))\n else:\n logger.warn(\"WARNING: No %s Model version found.\"%(model))\n\n\n\n\n\n\n\n\n", "path": "utils/python/CIME/case.py"}]} |
gh_patches_debug_1360 | rasdani/github-patches | git_diff | netbox-community__netbox-10296 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ChangeLog false positive for wwn
### NetBox version
v3.3.2
### Python version
3.8
### Steps to Reproduce
1. Update an Interface
2. Display the generated change log.
### Expected Behavior
Only changed properties are detected as changed.
### Observed Behavior
`wwn` is detected as changed from `null` to `""`
--- END ISSUE ---
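The observed behavior above is the usual null-versus-empty-string trap: the interface's `wwn` is stored as `null`, but the edit form hands back an empty string for the untouched field, so a diff of the two serialized snapshots reports a change. A minimal sketch of that mechanism follows — the helper names are illustrative assumptions, not NetBox's actual change-logging code:

```
def diff_snapshots(before, after):
    """Return {field: (old, new)} for every field whose serialized value differs."""
    return {
        key: (before.get(key), after.get(key))
        for key in before.keys() | after.keys()
        if before.get(key) != after.get(key)
    }

before = {"name": "eth0", "wwn": None}   # value as stored in the database
after = {"name": "eth0", "wwn": ""}      # value after a round trip through the edit form

print(diff_snapshots(before, after))     # {'wwn': (None, '')}  -> spurious change record

def normalize(snapshot):
    """Treat empty strings and None as the same 'no value' marker before diffing."""
    return {k: (None if v == "" else v) for k, v in snapshot.items()}

print(diff_snapshots(normalize(before), normalize(after)))   # {}
```

Normalizing empty strings to `None` (or vice versa) before comparing is one way to suppress the spurious entry; cleaning the value at the form layer is another.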
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/dcim/forms/models.py`
Content:
```
1 from django import forms
2 from django.utils.translation import gettext as _
3 from django.contrib.auth.models import User
4 from django.contrib.contenttypes.models import ContentType
5 from timezone_field import TimeZoneFormField
6
7 from dcim.choices import *
8 from dcim.constants import *
9 from dcim.models import *
10 from ipam.models import ASN, IPAddress, VLAN, VLANGroup, VRF
11 from netbox.forms import NetBoxModelForm
12 from tenancy.forms import TenancyForm
13 from utilities.forms import (
14 APISelect, add_blank_choice, BootstrapMixin, ClearableFileInput, CommentField, ContentTypeChoiceField,
15 DynamicModelChoiceField, DynamicModelMultipleChoiceField, JSONField, NumericArrayField, SelectWithPK, SmallTextarea,
16 SlugField, StaticSelect, SelectSpeedWidget,
17 )
18 from virtualization.models import Cluster, ClusterGroup
19 from wireless.models import WirelessLAN, WirelessLANGroup
20 from .common import InterfaceCommonForm
21
22 __all__ = (
23 'CableForm',
24 'ConsolePortForm',
25 'ConsolePortTemplateForm',
26 'ConsoleServerPortForm',
27 'ConsoleServerPortTemplateForm',
28 'DeviceBayForm',
29 'DeviceBayTemplateForm',
30 'DeviceForm',
31 'DeviceRoleForm',
32 'DeviceTypeForm',
33 'DeviceVCMembershipForm',
34 'FrontPortForm',
35 'FrontPortTemplateForm',
36 'InterfaceForm',
37 'InterfaceTemplateForm',
38 'InventoryItemForm',
39 'InventoryItemRoleForm',
40 'InventoryItemTemplateForm',
41 'LocationForm',
42 'ManufacturerForm',
43 'ModuleForm',
44 'ModuleBayForm',
45 'ModuleBayTemplateForm',
46 'ModuleTypeForm',
47 'PlatformForm',
48 'PopulateDeviceBayForm',
49 'PowerFeedForm',
50 'PowerOutletForm',
51 'PowerOutletTemplateForm',
52 'PowerPanelForm',
53 'PowerPortForm',
54 'PowerPortTemplateForm',
55 'RackForm',
56 'RackReservationForm',
57 'RackRoleForm',
58 'RearPortForm',
59 'RearPortTemplateForm',
60 'RegionForm',
61 'SiteForm',
62 'SiteGroupForm',
63 'VCMemberSelectForm',
64 'VirtualChassisForm',
65 )
66
67 INTERFACE_MODE_HELP_TEXT = """
68 Access: One untagged VLAN<br />
69 Tagged: One untagged VLAN and/or one or more tagged VLANs<br />
70 Tagged (All): Implies all VLANs are available (w/optional untagged VLAN)
71 """
72
73
74 class RegionForm(NetBoxModelForm):
75 parent = DynamicModelChoiceField(
76 queryset=Region.objects.all(),
77 required=False
78 )
79 slug = SlugField()
80
81 class Meta:
82 model = Region
83 fields = (
84 'parent', 'name', 'slug', 'description', 'tags',
85 )
86
87
88 class SiteGroupForm(NetBoxModelForm):
89 parent = DynamicModelChoiceField(
90 queryset=SiteGroup.objects.all(),
91 required=False
92 )
93 slug = SlugField()
94
95 class Meta:
96 model = SiteGroup
97 fields = (
98 'parent', 'name', 'slug', 'description', 'tags',
99 )
100
101
102 class SiteForm(TenancyForm, NetBoxModelForm):
103 region = DynamicModelChoiceField(
104 queryset=Region.objects.all(),
105 required=False
106 )
107 group = DynamicModelChoiceField(
108 queryset=SiteGroup.objects.all(),
109 required=False
110 )
111 asns = DynamicModelMultipleChoiceField(
112 queryset=ASN.objects.all(),
113 label=_('ASNs'),
114 required=False
115 )
116 slug = SlugField()
117 time_zone = TimeZoneFormField(
118 choices=add_blank_choice(TimeZoneFormField().choices),
119 required=False,
120 widget=StaticSelect()
121 )
122 comments = CommentField()
123
124 fieldsets = (
125 ('Site', (
126 'name', 'slug', 'status', 'region', 'group', 'facility', 'asns', 'time_zone', 'description', 'tags',
127 )),
128 ('Tenancy', ('tenant_group', 'tenant')),
129 ('Contact Info', ('physical_address', 'shipping_address', 'latitude', 'longitude')),
130 )
131
132 class Meta:
133 model = Site
134 fields = (
135 'name', 'slug', 'status', 'region', 'group', 'tenant_group', 'tenant', 'facility', 'asns', 'time_zone',
136 'description', 'physical_address', 'shipping_address', 'latitude', 'longitude', 'comments', 'tags',
137 )
138 widgets = {
139 'physical_address': SmallTextarea(
140 attrs={
141 'rows': 3,
142 }
143 ),
144 'shipping_address': SmallTextarea(
145 attrs={
146 'rows': 3,
147 }
148 ),
149 'status': StaticSelect(),
150 'time_zone': StaticSelect(),
151 }
152 help_texts = {
153 'name': "Full name of the site",
154 'facility': "Data center provider and facility (e.g. Equinix NY7)",
155 'time_zone': "Local time zone",
156 'description': "Short description (will appear in sites list)",
157 'physical_address': "Physical location of the building (e.g. for GPS)",
158 'shipping_address': "If different from the physical address",
159 'latitude': "Latitude in decimal format (xx.yyyyyy)",
160 'longitude': "Longitude in decimal format (xx.yyyyyy)"
161 }
162
163
164 class LocationForm(TenancyForm, NetBoxModelForm):
165 region = DynamicModelChoiceField(
166 queryset=Region.objects.all(),
167 required=False,
168 initial_params={
169 'sites': '$site'
170 }
171 )
172 site_group = DynamicModelChoiceField(
173 queryset=SiteGroup.objects.all(),
174 required=False,
175 initial_params={
176 'sites': '$site'
177 }
178 )
179 site = DynamicModelChoiceField(
180 queryset=Site.objects.all(),
181 query_params={
182 'region_id': '$region',
183 'group_id': '$site_group',
184 }
185 )
186 parent = DynamicModelChoiceField(
187 queryset=Location.objects.all(),
188 required=False,
189 query_params={
190 'site_id': '$site'
191 }
192 )
193 slug = SlugField()
194
195 fieldsets = (
196 ('Location', (
197 'region', 'site_group', 'site', 'parent', 'name', 'slug', 'status', 'description', 'tags',
198 )),
199 ('Tenancy', ('tenant_group', 'tenant')),
200 )
201
202 class Meta:
203 model = Location
204 fields = (
205 'region', 'site_group', 'site', 'parent', 'name', 'slug', 'status', 'description', 'tenant_group', 'tenant',
206 'tags',
207 )
208 widgets = {
209 'status': StaticSelect(),
210 }
211
212
213 class RackRoleForm(NetBoxModelForm):
214 slug = SlugField()
215
216 class Meta:
217 model = RackRole
218 fields = [
219 'name', 'slug', 'color', 'description', 'tags',
220 ]
221
222
223 class RackForm(TenancyForm, NetBoxModelForm):
224 region = DynamicModelChoiceField(
225 queryset=Region.objects.all(),
226 required=False,
227 initial_params={
228 'sites': '$site'
229 }
230 )
231 site_group = DynamicModelChoiceField(
232 queryset=SiteGroup.objects.all(),
233 required=False,
234 initial_params={
235 'sites': '$site'
236 }
237 )
238 site = DynamicModelChoiceField(
239 queryset=Site.objects.all(),
240 query_params={
241 'region_id': '$region',
242 'group_id': '$site_group',
243 }
244 )
245 location = DynamicModelChoiceField(
246 queryset=Location.objects.all(),
247 required=False,
248 query_params={
249 'site_id': '$site'
250 }
251 )
252 role = DynamicModelChoiceField(
253 queryset=RackRole.objects.all(),
254 required=False
255 )
256 comments = CommentField()
257
258 class Meta:
259 model = Rack
260 fields = [
261 'region', 'site_group', 'site', 'location', 'name', 'facility_id', 'tenant_group', 'tenant', 'status',
262 'role', 'serial', 'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth',
263 'outer_unit', 'comments', 'tags',
264 ]
265 help_texts = {
266 'site': "The site at which the rack exists",
267 'name': "Organizational rack name",
268 'facility_id': "The unique rack ID assigned by the facility",
269 'u_height': "Height in rack units",
270 }
271 widgets = {
272 'status': StaticSelect(),
273 'type': StaticSelect(),
274 'width': StaticSelect(),
275 'outer_unit': StaticSelect(),
276 }
277
278
279 class RackReservationForm(TenancyForm, NetBoxModelForm):
280 region = DynamicModelChoiceField(
281 queryset=Region.objects.all(),
282 required=False,
283 initial_params={
284 'sites': '$site'
285 }
286 )
287 site_group = DynamicModelChoiceField(
288 queryset=SiteGroup.objects.all(),
289 required=False,
290 initial_params={
291 'sites': '$site'
292 }
293 )
294 site = DynamicModelChoiceField(
295 queryset=Site.objects.all(),
296 required=False,
297 query_params={
298 'region_id': '$region',
299 'group_id': '$site_group',
300 }
301 )
302 location = DynamicModelChoiceField(
303 queryset=Location.objects.all(),
304 required=False,
305 query_params={
306 'site_id': '$site'
307 }
308 )
309 rack = DynamicModelChoiceField(
310 queryset=Rack.objects.all(),
311 query_params={
312 'site_id': '$site',
313 'location_id': '$location',
314 }
315 )
316 units = NumericArrayField(
317 base_field=forms.IntegerField(),
318 help_text="Comma-separated list of numeric unit IDs. A range may be specified using a hyphen."
319 )
320 user = forms.ModelChoiceField(
321 queryset=User.objects.order_by(
322 'username'
323 ),
324 widget=StaticSelect()
325 )
326
327 fieldsets = (
328 ('Reservation', ('region', 'site_group', 'site', 'location', 'rack', 'units', 'user', 'description', 'tags')),
329 ('Tenancy', ('tenant_group', 'tenant')),
330 )
331
332 class Meta:
333 model = RackReservation
334 fields = [
335 'region', 'site_group', 'site', 'location', 'rack', 'units', 'user', 'tenant_group', 'tenant',
336 'description', 'tags',
337 ]
338
339
340 class ManufacturerForm(NetBoxModelForm):
341 slug = SlugField()
342
343 class Meta:
344 model = Manufacturer
345 fields = [
346 'name', 'slug', 'description', 'tags',
347 ]
348
349
350 class DeviceTypeForm(NetBoxModelForm):
351 manufacturer = DynamicModelChoiceField(
352 queryset=Manufacturer.objects.all()
353 )
354 slug = SlugField(
355 slug_source='model'
356 )
357 comments = CommentField()
358
359 fieldsets = (
360 ('Device Type', (
361 'manufacturer', 'model', 'slug', 'part_number', 'tags',
362 )),
363 ('Chassis', (
364 'u_height', 'is_full_depth', 'subdevice_role', 'airflow',
365 )),
366 ('Images', ('front_image', 'rear_image')),
367 )
368
369 class Meta:
370 model = DeviceType
371 fields = [
372 'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role', 'airflow',
373 'front_image', 'rear_image', 'comments', 'tags',
374 ]
375 widgets = {
376 'subdevice_role': StaticSelect(),
377 'front_image': ClearableFileInput(attrs={
378 'accept': DEVICETYPE_IMAGE_FORMATS
379 }),
380 'rear_image': ClearableFileInput(attrs={
381 'accept': DEVICETYPE_IMAGE_FORMATS
382 })
383 }
384
385
386 class ModuleTypeForm(NetBoxModelForm):
387 manufacturer = DynamicModelChoiceField(
388 queryset=Manufacturer.objects.all()
389 )
390 comments = CommentField()
391
392 fieldsets = (
393 ('Module Type', (
394 'manufacturer', 'model', 'part_number', 'tags',
395 )),
396 )
397
398 class Meta:
399 model = ModuleType
400 fields = [
401 'manufacturer', 'model', 'part_number', 'comments', 'tags',
402 ]
403
404
405 class DeviceRoleForm(NetBoxModelForm):
406 slug = SlugField()
407
408 class Meta:
409 model = DeviceRole
410 fields = [
411 'name', 'slug', 'color', 'vm_role', 'description', 'tags',
412 ]
413
414
415 class PlatformForm(NetBoxModelForm):
416 manufacturer = DynamicModelChoiceField(
417 queryset=Manufacturer.objects.all(),
418 required=False
419 )
420 slug = SlugField(
421 max_length=64
422 )
423
424 class Meta:
425 model = Platform
426 fields = [
427 'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description', 'tags',
428 ]
429 widgets = {
430 'napalm_args': SmallTextarea(),
431 }
432
433
434 class DeviceForm(TenancyForm, NetBoxModelForm):
435 region = DynamicModelChoiceField(
436 queryset=Region.objects.all(),
437 required=False,
438 initial_params={
439 'sites': '$site'
440 }
441 )
442 site_group = DynamicModelChoiceField(
443 queryset=SiteGroup.objects.all(),
444 required=False,
445 initial_params={
446 'sites': '$site'
447 }
448 )
449 site = DynamicModelChoiceField(
450 queryset=Site.objects.all(),
451 query_params={
452 'region_id': '$region',
453 'group_id': '$site_group',
454 }
455 )
456 location = DynamicModelChoiceField(
457 queryset=Location.objects.all(),
458 required=False,
459 query_params={
460 'site_id': '$site'
461 },
462 initial_params={
463 'racks': '$rack'
464 }
465 )
466 rack = DynamicModelChoiceField(
467 queryset=Rack.objects.all(),
468 required=False,
469 query_params={
470 'site_id': '$site',
471 'location_id': '$location',
472 }
473 )
474 position = forms.DecimalField(
475 required=False,
476 help_text="The lowest-numbered unit occupied by the device",
477 widget=APISelect(
478 api_url='/api/dcim/racks/{{rack}}/elevation/',
479 attrs={
480 'disabled-indicator': 'device',
481 'data-dynamic-params': '[{"fieldName":"face","queryParam":"face"}]'
482 }
483 )
484 )
485 manufacturer = DynamicModelChoiceField(
486 queryset=Manufacturer.objects.all(),
487 required=False,
488 initial_params={
489 'device_types': '$device_type'
490 }
491 )
492 device_type = DynamicModelChoiceField(
493 queryset=DeviceType.objects.all(),
494 query_params={
495 'manufacturer_id': '$manufacturer'
496 }
497 )
498 device_role = DynamicModelChoiceField(
499 queryset=DeviceRole.objects.all()
500 )
501 platform = DynamicModelChoiceField(
502 queryset=Platform.objects.all(),
503 required=False,
504 query_params={
505 'manufacturer_id': ['$manufacturer', 'null']
506 }
507 )
508 cluster_group = DynamicModelChoiceField(
509 queryset=ClusterGroup.objects.all(),
510 required=False,
511 null_option='None',
512 initial_params={
513 'clusters': '$cluster'
514 }
515 )
516 cluster = DynamicModelChoiceField(
517 queryset=Cluster.objects.all(),
518 required=False,
519 query_params={
520 'group_id': '$cluster_group'
521 }
522 )
523 comments = CommentField()
524 local_context_data = JSONField(
525 required=False,
526 label=''
527 )
528 virtual_chassis = DynamicModelChoiceField(
529 queryset=VirtualChassis.objects.all(),
530 required=False
531 )
532 vc_position = forms.IntegerField(
533 required=False,
534 label='Position',
535 help_text="The position in the virtual chassis this device is identified by"
536 )
537 vc_priority = forms.IntegerField(
538 required=False,
539 label='Priority',
540 help_text="The priority of the device in the virtual chassis"
541 )
542
543 class Meta:
544 model = Device
545 fields = [
546 'name', 'device_role', 'device_type', 'serial', 'asset_tag', 'region', 'site_group', 'site', 'rack',
547 'location', 'position', 'face', 'status', 'airflow', 'platform', 'primary_ip4', 'primary_ip6',
548 'cluster_group', 'cluster', 'tenant_group', 'tenant', 'virtual_chassis', 'vc_position', 'vc_priority',
549 'comments', 'tags', 'local_context_data'
550 ]
551 help_texts = {
552 'device_role': "The function this device serves",
553 'serial': "Chassis serial number",
554 'local_context_data': "Local config context data overwrites all source contexts in the final rendered "
555 "config context",
556 }
557 widgets = {
558 'face': StaticSelect(),
559 'status': StaticSelect(),
560 'airflow': StaticSelect(),
561 'primary_ip4': StaticSelect(),
562 'primary_ip6': StaticSelect(),
563 }
564
565 def __init__(self, *args, **kwargs):
566 super().__init__(*args, **kwargs)
567
568 if self.instance.pk:
569
570 # Compile list of choices for primary IPv4 and IPv6 addresses
571 for family in [4, 6]:
572 ip_choices = [(None, '---------')]
573
574 # Gather PKs of all interfaces belonging to this Device or a peer VirtualChassis member
575 interface_ids = self.instance.vc_interfaces(if_master=False).values_list('pk', flat=True)
576
577 # Collect interface IPs
578 interface_ips = IPAddress.objects.filter(
579 address__family=family,
580 assigned_object_type=ContentType.objects.get_for_model(Interface),
581 assigned_object_id__in=interface_ids
582 ).prefetch_related('assigned_object')
583 if interface_ips:
584 ip_list = [(ip.id, f'{ip.address} ({ip.assigned_object})') for ip in interface_ips]
585 ip_choices.append(('Interface IPs', ip_list))
586 # Collect NAT IPs
587 nat_ips = IPAddress.objects.prefetch_related('nat_inside').filter(
588 address__family=family,
589 nat_inside__assigned_object_type=ContentType.objects.get_for_model(Interface),
590 nat_inside__assigned_object_id__in=interface_ids
591 ).prefetch_related('assigned_object')
592 if nat_ips:
593 ip_list = [(ip.id, f'{ip.address} (NAT)') for ip in nat_ips]
594 ip_choices.append(('NAT IPs', ip_list))
595 self.fields['primary_ip{}'.format(family)].choices = ip_choices
596
597 # If editing an existing device, exclude it from the list of occupied rack units. This ensures that a device
598 # can be flipped from one face to another.
599 self.fields['position'].widget.add_query_param('exclude', self.instance.pk)
600
601 # Disable rack assignment if this is a child device installed in a parent device
602 if self.instance.device_type.is_child_device and hasattr(self.instance, 'parent_bay'):
603 self.fields['site'].disabled = True
604 self.fields['rack'].disabled = True
605 self.initial['site'] = self.instance.parent_bay.device.site_id
606 self.initial['rack'] = self.instance.parent_bay.device.rack_id
607
608 else:
609
610 # An object that doesn't exist yet can't have any IPs assigned to it
611 self.fields['primary_ip4'].choices = []
612 self.fields['primary_ip4'].widget.attrs['readonly'] = True
613 self.fields['primary_ip6'].choices = []
614 self.fields['primary_ip6'].widget.attrs['readonly'] = True
615
616 # Rack position
617 position = self.data.get('position') or self.initial.get('position')
618 if position:
619 self.fields['position'].widget.choices = [(position, f'U{position}')]
620
621
622 class ModuleForm(NetBoxModelForm):
623 device = DynamicModelChoiceField(
624 queryset=Device.objects.all(),
625 initial_params={
626 'modulebays': '$module_bay'
627 }
628 )
629 module_bay = DynamicModelChoiceField(
630 queryset=ModuleBay.objects.all(),
631 query_params={
632 'device_id': '$device'
633 }
634 )
635 manufacturer = DynamicModelChoiceField(
636 queryset=Manufacturer.objects.all(),
637 required=False,
638 initial_params={
639 'module_types': '$module_type'
640 }
641 )
642 module_type = DynamicModelChoiceField(
643 queryset=ModuleType.objects.all(),
644 query_params={
645 'manufacturer_id': '$manufacturer'
646 }
647 )
648 comments = CommentField()
649 replicate_components = forms.BooleanField(
650 required=False,
651 initial=True,
652 help_text="Automatically populate components associated with this module type"
653 )
654
655 adopt_components = forms.BooleanField(
656 required=False,
657 initial=False,
658 help_text="Adopt already existing components"
659 )
660
661 fieldsets = (
662 ('Module', (
663 'device', 'module_bay', 'manufacturer', 'module_type', 'tags',
664 )),
665 ('Hardware', (
666 'serial', 'asset_tag', 'replicate_components', 'adopt_components',
667 )),
668 )
669
670 class Meta:
671 model = Module
672 fields = [
673 'device', 'module_bay', 'manufacturer', 'module_type', 'serial', 'asset_tag', 'tags',
674 'replicate_components', 'adopt_components', 'comments',
675 ]
676
677 def __init__(self, *args, **kwargs):
678 super().__init__(*args, **kwargs)
679
680 if self.instance.pk:
681 self.fields['replicate_components'].initial = False
682 self.fields['replicate_components'].disabled = True
683 self.fields['adopt_components'].initial = False
684 self.fields['adopt_components'].disabled = True
685
686 def save(self, *args, **kwargs):
687
688 # If replicate_components is False, disable automatic component replication on the instance
689 if self.instance.pk or not self.cleaned_data['replicate_components']:
690 self.instance._disable_replication = True
691
692 if self.cleaned_data['adopt_components']:
693 self.instance._adopt_components = True
694
695 return super().save(*args, **kwargs)
696
697 def clean(self):
698 super().clean()
699
700 replicate_components = self.cleaned_data.get("replicate_components")
701 adopt_components = self.cleaned_data.get("adopt_components")
702 device = self.cleaned_data['device']
703 module_type = self.cleaned_data['module_type']
704 module_bay = self.cleaned_data['module_bay']
705
706 # Bail out if we are not installing a new module or if we are not replicating components
707 if self.instance.pk or not replicate_components:
708 return
709
710 for templates, component_attribute in [
711 ("consoleporttemplates", "consoleports"),
712 ("consoleserverporttemplates", "consoleserverports"),
713 ("interfacetemplates", "interfaces"),
714 ("powerporttemplates", "powerports"),
715 ("poweroutlettemplates", "poweroutlets"),
716 ("rearporttemplates", "rearports"),
717 ("frontporttemplates", "frontports")
718 ]:
719 # Prefetch installed components
720 installed_components = {
721 component.name: component for component in getattr(device, component_attribute).all()
722 }
723
724 # Get the templates for the module type.
725 for template in getattr(module_type, templates).all():
726 # Installing modules with placeholders require that the bay has a position value
727 if MODULE_TOKEN in template.name and not module_bay.position:
728 raise forms.ValidationError(
729 "Cannot install module with placeholder values in a module bay with no position defined"
730 )
731
732 resolved_name = template.name.replace(MODULE_TOKEN, module_bay.position)
733 existing_item = installed_components.get(resolved_name)
734
735 # It is not possible to adopt components already belonging to a module
736 if adopt_components and existing_item and existing_item.module:
737 raise forms.ValidationError(
738 f"Cannot adopt {template.component_model.__name__} '{resolved_name}' as it already belongs "
739 f"to a module"
740 )
741
742 # If we are not adopting components we error if the component exists
743 if not adopt_components and resolved_name in installed_components:
744 raise forms.ValidationError(
745 f"{template.component_model.__name__} - {resolved_name} already exists"
746 )
747
748
749 class CableForm(TenancyForm, NetBoxModelForm):
750
751 class Meta:
752 model = Cable
753 fields = [
754 'type', 'status', 'tenant_group', 'tenant', 'label', 'color', 'length', 'length_unit', 'tags',
755 ]
756 widgets = {
757 'status': StaticSelect,
758 'type': StaticSelect,
759 'length_unit': StaticSelect,
760 }
761 error_messages = {
762 'length': {
763 'max_value': 'Maximum length is 32767 (any unit)'
764 }
765 }
766
767
768 class PowerPanelForm(NetBoxModelForm):
769 region = DynamicModelChoiceField(
770 queryset=Region.objects.all(),
771 required=False,
772 initial_params={
773 'sites': '$site'
774 }
775 )
776 site_group = DynamicModelChoiceField(
777 queryset=SiteGroup.objects.all(),
778 required=False,
779 initial_params={
780 'sites': '$site'
781 }
782 )
783 site = DynamicModelChoiceField(
784 queryset=Site.objects.all(),
785 query_params={
786 'region_id': '$region',
787 'group_id': '$site_group',
788 }
789 )
790 location = DynamicModelChoiceField(
791 queryset=Location.objects.all(),
792 required=False,
793 query_params={
794 'site_id': '$site'
795 }
796 )
797
798 fieldsets = (
799 ('Power Panel', ('region', 'site_group', 'site', 'location', 'name', 'tags')),
800 )
801
802 class Meta:
803 model = PowerPanel
804 fields = [
805 'region', 'site_group', 'site', 'location', 'name', 'tags',
806 ]
807
808
809 class PowerFeedForm(NetBoxModelForm):
810 region = DynamicModelChoiceField(
811 queryset=Region.objects.all(),
812 required=False,
813 initial_params={
814 'sites__powerpanel': '$power_panel'
815 }
816 )
817 site_group = DynamicModelChoiceField(
818 queryset=SiteGroup.objects.all(),
819 required=False,
820 initial_params={
821 'sites': '$site'
822 }
823 )
824 site = DynamicModelChoiceField(
825 queryset=Site.objects.all(),
826 required=False,
827 initial_params={
828 'powerpanel': '$power_panel'
829 },
830 query_params={
831 'region_id': '$region',
832 'group_id': '$site_group',
833 }
834 )
835 power_panel = DynamicModelChoiceField(
836 queryset=PowerPanel.objects.all(),
837 query_params={
838 'site_id': '$site'
839 }
840 )
841 rack = DynamicModelChoiceField(
842 queryset=Rack.objects.all(),
843 required=False,
844 query_params={
845 'site_id': '$site'
846 }
847 )
848 comments = CommentField()
849
850 fieldsets = (
851 ('Power Panel', ('region', 'site', 'power_panel')),
852 ('Power Feed', ('rack', 'name', 'status', 'type', 'mark_connected', 'tags')),
853 ('Characteristics', ('supply', 'voltage', 'amperage', 'phase', 'max_utilization')),
854 )
855
856 class Meta:
857 model = PowerFeed
858 fields = [
859 'region', 'site_group', 'site', 'power_panel', 'rack', 'name', 'status', 'type', 'mark_connected', 'supply',
860 'phase', 'voltage', 'amperage', 'max_utilization', 'comments', 'tags',
861 ]
862 widgets = {
863 'status': StaticSelect(),
864 'type': StaticSelect(),
865 'supply': StaticSelect(),
866 'phase': StaticSelect(),
867 }
868
869
870 #
871 # Virtual chassis
872 #
873
874 class VirtualChassisForm(NetBoxModelForm):
875 master = forms.ModelChoiceField(
876 queryset=Device.objects.all(),
877 required=False,
878 )
879
880 class Meta:
881 model = VirtualChassis
882 fields = [
883 'name', 'domain', 'master', 'tags',
884 ]
885 widgets = {
886 'master': SelectWithPK(),
887 }
888
889 def __init__(self, *args, **kwargs):
890 super().__init__(*args, **kwargs)
891
892 self.fields['master'].queryset = Device.objects.filter(virtual_chassis=self.instance)
893
894
895 class DeviceVCMembershipForm(forms.ModelForm):
896 class Meta:
897 model = Device
898 fields = [
899 'vc_position', 'vc_priority',
900 ]
901 labels = {
902 'vc_position': 'Position',
903 'vc_priority': 'Priority',
904 }
905
906 def __init__(self, validate_vc_position=False, *args, **kwargs):
907 super().__init__(*args, **kwargs)
908
909 # Require VC position (only required when the Device is a VirtualChassis member)
910 self.fields['vc_position'].required = True
911
912 # Add bootstrap classes to form elements.
913 self.fields['vc_position'].widget.attrs = {'class': 'form-control'}
914 self.fields['vc_priority'].widget.attrs = {'class': 'form-control'}
915
916 # Validation of vc_position is optional. This is only required when adding a new member to an existing
917 # VirtualChassis. Otherwise, vc_position validation is handled by BaseVCMemberFormSet.
918 self.validate_vc_position = validate_vc_position
919
920 def clean_vc_position(self):
921 vc_position = self.cleaned_data['vc_position']
922
923 if self.validate_vc_position:
924 conflicting_members = Device.objects.filter(
925 virtual_chassis=self.instance.virtual_chassis,
926 vc_position=vc_position
927 )
928 if conflicting_members.exists():
929 raise forms.ValidationError(
930 'A virtual chassis member already exists in position {}.'.format(vc_position)
931 )
932
933 return vc_position
934
935
936 class VCMemberSelectForm(BootstrapMixin, forms.Form):
937 region = DynamicModelChoiceField(
938 queryset=Region.objects.all(),
939 required=False,
940 initial_params={
941 'sites': '$site'
942 }
943 )
944 site_group = DynamicModelChoiceField(
945 queryset=SiteGroup.objects.all(),
946 required=False,
947 initial_params={
948 'sites': '$site'
949 }
950 )
951 site = DynamicModelChoiceField(
952 queryset=Site.objects.all(),
953 required=False,
954 query_params={
955 'region_id': '$region',
956 'group_id': '$site_group',
957 }
958 )
959 rack = DynamicModelChoiceField(
960 queryset=Rack.objects.all(),
961 required=False,
962 null_option='None',
963 query_params={
964 'site_id': '$site'
965 }
966 )
967 device = DynamicModelChoiceField(
968 queryset=Device.objects.all(),
969 query_params={
970 'site_id': '$site',
971 'rack_id': '$rack',
972 'virtual_chassis_id': 'null',
973 }
974 )
975
976 def clean_device(self):
977 device = self.cleaned_data['device']
978 if device.virtual_chassis is not None:
979 raise forms.ValidationError(
980 f"Device {device} is already assigned to a virtual chassis."
981 )
982 return device
983
984
985 #
986 # Device component templates
987 #
988
989
990 class ConsolePortTemplateForm(BootstrapMixin, forms.ModelForm):
991 class Meta:
992 model = ConsolePortTemplate
993 fields = [
994 'device_type', 'module_type', 'name', 'label', 'type', 'description',
995 ]
996 widgets = {
997 'device_type': forms.HiddenInput(),
998 'module_type': forms.HiddenInput(),
999 'type': StaticSelect,
1000 }
1001
1002
1003 class ConsoleServerPortTemplateForm(BootstrapMixin, forms.ModelForm):
1004 class Meta:
1005 model = ConsoleServerPortTemplate
1006 fields = [
1007 'device_type', 'module_type', 'name', 'label', 'type', 'description',
1008 ]
1009 widgets = {
1010 'device_type': forms.HiddenInput(),
1011 'module_type': forms.HiddenInput(),
1012 'type': StaticSelect,
1013 }
1014
1015
1016 class PowerPortTemplateForm(BootstrapMixin, forms.ModelForm):
1017 class Meta:
1018 model = PowerPortTemplate
1019 fields = [
1020 'device_type', 'module_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description',
1021 ]
1022 widgets = {
1023 'device_type': forms.HiddenInput(),
1024 'module_type': forms.HiddenInput(),
1025 'type': StaticSelect(),
1026 }
1027
1028
1029 class PowerOutletTemplateForm(BootstrapMixin, forms.ModelForm):
1030 power_port = DynamicModelChoiceField(
1031 queryset=PowerPortTemplate.objects.all(),
1032 required=False,
1033 query_params={
1034 'devicetype_id': '$device_type',
1035 }
1036 )
1037
1038 class Meta:
1039 model = PowerOutletTemplate
1040 fields = [
1041 'device_type', 'module_type', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description',
1042 ]
1043 widgets = {
1044 'device_type': forms.HiddenInput(),
1045 'module_type': forms.HiddenInput(),
1046 'type': StaticSelect(),
1047 'feed_leg': StaticSelect(),
1048 }
1049
1050
1051 class InterfaceTemplateForm(BootstrapMixin, forms.ModelForm):
1052 class Meta:
1053 model = InterfaceTemplate
1054 fields = [
1055 'device_type', 'module_type', 'name', 'label', 'type', 'mgmt_only', 'description', 'poe_mode', 'poe_type',
1056 ]
1057 widgets = {
1058 'device_type': forms.HiddenInput(),
1059 'module_type': forms.HiddenInput(),
1060 'type': StaticSelect(),
1061 'poe_mode': StaticSelect(),
1062 'poe_type': StaticSelect(),
1063 }
1064
1065
1066 class FrontPortTemplateForm(BootstrapMixin, forms.ModelForm):
1067 rear_port = DynamicModelChoiceField(
1068 queryset=RearPortTemplate.objects.all(),
1069 required=False,
1070 query_params={
1071 'devicetype_id': '$device_type',
1072 'moduletype_id': '$module_type',
1073 }
1074 )
1075
1076 class Meta:
1077 model = FrontPortTemplate
1078 fields = [
1079 'device_type', 'module_type', 'name', 'label', 'type', 'color', 'rear_port', 'rear_port_position',
1080 'description',
1081 ]
1082 widgets = {
1083 'device_type': forms.HiddenInput(),
1084 'module_type': forms.HiddenInput(),
1085 'type': StaticSelect(),
1086 }
1087
1088
1089 class RearPortTemplateForm(BootstrapMixin, forms.ModelForm):
1090 class Meta:
1091 model = RearPortTemplate
1092 fields = [
1093 'device_type', 'module_type', 'name', 'label', 'type', 'color', 'positions', 'description',
1094 ]
1095 widgets = {
1096 'device_type': forms.HiddenInput(),
1097 'module_type': forms.HiddenInput(),
1098 'type': StaticSelect(),
1099 }
1100
1101
1102 class ModuleBayTemplateForm(BootstrapMixin, forms.ModelForm):
1103 class Meta:
1104 model = ModuleBayTemplate
1105 fields = [
1106 'device_type', 'name', 'label', 'position', 'description',
1107 ]
1108 widgets = {
1109 'device_type': forms.HiddenInput(),
1110 }
1111
1112
1113 class DeviceBayTemplateForm(BootstrapMixin, forms.ModelForm):
1114 class Meta:
1115 model = DeviceBayTemplate
1116 fields = [
1117 'device_type', 'name', 'label', 'description',
1118 ]
1119 widgets = {
1120 'device_type': forms.HiddenInput(),
1121 }
1122
1123
1124 class InventoryItemTemplateForm(BootstrapMixin, forms.ModelForm):
1125 parent = DynamicModelChoiceField(
1126 queryset=InventoryItemTemplate.objects.all(),
1127 required=False,
1128 query_params={
1129 'devicetype_id': '$device_type'
1130 }
1131 )
1132 role = DynamicModelChoiceField(
1133 queryset=InventoryItemRole.objects.all(),
1134 required=False
1135 )
1136 manufacturer = DynamicModelChoiceField(
1137 queryset=Manufacturer.objects.all(),
1138 required=False
1139 )
1140 component_type = ContentTypeChoiceField(
1141 queryset=ContentType.objects.all(),
1142 limit_choices_to=MODULAR_COMPONENT_TEMPLATE_MODELS,
1143 required=False,
1144 widget=forms.HiddenInput
1145 )
1146 component_id = forms.IntegerField(
1147 required=False,
1148 widget=forms.HiddenInput
1149 )
1150
1151 class Meta:
1152 model = InventoryItemTemplate
1153 fields = [
1154 'device_type', 'parent', 'name', 'label', 'role', 'manufacturer', 'part_id', 'description',
1155 'component_type', 'component_id',
1156 ]
1157 widgets = {
1158 'device_type': forms.HiddenInput(),
1159 }
1160
1161
1162 #
1163 # Device components
1164 #
1165
1166 class ConsolePortForm(NetBoxModelForm):
1167 module = DynamicModelChoiceField(
1168 queryset=Module.objects.all(),
1169 required=False,
1170 query_params={
1171 'device_id': '$device',
1172 }
1173 )
1174
1175 class Meta:
1176 model = ConsolePort
1177 fields = [
1178 'device', 'module', 'name', 'label', 'type', 'speed', 'mark_connected', 'description', 'tags',
1179 ]
1180 widgets = {
1181 'device': forms.HiddenInput(),
1182 'type': StaticSelect(),
1183 'speed': StaticSelect(),
1184 }
1185
1186
1187 class ConsoleServerPortForm(NetBoxModelForm):
1188 module = DynamicModelChoiceField(
1189 queryset=Module.objects.all(),
1190 required=False,
1191 query_params={
1192 'device_id': '$device',
1193 }
1194 )
1195
1196 class Meta:
1197 model = ConsoleServerPort
1198 fields = [
1199 'device', 'module', 'name', 'label', 'type', 'speed', 'mark_connected', 'description', 'tags',
1200 ]
1201 widgets = {
1202 'device': forms.HiddenInput(),
1203 'type': StaticSelect(),
1204 'speed': StaticSelect(),
1205 }
1206
1207
1208 class PowerPortForm(NetBoxModelForm):
1209 module = DynamicModelChoiceField(
1210 queryset=Module.objects.all(),
1211 required=False,
1212 query_params={
1213 'device_id': '$device',
1214 }
1215 )
1216
1217 class Meta:
1218 model = PowerPort
1219 fields = [
1220 'device', 'module', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'mark_connected',
1221 'description',
1222 'tags',
1223 ]
1224 widgets = {
1225 'device': forms.HiddenInput(),
1226 'type': StaticSelect(),
1227 }
1228
1229
1230 class PowerOutletForm(NetBoxModelForm):
1231 module = DynamicModelChoiceField(
1232 queryset=Module.objects.all(),
1233 required=False,
1234 query_params={
1235 'device_id': '$device',
1236 }
1237 )
1238 power_port = DynamicModelChoiceField(
1239 queryset=PowerPort.objects.all(),
1240 required=False,
1241 query_params={
1242 'device_id': '$device',
1243 }
1244 )
1245
1246 class Meta:
1247 model = PowerOutlet
1248 fields = [
1249 'device', 'module', 'name', 'label', 'type', 'power_port', 'feed_leg', 'mark_connected', 'description',
1250 'tags',
1251 ]
1252 widgets = {
1253 'device': forms.HiddenInput(),
1254 'type': StaticSelect(),
1255 'feed_leg': StaticSelect(),
1256 }
1257
1258
1259 class InterfaceForm(InterfaceCommonForm, NetBoxModelForm):
1260 module = DynamicModelChoiceField(
1261 queryset=Module.objects.all(),
1262 required=False,
1263 query_params={
1264 'device_id': '$device',
1265 }
1266 )
1267 parent = DynamicModelChoiceField(
1268 queryset=Interface.objects.all(),
1269 required=False,
1270 label='Parent interface',
1271 query_params={
1272 'device_id': '$device',
1273 }
1274 )
1275 bridge = DynamicModelChoiceField(
1276 queryset=Interface.objects.all(),
1277 required=False,
1278 label='Bridged interface',
1279 query_params={
1280 'device_id': '$device',
1281 }
1282 )
1283 lag = DynamicModelChoiceField(
1284 queryset=Interface.objects.all(),
1285 required=False,
1286 label='LAG interface',
1287 query_params={
1288 'device_id': '$device',
1289 'type': 'lag',
1290 }
1291 )
1292 wireless_lan_group = DynamicModelChoiceField(
1293 queryset=WirelessLANGroup.objects.all(),
1294 required=False,
1295 label='Wireless LAN group'
1296 )
1297 wireless_lans = DynamicModelMultipleChoiceField(
1298 queryset=WirelessLAN.objects.all(),
1299 required=False,
1300 label='Wireless LANs',
1301 query_params={
1302 'group_id': '$wireless_lan_group',
1303 }
1304 )
1305 vlan_group = DynamicModelChoiceField(
1306 queryset=VLANGroup.objects.all(),
1307 required=False,
1308 label='VLAN group'
1309 )
1310 untagged_vlan = DynamicModelChoiceField(
1311 queryset=VLAN.objects.all(),
1312 required=False,
1313 label='Untagged VLAN',
1314 query_params={
1315 'group_id': '$vlan_group',
1316 'available_on_device': '$device',
1317 }
1318 )
1319 tagged_vlans = DynamicModelMultipleChoiceField(
1320 queryset=VLAN.objects.all(),
1321 required=False,
1322 label='Tagged VLANs',
1323 query_params={
1324 'group_id': '$vlan_group',
1325 'available_on_device': '$device',
1326 }
1327 )
1328 vrf = DynamicModelChoiceField(
1329 queryset=VRF.objects.all(),
1330 required=False,
1331 label='VRF'
1332 )
1333
1334 fieldsets = (
1335 ('Interface', ('device', 'module', 'name', 'type', 'speed', 'duplex', 'label', 'description', 'tags')),
1336 ('Addressing', ('vrf', 'mac_address', 'wwn')),
1337 ('Operation', ('mtu', 'tx_power', 'enabled', 'mgmt_only', 'mark_connected')),
1338 ('Related Interfaces', ('parent', 'bridge', 'lag')),
1339 ('PoE', ('poe_mode', 'poe_type')),
1340 ('802.1Q Switching', ('mode', 'vlan_group', 'untagged_vlan', 'tagged_vlans')),
1341 ('Wireless', (
1342 'rf_role', 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'wireless_lan_group', 'wireless_lans',
1343 )),
1344 )
1345
1346 class Meta:
1347 model = Interface
1348 fields = [
1349 'device', 'module', 'name', 'label', 'type', 'speed', 'duplex', 'enabled', 'parent', 'bridge', 'lag',
1350 'mac_address', 'wwn', 'mtu', 'mgmt_only', 'mark_connected', 'description', 'poe_mode', 'poe_type', 'mode',
1351 'rf_role', 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'tx_power', 'wireless_lans',
1352 'untagged_vlan', 'tagged_vlans', 'vrf', 'tags',
1353 ]
1354 widgets = {
1355 'device': forms.HiddenInput(),
1356 'type': StaticSelect(),
1357 'speed': SelectSpeedWidget(),
1358 'poe_mode': StaticSelect(),
1359 'poe_type': StaticSelect(),
1360 'duplex': StaticSelect(),
1361 'mode': StaticSelect(),
1362 'rf_role': StaticSelect(),
1363 'rf_channel': StaticSelect(),
1364 }
1365 labels = {
1366 'mode': '802.1Q Mode',
1367 }
1368 help_texts = {
1369 'mode': INTERFACE_MODE_HELP_TEXT,
1370 'rf_channel_frequency': "Populated by selected channel (if set)",
1371 'rf_channel_width': "Populated by selected channel (if set)",
1372 }
1373
1374 def __init__(self, *args, **kwargs):
1375 super().__init__(*args, **kwargs)
1376
1377 # Restrict LAG/bridge interface assignment by device/VC
1378 device_id = self.data['device'] if self.is_bound else self.initial.get('device')
1379 device = Device.objects.filter(pk=device_id).first()
1380 if device and device.virtual_chassis and device.virtual_chassis.master:
1381 self.fields['lag'].widget.add_query_param('device_id', device.virtual_chassis.master.pk)
1382 self.fields['bridge'].widget.add_query_param('device_id', device.virtual_chassis.master.pk)
1383
1384
1385 class FrontPortForm(NetBoxModelForm):
1386 module = DynamicModelChoiceField(
1387 queryset=Module.objects.all(),
1388 required=False,
1389 query_params={
1390 'device_id': '$device',
1391 }
1392 )
1393 rear_port = DynamicModelChoiceField(
1394 queryset=RearPort.objects.all(),
1395 query_params={
1396 'device_id': '$device',
1397 }
1398 )
1399
1400 class Meta:
1401 model = FrontPort
1402 fields = [
1403 'device', 'module', 'name', 'label', 'type', 'color', 'rear_port', 'rear_port_position', 'mark_connected',
1404 'description', 'tags',
1405 ]
1406 widgets = {
1407 'device': forms.HiddenInput(),
1408 'type': StaticSelect(),
1409 }
1410
1411
1412 class RearPortForm(NetBoxModelForm):
1413 module = DynamicModelChoiceField(
1414 queryset=Module.objects.all(),
1415 required=False,
1416 query_params={
1417 'device_id': '$device',
1418 }
1419 )
1420
1421 class Meta:
1422 model = RearPort
1423 fields = [
1424 'device', 'module', 'name', 'label', 'type', 'color', 'positions', 'mark_connected', 'description', 'tags',
1425 ]
1426 widgets = {
1427 'device': forms.HiddenInput(),
1428 'type': StaticSelect(),
1429 }
1430
1431
1432 class ModuleBayForm(NetBoxModelForm):
1433
1434 class Meta:
1435 model = ModuleBay
1436 fields = [
1437 'device', 'name', 'label', 'position', 'description', 'tags',
1438 ]
1439 widgets = {
1440 'device': forms.HiddenInput(),
1441 }
1442
1443
1444 class DeviceBayForm(NetBoxModelForm):
1445
1446 class Meta:
1447 model = DeviceBay
1448 fields = [
1449 'device', 'name', 'label', 'description', 'tags',
1450 ]
1451 widgets = {
1452 'device': forms.HiddenInput(),
1453 }
1454
1455
1456 class PopulateDeviceBayForm(BootstrapMixin, forms.Form):
1457 installed_device = forms.ModelChoiceField(
1458 queryset=Device.objects.all(),
1459 label='Child Device',
1460 help_text="Child devices must first be created and assigned to the site/rack of the parent device.",
1461 widget=StaticSelect(),
1462 )
1463
1464 def __init__(self, device_bay, *args, **kwargs):
1465 super().__init__(*args, **kwargs)
1466
1467 self.fields['installed_device'].queryset = Device.objects.filter(
1468 site=device_bay.device.site,
1469 rack=device_bay.device.rack,
1470 parent_bay__isnull=True,
1471 device_type__u_height=0,
1472 device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD
1473 ).exclude(pk=device_bay.device.pk)
1474
1475
1476 class InventoryItemForm(NetBoxModelForm):
1477 device = DynamicModelChoiceField(
1478 queryset=Device.objects.all()
1479 )
1480 parent = DynamicModelChoiceField(
1481 queryset=InventoryItem.objects.all(),
1482 required=False,
1483 query_params={
1484 'device_id': '$device'
1485 }
1486 )
1487 role = DynamicModelChoiceField(
1488 queryset=InventoryItemRole.objects.all(),
1489 required=False
1490 )
1491 manufacturer = DynamicModelChoiceField(
1492 queryset=Manufacturer.objects.all(),
1493 required=False
1494 )
1495 component_type = ContentTypeChoiceField(
1496 queryset=ContentType.objects.all(),
1497 limit_choices_to=MODULAR_COMPONENT_MODELS,
1498 required=False,
1499 widget=forms.HiddenInput
1500 )
1501 component_id = forms.IntegerField(
1502 required=False,
1503 widget=forms.HiddenInput
1504 )
1505
1506 fieldsets = (
1507 ('Inventory Item', ('device', 'parent', 'name', 'label', 'role', 'description', 'tags')),
1508 ('Hardware', ('manufacturer', 'part_id', 'serial', 'asset_tag')),
1509 )
1510
1511 class Meta:
1512 model = InventoryItem
1513 fields = [
1514 'device', 'parent', 'name', 'label', 'role', 'manufacturer', 'part_id', 'serial', 'asset_tag',
1515 'description', 'component_type', 'component_id', 'tags',
1516 ]
1517
1518
1519 #
1520 # Device component roles
1521 #
1522
1523 class InventoryItemRoleForm(NetBoxModelForm):
1524 slug = SlugField()
1525
1526 class Meta:
1527 model = InventoryItemRole
1528 fields = [
1529 'name', 'slug', 'color', 'description', 'tags',
1530 ]
1531
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,2 +313,2 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,3 +313,3 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
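Optionally, a candidate patch can be test-applied before submission to catch malformed hunks or context mismatches early. The sketch below is helper tooling only, not part of the expected answer; it assumes `git` is available on the PATH, that the target repository is checked out locally, and that the candidate patch has been saved to a hypothetical `fix.patch` file.
```python
import subprocess

def patch_applies(repo_dir: str, patch_path: str) -> bool:
    """Return True if `git apply --check` accepts the patch without applying it."""
    result = subprocess.run(
        ["git", "apply", "--check", patch_path],
        cwd=repo_dir,
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        # git reports the reason (bad hunk header, context mismatch, ...) on stderr
        print(result.stderr.strip())
    return result.returncode == 0

# Example usage (paths are placeholders):
# patch_applies("/path/to/checkout", "fix.patch")
```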
| diff --git a/netbox/dcim/forms/models.py b/netbox/dcim/forms/models.py
--- a/netbox/dcim/forms/models.py
+++ b/netbox/dcim/forms/models.py
@@ -1331,6 +1331,12 @@
label='VRF'
     )
 
+ wwn = forms.CharField(
+ empty_value=None,
+ required=False,
+ label='WWN'
+ )
+
fieldsets = (
('Interface', ('device', 'module', 'name', 'type', 'speed', 'duplex', 'label', 'description', 'tags')),
('Addressing', ('vrf', 'mac_address', 'wwn')),
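For context on why this one-hunk change is sufficient: Django's `forms.CharField` normalizes an empty submission to its `empty_value`, which defaults to the empty string, so a form saved with WWN left blank would presumably write `""` over a stored `NULL`, and the change log would then record a spurious `null` → `""` difference. Declaring the `wwn` field explicitly on `InterfaceForm` with `empty_value=None` keeps a blank input as `None`, so an untouched WWN round-trips unchanged. The snippet below is a standalone illustration of that field behavior only; the two form classes are hypothetical and are not NetBox code.
```python
import django
from django.conf import settings

# Minimal configuration so the forms machinery runs outside a Django project
settings.configure()
django.setup()

from django import forms


class DefaultWWNForm(forms.Form):
    # Roughly what an auto-generated optional text field does: empty input -> ""
    wwn = forms.CharField(required=False)


class PatchedWWNForm(forms.Form):
    # Mirrors the patched declaration: empty input -> None
    wwn = forms.CharField(required=False, empty_value=None)


for form_cls in (DefaultWWNForm, PatchedWWNForm):
    form = form_cls(data={})  # form submitted without touching the WWN field
    form.is_valid()
    print(form_cls.__name__, repr(form.cleaned_data["wwn"]))

# DefaultWWNForm ''   -> "" would overwrite a stored NULL and show up in the change log
# PatchedWWNForm None -> matches the stored NULL, so no spurious change is recorded
```
Keeping a blank optional column as `NULL` means the pre- and post-change snapshots compare equal, which is exactly what the issue's expected behavior asks for.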
| {"golden_diff": "diff --git a/netbox/dcim/forms/models.py b/netbox/dcim/forms/models.py\n--- a/netbox/dcim/forms/models.py\n+++ b/netbox/dcim/forms/models.py\n@@ -1331,6 +1331,12 @@\n label='VRF'\n )\n \n+ wwn = forms.CharField(\n+ empty_value=None,\n+ required=False,\n+ label='WWN'\n+ )\n+\n fieldsets = (\n ('Interface', ('device', 'module', 'name', 'type', 'speed', 'duplex', 'label', 'description', 'tags')),\n ('Addressing', ('vrf', 'mac_address', 'wwn')),\n", "issue": "ChangeLog false positive for wwn \n### NetBox version\n\nv3.3.2\n\n### Python version\n\n3.8\n\n### Steps to Reproduce\n\n1. Update a Interface\r\n2. Display the generated change log.\r\n\n\n### Expected Behavior\n\nOnly changed properties are detected as changed.\n\n### Observed Behavior\n\n`wwn` is detected as a changed from `null` to `\"\"`\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom timezone_field import TimeZoneFormField\n\nfrom dcim.choices import *\nfrom dcim.constants import *\nfrom dcim.models import *\nfrom ipam.models import ASN, IPAddress, VLAN, VLANGroup, VRF\nfrom netbox.forms import NetBoxModelForm\nfrom tenancy.forms import TenancyForm\nfrom utilities.forms import (\n APISelect, add_blank_choice, BootstrapMixin, ClearableFileInput, CommentField, ContentTypeChoiceField,\n DynamicModelChoiceField, DynamicModelMultipleChoiceField, JSONField, NumericArrayField, SelectWithPK, SmallTextarea,\n SlugField, StaticSelect, SelectSpeedWidget,\n)\nfrom virtualization.models import Cluster, ClusterGroup\nfrom wireless.models import WirelessLAN, WirelessLANGroup\nfrom .common import InterfaceCommonForm\n\n__all__ = (\n 'CableForm',\n 'ConsolePortForm',\n 'ConsolePortTemplateForm',\n 'ConsoleServerPortForm',\n 'ConsoleServerPortTemplateForm',\n 'DeviceBayForm',\n 'DeviceBayTemplateForm',\n 'DeviceForm',\n 'DeviceRoleForm',\n 'DeviceTypeForm',\n 'DeviceVCMembershipForm',\n 'FrontPortForm',\n 'FrontPortTemplateForm',\n 'InterfaceForm',\n 'InterfaceTemplateForm',\n 'InventoryItemForm',\n 'InventoryItemRoleForm',\n 'InventoryItemTemplateForm',\n 'LocationForm',\n 'ManufacturerForm',\n 'ModuleForm',\n 'ModuleBayForm',\n 'ModuleBayTemplateForm',\n 'ModuleTypeForm',\n 'PlatformForm',\n 'PopulateDeviceBayForm',\n 'PowerFeedForm',\n 'PowerOutletForm',\n 'PowerOutletTemplateForm',\n 'PowerPanelForm',\n 'PowerPortForm',\n 'PowerPortTemplateForm',\n 'RackForm',\n 'RackReservationForm',\n 'RackRoleForm',\n 'RearPortForm',\n 'RearPortTemplateForm',\n 'RegionForm',\n 'SiteForm',\n 'SiteGroupForm',\n 'VCMemberSelectForm',\n 'VirtualChassisForm',\n)\n\nINTERFACE_MODE_HELP_TEXT = \"\"\"\nAccess: One untagged VLAN<br />\nTagged: One untagged VLAN and/or one or more tagged VLANs<br />\nTagged (All): Implies all VLANs are available (w/optional untagged VLAN)\n\"\"\"\n\n\nclass RegionForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False\n )\n slug = SlugField()\n\n class Meta:\n model = Region\n fields = (\n 'parent', 'name', 'slug', 'description', 'tags',\n )\n\n\nclass SiteGroupForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False\n )\n slug = SlugField()\n\n class Meta:\n model = SiteGroup\n fields = (\n 'parent', 'name', 'slug', 'description', 'tags',\n )\n\n\nclass SiteForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n 
queryset=Region.objects.all(),\n required=False\n )\n group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False\n )\n asns = DynamicModelMultipleChoiceField(\n queryset=ASN.objects.all(),\n label=_('ASNs'),\n required=False\n )\n slug = SlugField()\n time_zone = TimeZoneFormField(\n choices=add_blank_choice(TimeZoneFormField().choices),\n required=False,\n widget=StaticSelect()\n )\n comments = CommentField()\n\n fieldsets = (\n ('Site', (\n 'name', 'slug', 'status', 'region', 'group', 'facility', 'asns', 'time_zone', 'description', 'tags',\n )),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Contact Info', ('physical_address', 'shipping_address', 'latitude', 'longitude')),\n )\n\n class Meta:\n model = Site\n fields = (\n 'name', 'slug', 'status', 'region', 'group', 'tenant_group', 'tenant', 'facility', 'asns', 'time_zone',\n 'description', 'physical_address', 'shipping_address', 'latitude', 'longitude', 'comments', 'tags',\n )\n widgets = {\n 'physical_address': SmallTextarea(\n attrs={\n 'rows': 3,\n }\n ),\n 'shipping_address': SmallTextarea(\n attrs={\n 'rows': 3,\n }\n ),\n 'status': StaticSelect(),\n 'time_zone': StaticSelect(),\n }\n help_texts = {\n 'name': \"Full name of the site\",\n 'facility': \"Data center provider and facility (e.g. Equinix NY7)\",\n 'time_zone': \"Local time zone\",\n 'description': \"Short description (will appear in sites list)\",\n 'physical_address': \"Physical location of the building (e.g. for GPS)\",\n 'shipping_address': \"If different from the physical address\",\n 'latitude': \"Latitude in decimal format (xx.yyyyyy)\",\n 'longitude': \"Longitude in decimal format (xx.yyyyyy)\"\n }\n\n\nclass LocationForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n parent = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n slug = SlugField()\n\n fieldsets = (\n ('Location', (\n 'region', 'site_group', 'site', 'parent', 'name', 'slug', 'status', 'description', 'tags',\n )),\n ('Tenancy', ('tenant_group', 'tenant')),\n )\n\n class Meta:\n model = Location\n fields = (\n 'region', 'site_group', 'site', 'parent', 'name', 'slug', 'status', 'description', 'tenant_group', 'tenant',\n 'tags',\n )\n widgets = {\n 'status': StaticSelect(),\n }\n\n\nclass RackRoleForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = RackRole\n fields = [\n 'name', 'slug', 'color', 'description', 'tags',\n ]\n\n\nclass RackForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n role = DynamicModelChoiceField(\n queryset=RackRole.objects.all(),\n 
required=False\n )\n comments = CommentField()\n\n class Meta:\n model = Rack\n fields = [\n 'region', 'site_group', 'site', 'location', 'name', 'facility_id', 'tenant_group', 'tenant', 'status',\n 'role', 'serial', 'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth',\n 'outer_unit', 'comments', 'tags',\n ]\n help_texts = {\n 'site': \"The site at which the rack exists\",\n 'name': \"Organizational rack name\",\n 'facility_id': \"The unique rack ID assigned by the facility\",\n 'u_height': \"Height in rack units\",\n }\n widgets = {\n 'status': StaticSelect(),\n 'type': StaticSelect(),\n 'width': StaticSelect(),\n 'outer_unit': StaticSelect(),\n }\n\n\nclass RackReservationForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n query_params={\n 'site_id': '$site',\n 'location_id': '$location',\n }\n )\n units = NumericArrayField(\n base_field=forms.IntegerField(),\n help_text=\"Comma-separated list of numeric unit IDs. A range may be specified using a hyphen.\"\n )\n user = forms.ModelChoiceField(\n queryset=User.objects.order_by(\n 'username'\n ),\n widget=StaticSelect()\n )\n\n fieldsets = (\n ('Reservation', ('region', 'site_group', 'site', 'location', 'rack', 'units', 'user', 'description', 'tags')),\n ('Tenancy', ('tenant_group', 'tenant')),\n )\n\n class Meta:\n model = RackReservation\n fields = [\n 'region', 'site_group', 'site', 'location', 'rack', 'units', 'user', 'tenant_group', 'tenant',\n 'description', 'tags',\n ]\n\n\nclass ManufacturerForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = Manufacturer\n fields = [\n 'name', 'slug', 'description', 'tags',\n ]\n\n\nclass DeviceTypeForm(NetBoxModelForm):\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all()\n )\n slug = SlugField(\n slug_source='model'\n )\n comments = CommentField()\n\n fieldsets = (\n ('Device Type', (\n 'manufacturer', 'model', 'slug', 'part_number', 'tags',\n )),\n ('Chassis', (\n 'u_height', 'is_full_depth', 'subdevice_role', 'airflow',\n )),\n ('Images', ('front_image', 'rear_image')),\n )\n\n class Meta:\n model = DeviceType\n fields = [\n 'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role', 'airflow',\n 'front_image', 'rear_image', 'comments', 'tags',\n ]\n widgets = {\n 'subdevice_role': StaticSelect(),\n 'front_image': ClearableFileInput(attrs={\n 'accept': DEVICETYPE_IMAGE_FORMATS\n }),\n 'rear_image': ClearableFileInput(attrs={\n 'accept': DEVICETYPE_IMAGE_FORMATS\n })\n }\n\n\nclass ModuleTypeForm(NetBoxModelForm):\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all()\n )\n comments = CommentField()\n\n fieldsets = (\n ('Module Type', (\n 'manufacturer', 'model', 'part_number', 'tags',\n )),\n )\n\n class Meta:\n model = ModuleType\n fields = [\n 'manufacturer', 'model', 'part_number', 'comments', 'tags',\n ]\n\n\nclass 
DeviceRoleForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = DeviceRole\n fields = [\n 'name', 'slug', 'color', 'vm_role', 'description', 'tags',\n ]\n\n\nclass PlatformForm(NetBoxModelForm):\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False\n )\n slug = SlugField(\n max_length=64\n )\n\n class Meta:\n model = Platform\n fields = [\n 'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description', 'tags',\n ]\n widgets = {\n 'napalm_args': SmallTextarea(),\n }\n\n\nclass DeviceForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n },\n initial_params={\n 'racks': '$rack'\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site',\n 'location_id': '$location',\n }\n )\n position = forms.DecimalField(\n required=False,\n help_text=\"The lowest-numbered unit occupied by the device\",\n widget=APISelect(\n api_url='/api/dcim/racks/{{rack}}/elevation/',\n attrs={\n 'disabled-indicator': 'device',\n 'data-dynamic-params': '[{\"fieldName\":\"face\",\"queryParam\":\"face\"}]'\n }\n )\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False,\n initial_params={\n 'device_types': '$device_type'\n }\n )\n device_type = DynamicModelChoiceField(\n queryset=DeviceType.objects.all(),\n query_params={\n 'manufacturer_id': '$manufacturer'\n }\n )\n device_role = DynamicModelChoiceField(\n queryset=DeviceRole.objects.all()\n )\n platform = DynamicModelChoiceField(\n queryset=Platform.objects.all(),\n required=False,\n query_params={\n 'manufacturer_id': ['$manufacturer', 'null']\n }\n )\n cluster_group = DynamicModelChoiceField(\n queryset=ClusterGroup.objects.all(),\n required=False,\n null_option='None',\n initial_params={\n 'clusters': '$cluster'\n }\n )\n cluster = DynamicModelChoiceField(\n queryset=Cluster.objects.all(),\n required=False,\n query_params={\n 'group_id': '$cluster_group'\n }\n )\n comments = CommentField()\n local_context_data = JSONField(\n required=False,\n label=''\n )\n virtual_chassis = DynamicModelChoiceField(\n queryset=VirtualChassis.objects.all(),\n required=False\n )\n vc_position = forms.IntegerField(\n required=False,\n label='Position',\n help_text=\"The position in the virtual chassis this device is identified by\"\n )\n vc_priority = forms.IntegerField(\n required=False,\n label='Priority',\n help_text=\"The priority of the device in the virtual chassis\"\n )\n\n class Meta:\n model = Device\n fields = [\n 'name', 'device_role', 'device_type', 'serial', 'asset_tag', 'region', 'site_group', 'site', 'rack',\n 'location', 'position', 'face', 'status', 'airflow', 'platform', 'primary_ip4', 'primary_ip6',\n 'cluster_group', 'cluster', 'tenant_group', 'tenant', 'virtual_chassis', 'vc_position', 'vc_priority',\n 'comments', 'tags', 'local_context_data'\n ]\n help_texts = {\n 'device_role': \"The function this device serves\",\n 'serial': \"Chassis serial number\",\n 
'local_context_data': \"Local config context data overwrites all source contexts in the final rendered \"\n \"config context\",\n }\n widgets = {\n 'face': StaticSelect(),\n 'status': StaticSelect(),\n 'airflow': StaticSelect(),\n 'primary_ip4': StaticSelect(),\n 'primary_ip6': StaticSelect(),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.instance.pk:\n\n # Compile list of choices for primary IPv4 and IPv6 addresses\n for family in [4, 6]:\n ip_choices = [(None, '---------')]\n\n # Gather PKs of all interfaces belonging to this Device or a peer VirtualChassis member\n interface_ids = self.instance.vc_interfaces(if_master=False).values_list('pk', flat=True)\n\n # Collect interface IPs\n interface_ips = IPAddress.objects.filter(\n address__family=family,\n assigned_object_type=ContentType.objects.get_for_model(Interface),\n assigned_object_id__in=interface_ids\n ).prefetch_related('assigned_object')\n if interface_ips:\n ip_list = [(ip.id, f'{ip.address} ({ip.assigned_object})') for ip in interface_ips]\n ip_choices.append(('Interface IPs', ip_list))\n # Collect NAT IPs\n nat_ips = IPAddress.objects.prefetch_related('nat_inside').filter(\n address__family=family,\n nat_inside__assigned_object_type=ContentType.objects.get_for_model(Interface),\n nat_inside__assigned_object_id__in=interface_ids\n ).prefetch_related('assigned_object')\n if nat_ips:\n ip_list = [(ip.id, f'{ip.address} (NAT)') for ip in nat_ips]\n ip_choices.append(('NAT IPs', ip_list))\n self.fields['primary_ip{}'.format(family)].choices = ip_choices\n\n # If editing an existing device, exclude it from the list of occupied rack units. This ensures that a device\n # can be flipped from one face to another.\n self.fields['position'].widget.add_query_param('exclude', self.instance.pk)\n\n # Disable rack assignment if this is a child device installed in a parent device\n if self.instance.device_type.is_child_device and hasattr(self.instance, 'parent_bay'):\n self.fields['site'].disabled = True\n self.fields['rack'].disabled = True\n self.initial['site'] = self.instance.parent_bay.device.site_id\n self.initial['rack'] = self.instance.parent_bay.device.rack_id\n\n else:\n\n # An object that doesn't exist yet can't have any IPs assigned to it\n self.fields['primary_ip4'].choices = []\n self.fields['primary_ip4'].widget.attrs['readonly'] = True\n self.fields['primary_ip6'].choices = []\n self.fields['primary_ip6'].widget.attrs['readonly'] = True\n\n # Rack position\n position = self.data.get('position') or self.initial.get('position')\n if position:\n self.fields['position'].widget.choices = [(position, f'U{position}')]\n\n\nclass ModuleForm(NetBoxModelForm):\n device = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n initial_params={\n 'modulebays': '$module_bay'\n }\n )\n module_bay = DynamicModelChoiceField(\n queryset=ModuleBay.objects.all(),\n query_params={\n 'device_id': '$device'\n }\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False,\n initial_params={\n 'module_types': '$module_type'\n }\n )\n module_type = DynamicModelChoiceField(\n queryset=ModuleType.objects.all(),\n query_params={\n 'manufacturer_id': '$manufacturer'\n }\n )\n comments = CommentField()\n replicate_components = forms.BooleanField(\n required=False,\n initial=True,\n help_text=\"Automatically populate components associated with this module type\"\n )\n\n adopt_components = forms.BooleanField(\n required=False,\n initial=False,\n help_text=\"Adopt 
already existing components\"\n )\n\n fieldsets = (\n ('Module', (\n 'device', 'module_bay', 'manufacturer', 'module_type', 'tags',\n )),\n ('Hardware', (\n 'serial', 'asset_tag', 'replicate_components', 'adopt_components',\n )),\n )\n\n class Meta:\n model = Module\n fields = [\n 'device', 'module_bay', 'manufacturer', 'module_type', 'serial', 'asset_tag', 'tags',\n 'replicate_components', 'adopt_components', 'comments',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.instance.pk:\n self.fields['replicate_components'].initial = False\n self.fields['replicate_components'].disabled = True\n self.fields['adopt_components'].initial = False\n self.fields['adopt_components'].disabled = True\n\n def save(self, *args, **kwargs):\n\n # If replicate_components is False, disable automatic component replication on the instance\n if self.instance.pk or not self.cleaned_data['replicate_components']:\n self.instance._disable_replication = True\n\n if self.cleaned_data['adopt_components']:\n self.instance._adopt_components = True\n\n return super().save(*args, **kwargs)\n\n def clean(self):\n super().clean()\n\n replicate_components = self.cleaned_data.get(\"replicate_components\")\n adopt_components = self.cleaned_data.get(\"adopt_components\")\n device = self.cleaned_data['device']\n module_type = self.cleaned_data['module_type']\n module_bay = self.cleaned_data['module_bay']\n\n # Bail out if we are not installing a new module or if we are not replicating components\n if self.instance.pk or not replicate_components:\n return\n\n for templates, component_attribute in [\n (\"consoleporttemplates\", \"consoleports\"),\n (\"consoleserverporttemplates\", \"consoleserverports\"),\n (\"interfacetemplates\", \"interfaces\"),\n (\"powerporttemplates\", \"powerports\"),\n (\"poweroutlettemplates\", \"poweroutlets\"),\n (\"rearporttemplates\", \"rearports\"),\n (\"frontporttemplates\", \"frontports\")\n ]:\n # Prefetch installed components\n installed_components = {\n component.name: component for component in getattr(device, component_attribute).all()\n }\n\n # Get the templates for the module type.\n for template in getattr(module_type, templates).all():\n # Installing modules with placeholders require that the bay has a position value\n if MODULE_TOKEN in template.name and not module_bay.position:\n raise forms.ValidationError(\n \"Cannot install module with placeholder values in a module bay with no position defined\"\n )\n\n resolved_name = template.name.replace(MODULE_TOKEN, module_bay.position)\n existing_item = installed_components.get(resolved_name)\n\n # It is not possible to adopt components already belonging to a module\n if adopt_components and existing_item and existing_item.module:\n raise forms.ValidationError(\n f\"Cannot adopt {template.component_model.__name__} '{resolved_name}' as it already belongs \"\n f\"to a module\"\n )\n\n # If we are not adopting components we error if the component exists\n if not adopt_components and resolved_name in installed_components:\n raise forms.ValidationError(\n f\"{template.component_model.__name__} - {resolved_name} already exists\"\n )\n\n\nclass CableForm(TenancyForm, NetBoxModelForm):\n\n class Meta:\n model = Cable\n fields = [\n 'type', 'status', 'tenant_group', 'tenant', 'label', 'color', 'length', 'length_unit', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'type': StaticSelect,\n 'length_unit': StaticSelect,\n }\n error_messages = {\n 'length': {\n 'max_value': 'Maximum length is 32767 (any 
unit)'\n }\n }\n\n\nclass PowerPanelForm(NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n\n fieldsets = (\n ('Power Panel', ('region', 'site_group', 'site', 'location', 'name', 'tags')),\n )\n\n class Meta:\n model = PowerPanel\n fields = [\n 'region', 'site_group', 'site', 'location', 'name', 'tags',\n ]\n\n\nclass PowerFeedForm(NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites__powerpanel': '$power_panel'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n initial_params={\n 'powerpanel': '$power_panel'\n },\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n power_panel = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n query_params={\n 'site_id': '$site'\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n comments = CommentField()\n\n fieldsets = (\n ('Power Panel', ('region', 'site', 'power_panel')),\n ('Power Feed', ('rack', 'name', 'status', 'type', 'mark_connected', 'tags')),\n ('Characteristics', ('supply', 'voltage', 'amperage', 'phase', 'max_utilization')),\n )\n\n class Meta:\n model = PowerFeed\n fields = [\n 'region', 'site_group', 'site', 'power_panel', 'rack', 'name', 'status', 'type', 'mark_connected', 'supply',\n 'phase', 'voltage', 'amperage', 'max_utilization', 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect(),\n 'type': StaticSelect(),\n 'supply': StaticSelect(),\n 'phase': StaticSelect(),\n }\n\n\n#\n# Virtual chassis\n#\n\nclass VirtualChassisForm(NetBoxModelForm):\n master = forms.ModelChoiceField(\n queryset=Device.objects.all(),\n required=False,\n )\n\n class Meta:\n model = VirtualChassis\n fields = [\n 'name', 'domain', 'master', 'tags',\n ]\n widgets = {\n 'master': SelectWithPK(),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['master'].queryset = Device.objects.filter(virtual_chassis=self.instance)\n\n\nclass DeviceVCMembershipForm(forms.ModelForm):\n class Meta:\n model = Device\n fields = [\n 'vc_position', 'vc_priority',\n ]\n labels = {\n 'vc_position': 'Position',\n 'vc_priority': 'Priority',\n }\n\n def __init__(self, validate_vc_position=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Require VC position (only required when the Device is a VirtualChassis member)\n self.fields['vc_position'].required = True\n\n # Add bootstrap classes to form elements.\n self.fields['vc_position'].widget.attrs = {'class': 'form-control'}\n self.fields['vc_priority'].widget.attrs = {'class': 'form-control'}\n\n # Validation of vc_position is optional. This is only required when adding a new member to an existing\n # VirtualChassis. 
Otherwise, vc_position validation is handled by BaseVCMemberFormSet.\n self.validate_vc_position = validate_vc_position\n\n def clean_vc_position(self):\n vc_position = self.cleaned_data['vc_position']\n\n if self.validate_vc_position:\n conflicting_members = Device.objects.filter(\n virtual_chassis=self.instance.virtual_chassis,\n vc_position=vc_position\n )\n if conflicting_members.exists():\n raise forms.ValidationError(\n 'A virtual chassis member already exists in position {}.'.format(vc_position)\n )\n\n return vc_position\n\n\nclass VCMemberSelectForm(BootstrapMixin, forms.Form):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n required=False,\n null_option='None',\n query_params={\n 'site_id': '$site'\n }\n )\n device = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site',\n 'rack_id': '$rack',\n 'virtual_chassis_id': 'null',\n }\n )\n\n def clean_device(self):\n device = self.cleaned_data['device']\n if device.virtual_chassis is not None:\n raise forms.ValidationError(\n f\"Device {device} is already assigned to a virtual chassis.\"\n )\n return device\n\n\n#\n# Device component templates\n#\n\n\nclass ConsolePortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ConsolePortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect,\n }\n\n\nclass ConsoleServerPortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ConsoleServerPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect,\n }\n\n\nclass PowerPortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = PowerPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass PowerOutletTemplateForm(BootstrapMixin, forms.ModelForm):\n power_port = DynamicModelChoiceField(\n queryset=PowerPortTemplate.objects.all(),\n required=False,\n query_params={\n 'devicetype_id': '$device_type',\n }\n )\n\n class Meta:\n model = PowerOutletTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'feed_leg': StaticSelect(),\n }\n\n\nclass InterfaceTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = InterfaceTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'mgmt_only', 'description', 'poe_mode', 'poe_type',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'poe_mode': StaticSelect(),\n 
'poe_type': StaticSelect(),\n }\n\n\nclass FrontPortTemplateForm(BootstrapMixin, forms.ModelForm):\n rear_port = DynamicModelChoiceField(\n queryset=RearPortTemplate.objects.all(),\n required=False,\n query_params={\n 'devicetype_id': '$device_type',\n 'moduletype_id': '$module_type',\n }\n )\n\n class Meta:\n model = FrontPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'color', 'rear_port', 'rear_port_position',\n 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass RearPortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = RearPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'color', 'positions', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass ModuleBayTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ModuleBayTemplate\n fields = [\n 'device_type', 'name', 'label', 'position', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n }\n\n\nclass DeviceBayTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = DeviceBayTemplate\n fields = [\n 'device_type', 'name', 'label', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n }\n\n\nclass InventoryItemTemplateForm(BootstrapMixin, forms.ModelForm):\n parent = DynamicModelChoiceField(\n queryset=InventoryItemTemplate.objects.all(),\n required=False,\n query_params={\n 'devicetype_id': '$device_type'\n }\n )\n role = DynamicModelChoiceField(\n queryset=InventoryItemRole.objects.all(),\n required=False\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False\n )\n component_type = ContentTypeChoiceField(\n queryset=ContentType.objects.all(),\n limit_choices_to=MODULAR_COMPONENT_TEMPLATE_MODELS,\n required=False,\n widget=forms.HiddenInput\n )\n component_id = forms.IntegerField(\n required=False,\n widget=forms.HiddenInput\n )\n\n class Meta:\n model = InventoryItemTemplate\n fields = [\n 'device_type', 'parent', 'name', 'label', 'role', 'manufacturer', 'part_id', 'description',\n 'component_type', 'component_id',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n }\n\n\n#\n# Device components\n#\n\nclass ConsolePortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = ConsolePort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'speed', 'mark_connected', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'speed': StaticSelect(),\n }\n\n\nclass ConsoleServerPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = ConsoleServerPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'speed', 'mark_connected', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'speed': StaticSelect(),\n }\n\n\nclass PowerPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = PowerPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 
'maximum_draw', 'allocated_draw', 'mark_connected',\n 'description',\n 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass PowerOutletForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n power_port = DynamicModelChoiceField(\n queryset=PowerPort.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = PowerOutlet\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'power_port', 'feed_leg', 'mark_connected', 'description',\n 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'feed_leg': StaticSelect(),\n }\n\n\nclass InterfaceForm(InterfaceCommonForm, NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n parent = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n required=False,\n label='Parent interface',\n query_params={\n 'device_id': '$device',\n }\n )\n bridge = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n required=False,\n label='Bridged interface',\n query_params={\n 'device_id': '$device',\n }\n )\n lag = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n required=False,\n label='LAG interface',\n query_params={\n 'device_id': '$device',\n 'type': 'lag',\n }\n )\n wireless_lan_group = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False,\n label='Wireless LAN group'\n )\n wireless_lans = DynamicModelMultipleChoiceField(\n queryset=WirelessLAN.objects.all(),\n required=False,\n label='Wireless LANs',\n query_params={\n 'group_id': '$wireless_lan_group',\n }\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label='VLAN group'\n )\n untagged_vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label='Untagged VLAN',\n query_params={\n 'group_id': '$vlan_group',\n 'available_on_device': '$device',\n }\n )\n tagged_vlans = DynamicModelMultipleChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label='Tagged VLANs',\n query_params={\n 'group_id': '$vlan_group',\n 'available_on_device': '$device',\n }\n )\n vrf = DynamicModelChoiceField(\n queryset=VRF.objects.all(),\n required=False,\n label='VRF'\n )\n\n fieldsets = (\n ('Interface', ('device', 'module', 'name', 'type', 'speed', 'duplex', 'label', 'description', 'tags')),\n ('Addressing', ('vrf', 'mac_address', 'wwn')),\n ('Operation', ('mtu', 'tx_power', 'enabled', 'mgmt_only', 'mark_connected')),\n ('Related Interfaces', ('parent', 'bridge', 'lag')),\n ('PoE', ('poe_mode', 'poe_type')),\n ('802.1Q Switching', ('mode', 'vlan_group', 'untagged_vlan', 'tagged_vlans')),\n ('Wireless', (\n 'rf_role', 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'wireless_lan_group', 'wireless_lans',\n )),\n )\n\n class Meta:\n model = Interface\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'speed', 'duplex', 'enabled', 'parent', 'bridge', 'lag',\n 'mac_address', 'wwn', 'mtu', 'mgmt_only', 'mark_connected', 'description', 'poe_mode', 'poe_type', 'mode',\n 'rf_role', 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'tx_power', 'wireless_lans',\n 'untagged_vlan', 'tagged_vlans', 'vrf', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'speed': SelectSpeedWidget(),\n 
'poe_mode': StaticSelect(),\n 'poe_type': StaticSelect(),\n 'duplex': StaticSelect(),\n 'mode': StaticSelect(),\n 'rf_role': StaticSelect(),\n 'rf_channel': StaticSelect(),\n }\n labels = {\n 'mode': '802.1Q Mode',\n }\n help_texts = {\n 'mode': INTERFACE_MODE_HELP_TEXT,\n 'rf_channel_frequency': \"Populated by selected channel (if set)\",\n 'rf_channel_width': \"Populated by selected channel (if set)\",\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Restrict LAG/bridge interface assignment by device/VC\n device_id = self.data['device'] if self.is_bound else self.initial.get('device')\n device = Device.objects.filter(pk=device_id).first()\n if device and device.virtual_chassis and device.virtual_chassis.master:\n self.fields['lag'].widget.add_query_param('device_id', device.virtual_chassis.master.pk)\n self.fields['bridge'].widget.add_query_param('device_id', device.virtual_chassis.master.pk)\n\n\nclass FrontPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n rear_port = DynamicModelChoiceField(\n queryset=RearPort.objects.all(),\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = FrontPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'color', 'rear_port', 'rear_port_position', 'mark_connected',\n 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass RearPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = RearPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'color', 'positions', 'mark_connected', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass ModuleBayForm(NetBoxModelForm):\n\n class Meta:\n model = ModuleBay\n fields = [\n 'device', 'name', 'label', 'position', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n }\n\n\nclass DeviceBayForm(NetBoxModelForm):\n\n class Meta:\n model = DeviceBay\n fields = [\n 'device', 'name', 'label', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n }\n\n\nclass PopulateDeviceBayForm(BootstrapMixin, forms.Form):\n installed_device = forms.ModelChoiceField(\n queryset=Device.objects.all(),\n label='Child Device',\n help_text=\"Child devices must first be created and assigned to the site/rack of the parent device.\",\n widget=StaticSelect(),\n )\n\n def __init__(self, device_bay, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['installed_device'].queryset = Device.objects.filter(\n site=device_bay.device.site,\n rack=device_bay.device.rack,\n parent_bay__isnull=True,\n device_type__u_height=0,\n device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD\n ).exclude(pk=device_bay.device.pk)\n\n\nclass InventoryItemForm(NetBoxModelForm):\n device = DynamicModelChoiceField(\n queryset=Device.objects.all()\n )\n parent = DynamicModelChoiceField(\n queryset=InventoryItem.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device'\n }\n )\n role = DynamicModelChoiceField(\n queryset=InventoryItemRole.objects.all(),\n required=False\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False\n )\n component_type = ContentTypeChoiceField(\n 
queryset=ContentType.objects.all(),\n limit_choices_to=MODULAR_COMPONENT_MODELS,\n required=False,\n widget=forms.HiddenInput\n )\n component_id = forms.IntegerField(\n required=False,\n widget=forms.HiddenInput\n )\n\n fieldsets = (\n ('Inventory Item', ('device', 'parent', 'name', 'label', 'role', 'description', 'tags')),\n ('Hardware', ('manufacturer', 'part_id', 'serial', 'asset_tag')),\n )\n\n class Meta:\n model = InventoryItem\n fields = [\n 'device', 'parent', 'name', 'label', 'role', 'manufacturer', 'part_id', 'serial', 'asset_tag',\n 'description', 'component_type', 'component_id', 'tags',\n ]\n\n\n#\n# Device component roles\n#\n\nclass InventoryItemRoleForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = InventoryItemRole\n fields = [\n 'name', 'slug', 'color', 'description', 'tags',\n ]\n", "path": "netbox/dcim/forms/models.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom timezone_field import TimeZoneFormField\n\nfrom dcim.choices import *\nfrom dcim.constants import *\nfrom dcim.models import *\nfrom ipam.models import ASN, IPAddress, VLAN, VLANGroup, VRF\nfrom netbox.forms import NetBoxModelForm\nfrom tenancy.forms import TenancyForm\nfrom utilities.forms import (\n APISelect, add_blank_choice, BootstrapMixin, ClearableFileInput, CommentField, ContentTypeChoiceField,\n DynamicModelChoiceField, DynamicModelMultipleChoiceField, JSONField, NumericArrayField, SelectWithPK, SmallTextarea,\n SlugField, StaticSelect, SelectSpeedWidget,\n)\nfrom virtualization.models import Cluster, ClusterGroup\nfrom wireless.models import WirelessLAN, WirelessLANGroup\nfrom .common import InterfaceCommonForm\n\n__all__ = (\n 'CableForm',\n 'ConsolePortForm',\n 'ConsolePortTemplateForm',\n 'ConsoleServerPortForm',\n 'ConsoleServerPortTemplateForm',\n 'DeviceBayForm',\n 'DeviceBayTemplateForm',\n 'DeviceForm',\n 'DeviceRoleForm',\n 'DeviceTypeForm',\n 'DeviceVCMembershipForm',\n 'FrontPortForm',\n 'FrontPortTemplateForm',\n 'InterfaceForm',\n 'InterfaceTemplateForm',\n 'InventoryItemForm',\n 'InventoryItemRoleForm',\n 'InventoryItemTemplateForm',\n 'LocationForm',\n 'ManufacturerForm',\n 'ModuleForm',\n 'ModuleBayForm',\n 'ModuleBayTemplateForm',\n 'ModuleTypeForm',\n 'PlatformForm',\n 'PopulateDeviceBayForm',\n 'PowerFeedForm',\n 'PowerOutletForm',\n 'PowerOutletTemplateForm',\n 'PowerPanelForm',\n 'PowerPortForm',\n 'PowerPortTemplateForm',\n 'RackForm',\n 'RackReservationForm',\n 'RackRoleForm',\n 'RearPortForm',\n 'RearPortTemplateForm',\n 'RegionForm',\n 'SiteForm',\n 'SiteGroupForm',\n 'VCMemberSelectForm',\n 'VirtualChassisForm',\n)\n\nINTERFACE_MODE_HELP_TEXT = \"\"\"\nAccess: One untagged VLAN<br />\nTagged: One untagged VLAN and/or one or more tagged VLANs<br />\nTagged (All): Implies all VLANs are available (w/optional untagged VLAN)\n\"\"\"\n\n\nclass RegionForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False\n )\n slug = SlugField()\n\n class Meta:\n model = Region\n fields = (\n 'parent', 'name', 'slug', 'description', 'tags',\n )\n\n\nclass SiteGroupForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False\n )\n slug = SlugField()\n\n class Meta:\n model = SiteGroup\n fields = (\n 'parent', 'name', 'slug', 'description', 'tags',\n )\n\n\nclass SiteForm(TenancyForm, NetBoxModelForm):\n 
region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False\n )\n group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False\n )\n asns = DynamicModelMultipleChoiceField(\n queryset=ASN.objects.all(),\n label=_('ASNs'),\n required=False\n )\n slug = SlugField()\n time_zone = TimeZoneFormField(\n choices=add_blank_choice(TimeZoneFormField().choices),\n required=False,\n widget=StaticSelect()\n )\n comments = CommentField()\n\n fieldsets = (\n ('Site', (\n 'name', 'slug', 'status', 'region', 'group', 'facility', 'asns', 'time_zone', 'description', 'tags',\n )),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Contact Info', ('physical_address', 'shipping_address', 'latitude', 'longitude')),\n )\n\n class Meta:\n model = Site\n fields = (\n 'name', 'slug', 'status', 'region', 'group', 'tenant_group', 'tenant', 'facility', 'asns', 'time_zone',\n 'description', 'physical_address', 'shipping_address', 'latitude', 'longitude', 'comments', 'tags',\n )\n widgets = {\n 'physical_address': SmallTextarea(\n attrs={\n 'rows': 3,\n }\n ),\n 'shipping_address': SmallTextarea(\n attrs={\n 'rows': 3,\n }\n ),\n 'status': StaticSelect(),\n 'time_zone': StaticSelect(),\n }\n help_texts = {\n 'name': \"Full name of the site\",\n 'facility': \"Data center provider and facility (e.g. Equinix NY7)\",\n 'time_zone': \"Local time zone\",\n 'description': \"Short description (will appear in sites list)\",\n 'physical_address': \"Physical location of the building (e.g. for GPS)\",\n 'shipping_address': \"If different from the physical address\",\n 'latitude': \"Latitude in decimal format (xx.yyyyyy)\",\n 'longitude': \"Longitude in decimal format (xx.yyyyyy)\"\n }\n\n\nclass LocationForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n parent = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n slug = SlugField()\n\n fieldsets = (\n ('Location', (\n 'region', 'site_group', 'site', 'parent', 'name', 'slug', 'status', 'description', 'tags',\n )),\n ('Tenancy', ('tenant_group', 'tenant')),\n )\n\n class Meta:\n model = Location\n fields = (\n 'region', 'site_group', 'site', 'parent', 'name', 'slug', 'status', 'description', 'tenant_group', 'tenant',\n 'tags',\n )\n widgets = {\n 'status': StaticSelect(),\n }\n\n\nclass RackRoleForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = RackRole\n fields = [\n 'name', 'slug', 'color', 'description', 'tags',\n ]\n\n\nclass RackForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n role = DynamicModelChoiceField(\n 
queryset=RackRole.objects.all(),\n required=False\n )\n comments = CommentField()\n\n class Meta:\n model = Rack\n fields = [\n 'region', 'site_group', 'site', 'location', 'name', 'facility_id', 'tenant_group', 'tenant', 'status',\n 'role', 'serial', 'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth',\n 'outer_unit', 'comments', 'tags',\n ]\n help_texts = {\n 'site': \"The site at which the rack exists\",\n 'name': \"Organizational rack name\",\n 'facility_id': \"The unique rack ID assigned by the facility\",\n 'u_height': \"Height in rack units\",\n }\n widgets = {\n 'status': StaticSelect(),\n 'type': StaticSelect(),\n 'width': StaticSelect(),\n 'outer_unit': StaticSelect(),\n }\n\n\nclass RackReservationForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n query_params={\n 'site_id': '$site',\n 'location_id': '$location',\n }\n )\n units = NumericArrayField(\n base_field=forms.IntegerField(),\n help_text=\"Comma-separated list of numeric unit IDs. A range may be specified using a hyphen.\"\n )\n user = forms.ModelChoiceField(\n queryset=User.objects.order_by(\n 'username'\n ),\n widget=StaticSelect()\n )\n\n fieldsets = (\n ('Reservation', ('region', 'site_group', 'site', 'location', 'rack', 'units', 'user', 'description', 'tags')),\n ('Tenancy', ('tenant_group', 'tenant')),\n )\n\n class Meta:\n model = RackReservation\n fields = [\n 'region', 'site_group', 'site', 'location', 'rack', 'units', 'user', 'tenant_group', 'tenant',\n 'description', 'tags',\n ]\n\n\nclass ManufacturerForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = Manufacturer\n fields = [\n 'name', 'slug', 'description', 'tags',\n ]\n\n\nclass DeviceTypeForm(NetBoxModelForm):\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all()\n )\n slug = SlugField(\n slug_source='model'\n )\n comments = CommentField()\n\n fieldsets = (\n ('Device Type', (\n 'manufacturer', 'model', 'slug', 'part_number', 'tags',\n )),\n ('Chassis', (\n 'u_height', 'is_full_depth', 'subdevice_role', 'airflow',\n )),\n ('Images', ('front_image', 'rear_image')),\n )\n\n class Meta:\n model = DeviceType\n fields = [\n 'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role', 'airflow',\n 'front_image', 'rear_image', 'comments', 'tags',\n ]\n widgets = {\n 'subdevice_role': StaticSelect(),\n 'front_image': ClearableFileInput(attrs={\n 'accept': DEVICETYPE_IMAGE_FORMATS\n }),\n 'rear_image': ClearableFileInput(attrs={\n 'accept': DEVICETYPE_IMAGE_FORMATS\n })\n }\n\n\nclass ModuleTypeForm(NetBoxModelForm):\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all()\n )\n comments = CommentField()\n\n fieldsets = (\n ('Module Type', (\n 'manufacturer', 'model', 'part_number', 'tags',\n )),\n )\n\n class Meta:\n model = ModuleType\n fields = [\n 'manufacturer', 'model', 'part_number', 'comments', 'tags',\n 
]\n\n\nclass DeviceRoleForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = DeviceRole\n fields = [\n 'name', 'slug', 'color', 'vm_role', 'description', 'tags',\n ]\n\n\nclass PlatformForm(NetBoxModelForm):\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False\n )\n slug = SlugField(\n max_length=64\n )\n\n class Meta:\n model = Platform\n fields = [\n 'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description', 'tags',\n ]\n widgets = {\n 'napalm_args': SmallTextarea(),\n }\n\n\nclass DeviceForm(TenancyForm, NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n },\n initial_params={\n 'racks': '$rack'\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site',\n 'location_id': '$location',\n }\n )\n position = forms.DecimalField(\n required=False,\n help_text=\"The lowest-numbered unit occupied by the device\",\n widget=APISelect(\n api_url='/api/dcim/racks/{{rack}}/elevation/',\n attrs={\n 'disabled-indicator': 'device',\n 'data-dynamic-params': '[{\"fieldName\":\"face\",\"queryParam\":\"face\"}]'\n }\n )\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False,\n initial_params={\n 'device_types': '$device_type'\n }\n )\n device_type = DynamicModelChoiceField(\n queryset=DeviceType.objects.all(),\n query_params={\n 'manufacturer_id': '$manufacturer'\n }\n )\n device_role = DynamicModelChoiceField(\n queryset=DeviceRole.objects.all()\n )\n platform = DynamicModelChoiceField(\n queryset=Platform.objects.all(),\n required=False,\n query_params={\n 'manufacturer_id': ['$manufacturer', 'null']\n }\n )\n cluster_group = DynamicModelChoiceField(\n queryset=ClusterGroup.objects.all(),\n required=False,\n null_option='None',\n initial_params={\n 'clusters': '$cluster'\n }\n )\n cluster = DynamicModelChoiceField(\n queryset=Cluster.objects.all(),\n required=False,\n query_params={\n 'group_id': '$cluster_group'\n }\n )\n comments = CommentField()\n local_context_data = JSONField(\n required=False,\n label=''\n )\n virtual_chassis = DynamicModelChoiceField(\n queryset=VirtualChassis.objects.all(),\n required=False\n )\n vc_position = forms.IntegerField(\n required=False,\n label='Position',\n help_text=\"The position in the virtual chassis this device is identified by\"\n )\n vc_priority = forms.IntegerField(\n required=False,\n label='Priority',\n help_text=\"The priority of the device in the virtual chassis\"\n )\n\n class Meta:\n model = Device\n fields = [\n 'name', 'device_role', 'device_type', 'serial', 'asset_tag', 'region', 'site_group', 'site', 'rack',\n 'location', 'position', 'face', 'status', 'airflow', 'platform', 'primary_ip4', 'primary_ip6',\n 'cluster_group', 'cluster', 'tenant_group', 'tenant', 'virtual_chassis', 'vc_position', 'vc_priority',\n 'comments', 'tags', 'local_context_data'\n ]\n help_texts = {\n 'device_role': \"The function this device serves\",\n 'serial': \"Chassis serial 
number\",\n 'local_context_data': \"Local config context data overwrites all source contexts in the final rendered \"\n \"config context\",\n }\n widgets = {\n 'face': StaticSelect(),\n 'status': StaticSelect(),\n 'airflow': StaticSelect(),\n 'primary_ip4': StaticSelect(),\n 'primary_ip6': StaticSelect(),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.instance.pk:\n\n # Compile list of choices for primary IPv4 and IPv6 addresses\n for family in [4, 6]:\n ip_choices = [(None, '---------')]\n\n # Gather PKs of all interfaces belonging to this Device or a peer VirtualChassis member\n interface_ids = self.instance.vc_interfaces(if_master=False).values_list('pk', flat=True)\n\n # Collect interface IPs\n interface_ips = IPAddress.objects.filter(\n address__family=family,\n assigned_object_type=ContentType.objects.get_for_model(Interface),\n assigned_object_id__in=interface_ids\n ).prefetch_related('assigned_object')\n if interface_ips:\n ip_list = [(ip.id, f'{ip.address} ({ip.assigned_object})') for ip in interface_ips]\n ip_choices.append(('Interface IPs', ip_list))\n # Collect NAT IPs\n nat_ips = IPAddress.objects.prefetch_related('nat_inside').filter(\n address__family=family,\n nat_inside__assigned_object_type=ContentType.objects.get_for_model(Interface),\n nat_inside__assigned_object_id__in=interface_ids\n ).prefetch_related('assigned_object')\n if nat_ips:\n ip_list = [(ip.id, f'{ip.address} (NAT)') for ip in nat_ips]\n ip_choices.append(('NAT IPs', ip_list))\n self.fields['primary_ip{}'.format(family)].choices = ip_choices\n\n # If editing an existing device, exclude it from the list of occupied rack units. This ensures that a device\n # can be flipped from one face to another.\n self.fields['position'].widget.add_query_param('exclude', self.instance.pk)\n\n # Disable rack assignment if this is a child device installed in a parent device\n if self.instance.device_type.is_child_device and hasattr(self.instance, 'parent_bay'):\n self.fields['site'].disabled = True\n self.fields['rack'].disabled = True\n self.initial['site'] = self.instance.parent_bay.device.site_id\n self.initial['rack'] = self.instance.parent_bay.device.rack_id\n\n else:\n\n # An object that doesn't exist yet can't have any IPs assigned to it\n self.fields['primary_ip4'].choices = []\n self.fields['primary_ip4'].widget.attrs['readonly'] = True\n self.fields['primary_ip6'].choices = []\n self.fields['primary_ip6'].widget.attrs['readonly'] = True\n\n # Rack position\n position = self.data.get('position') or self.initial.get('position')\n if position:\n self.fields['position'].widget.choices = [(position, f'U{position}')]\n\n\nclass ModuleForm(NetBoxModelForm):\n device = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n initial_params={\n 'modulebays': '$module_bay'\n }\n )\n module_bay = DynamicModelChoiceField(\n queryset=ModuleBay.objects.all(),\n query_params={\n 'device_id': '$device'\n }\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False,\n initial_params={\n 'module_types': '$module_type'\n }\n )\n module_type = DynamicModelChoiceField(\n queryset=ModuleType.objects.all(),\n query_params={\n 'manufacturer_id': '$manufacturer'\n }\n )\n comments = CommentField()\n replicate_components = forms.BooleanField(\n required=False,\n initial=True,\n help_text=\"Automatically populate components associated with this module type\"\n )\n\n adopt_components = forms.BooleanField(\n required=False,\n initial=False,\n 
help_text=\"Adopt already existing components\"\n )\n\n fieldsets = (\n ('Module', (\n 'device', 'module_bay', 'manufacturer', 'module_type', 'tags',\n )),\n ('Hardware', (\n 'serial', 'asset_tag', 'replicate_components', 'adopt_components',\n )),\n )\n\n class Meta:\n model = Module\n fields = [\n 'device', 'module_bay', 'manufacturer', 'module_type', 'serial', 'asset_tag', 'tags',\n 'replicate_components', 'adopt_components', 'comments',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.instance.pk:\n self.fields['replicate_components'].initial = False\n self.fields['replicate_components'].disabled = True\n self.fields['adopt_components'].initial = False\n self.fields['adopt_components'].disabled = True\n\n def save(self, *args, **kwargs):\n\n # If replicate_components is False, disable automatic component replication on the instance\n if self.instance.pk or not self.cleaned_data['replicate_components']:\n self.instance._disable_replication = True\n\n if self.cleaned_data['adopt_components']:\n self.instance._adopt_components = True\n\n return super().save(*args, **kwargs)\n\n def clean(self):\n super().clean()\n\n replicate_components = self.cleaned_data.get(\"replicate_components\")\n adopt_components = self.cleaned_data.get(\"adopt_components\")\n device = self.cleaned_data['device']\n module_type = self.cleaned_data['module_type']\n module_bay = self.cleaned_data['module_bay']\n\n # Bail out if we are not installing a new module or if we are not replicating components\n if self.instance.pk or not replicate_components:\n return\n\n for templates, component_attribute in [\n (\"consoleporttemplates\", \"consoleports\"),\n (\"consoleserverporttemplates\", \"consoleserverports\"),\n (\"interfacetemplates\", \"interfaces\"),\n (\"powerporttemplates\", \"powerports\"),\n (\"poweroutlettemplates\", \"poweroutlets\"),\n (\"rearporttemplates\", \"rearports\"),\n (\"frontporttemplates\", \"frontports\")\n ]:\n # Prefetch installed components\n installed_components = {\n component.name: component for component in getattr(device, component_attribute).all()\n }\n\n # Get the templates for the module type.\n for template in getattr(module_type, templates).all():\n # Installing modules with placeholders require that the bay has a position value\n if MODULE_TOKEN in template.name and not module_bay.position:\n raise forms.ValidationError(\n \"Cannot install module with placeholder values in a module bay with no position defined\"\n )\n\n resolved_name = template.name.replace(MODULE_TOKEN, module_bay.position)\n existing_item = installed_components.get(resolved_name)\n\n # It is not possible to adopt components already belonging to a module\n if adopt_components and existing_item and existing_item.module:\n raise forms.ValidationError(\n f\"Cannot adopt {template.component_model.__name__} '{resolved_name}' as it already belongs \"\n f\"to a module\"\n )\n\n # If we are not adopting components we error if the component exists\n if not adopt_components and resolved_name in installed_components:\n raise forms.ValidationError(\n f\"{template.component_model.__name__} - {resolved_name} already exists\"\n )\n\n\nclass CableForm(TenancyForm, NetBoxModelForm):\n\n class Meta:\n model = Cable\n fields = [\n 'type', 'status', 'tenant_group', 'tenant', 'label', 'color', 'length', 'length_unit', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'type': StaticSelect,\n 'length_unit': StaticSelect,\n }\n error_messages = {\n 'length': {\n 'max_value': 'Maximum length 
is 32767 (any unit)'\n }\n }\n\n\nclass PowerPanelForm(NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n location = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n\n fieldsets = (\n ('Power Panel', ('region', 'site_group', 'site', 'location', 'name', 'tags')),\n )\n\n class Meta:\n model = PowerPanel\n fields = [\n 'region', 'site_group', 'site', 'location', 'name', 'tags',\n ]\n\n\nclass PowerFeedForm(NetBoxModelForm):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites__powerpanel': '$power_panel'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n initial_params={\n 'powerpanel': '$power_panel'\n },\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n power_panel = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n query_params={\n 'site_id': '$site'\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n comments = CommentField()\n\n fieldsets = (\n ('Power Panel', ('region', 'site', 'power_panel')),\n ('Power Feed', ('rack', 'name', 'status', 'type', 'mark_connected', 'tags')),\n ('Characteristics', ('supply', 'voltage', 'amperage', 'phase', 'max_utilization')),\n )\n\n class Meta:\n model = PowerFeed\n fields = [\n 'region', 'site_group', 'site', 'power_panel', 'rack', 'name', 'status', 'type', 'mark_connected', 'supply',\n 'phase', 'voltage', 'amperage', 'max_utilization', 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect(),\n 'type': StaticSelect(),\n 'supply': StaticSelect(),\n 'phase': StaticSelect(),\n }\n\n\n#\n# Virtual chassis\n#\n\nclass VirtualChassisForm(NetBoxModelForm):\n master = forms.ModelChoiceField(\n queryset=Device.objects.all(),\n required=False,\n )\n\n class Meta:\n model = VirtualChassis\n fields = [\n 'name', 'domain', 'master', 'tags',\n ]\n widgets = {\n 'master': SelectWithPK(),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['master'].queryset = Device.objects.filter(virtual_chassis=self.instance)\n\n\nclass DeviceVCMembershipForm(forms.ModelForm):\n class Meta:\n model = Device\n fields = [\n 'vc_position', 'vc_priority',\n ]\n labels = {\n 'vc_position': 'Position',\n 'vc_priority': 'Priority',\n }\n\n def __init__(self, validate_vc_position=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Require VC position (only required when the Device is a VirtualChassis member)\n self.fields['vc_position'].required = True\n\n # Add bootstrap classes to form elements.\n self.fields['vc_position'].widget.attrs = {'class': 'form-control'}\n self.fields['vc_priority'].widget.attrs = {'class': 'form-control'}\n\n # Validation of vc_position is optional. This is only required when adding a new member to an existing\n # VirtualChassis. 
Otherwise, vc_position validation is handled by BaseVCMemberFormSet.\n self.validate_vc_position = validate_vc_position\n\n def clean_vc_position(self):\n vc_position = self.cleaned_data['vc_position']\n\n if self.validate_vc_position:\n conflicting_members = Device.objects.filter(\n virtual_chassis=self.instance.virtual_chassis,\n vc_position=vc_position\n )\n if conflicting_members.exists():\n raise forms.ValidationError(\n 'A virtual chassis member already exists in position {}.'.format(vc_position)\n )\n\n return vc_position\n\n\nclass VCMemberSelectForm(BootstrapMixin, forms.Form):\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n rack = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n required=False,\n null_option='None',\n query_params={\n 'site_id': '$site'\n }\n )\n device = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site',\n 'rack_id': '$rack',\n 'virtual_chassis_id': 'null',\n }\n )\n\n def clean_device(self):\n device = self.cleaned_data['device']\n if device.virtual_chassis is not None:\n raise forms.ValidationError(\n f\"Device {device} is already assigned to a virtual chassis.\"\n )\n return device\n\n\n#\n# Device component templates\n#\n\n\nclass ConsolePortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ConsolePortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect,\n }\n\n\nclass ConsoleServerPortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ConsoleServerPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect,\n }\n\n\nclass PowerPortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = PowerPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass PowerOutletTemplateForm(BootstrapMixin, forms.ModelForm):\n power_port = DynamicModelChoiceField(\n queryset=PowerPortTemplate.objects.all(),\n required=False,\n query_params={\n 'devicetype_id': '$device_type',\n }\n )\n\n class Meta:\n model = PowerOutletTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'feed_leg': StaticSelect(),\n }\n\n\nclass InterfaceTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = InterfaceTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'mgmt_only', 'description', 'poe_mode', 'poe_type',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'poe_mode': StaticSelect(),\n 
'poe_type': StaticSelect(),\n }\n\n\nclass FrontPortTemplateForm(BootstrapMixin, forms.ModelForm):\n rear_port = DynamicModelChoiceField(\n queryset=RearPortTemplate.objects.all(),\n required=False,\n query_params={\n 'devicetype_id': '$device_type',\n 'moduletype_id': '$module_type',\n }\n )\n\n class Meta:\n model = FrontPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'color', 'rear_port', 'rear_port_position',\n 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass RearPortTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = RearPortTemplate\n fields = [\n 'device_type', 'module_type', 'name', 'label', 'type', 'color', 'positions', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n 'module_type': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass ModuleBayTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ModuleBayTemplate\n fields = [\n 'device_type', 'name', 'label', 'position', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n }\n\n\nclass DeviceBayTemplateForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = DeviceBayTemplate\n fields = [\n 'device_type', 'name', 'label', 'description',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n }\n\n\nclass InventoryItemTemplateForm(BootstrapMixin, forms.ModelForm):\n parent = DynamicModelChoiceField(\n queryset=InventoryItemTemplate.objects.all(),\n required=False,\n query_params={\n 'devicetype_id': '$device_type'\n }\n )\n role = DynamicModelChoiceField(\n queryset=InventoryItemRole.objects.all(),\n required=False\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n required=False\n )\n component_type = ContentTypeChoiceField(\n queryset=ContentType.objects.all(),\n limit_choices_to=MODULAR_COMPONENT_TEMPLATE_MODELS,\n required=False,\n widget=forms.HiddenInput\n )\n component_id = forms.IntegerField(\n required=False,\n widget=forms.HiddenInput\n )\n\n class Meta:\n model = InventoryItemTemplate\n fields = [\n 'device_type', 'parent', 'name', 'label', 'role', 'manufacturer', 'part_id', 'description',\n 'component_type', 'component_id',\n ]\n widgets = {\n 'device_type': forms.HiddenInput(),\n }\n\n\n#\n# Device components\n#\n\nclass ConsolePortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = ConsolePort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'speed', 'mark_connected', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'speed': StaticSelect(),\n }\n\n\nclass ConsoleServerPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = ConsoleServerPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'speed', 'mark_connected', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'speed': StaticSelect(),\n }\n\n\nclass PowerPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = PowerPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 
'maximum_draw', 'allocated_draw', 'mark_connected',\n 'description',\n 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass PowerOutletForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n power_port = DynamicModelChoiceField(\n queryset=PowerPort.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = PowerOutlet\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'power_port', 'feed_leg', 'mark_connected', 'description',\n 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n 'feed_leg': StaticSelect(),\n }\n\n\nclass InterfaceForm(InterfaceCommonForm, NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n parent = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n required=False,\n label='Parent interface',\n query_params={\n 'device_id': '$device',\n }\n )\n bridge = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n required=False,\n label='Bridged interface',\n query_params={\n 'device_id': '$device',\n }\n )\n lag = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n required=False,\n label='LAG interface',\n query_params={\n 'device_id': '$device',\n 'type': 'lag',\n }\n )\n wireless_lan_group = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False,\n label='Wireless LAN group'\n )\n wireless_lans = DynamicModelMultipleChoiceField(\n queryset=WirelessLAN.objects.all(),\n required=False,\n label='Wireless LANs',\n query_params={\n 'group_id': '$wireless_lan_group',\n }\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label='VLAN group'\n )\n untagged_vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label='Untagged VLAN',\n query_params={\n 'group_id': '$vlan_group',\n 'available_on_device': '$device',\n }\n )\n tagged_vlans = DynamicModelMultipleChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label='Tagged VLANs',\n query_params={\n 'group_id': '$vlan_group',\n 'available_on_device': '$device',\n }\n )\n vrf = DynamicModelChoiceField(\n queryset=VRF.objects.all(),\n required=False,\n label='VRF'\n )\n\n wwn = forms.CharField(\n empty_value=None,\n required=False,\n label='WWN'\n )\n\n fieldsets = (\n ('Interface', ('device', 'module', 'name', 'type', 'speed', 'duplex', 'label', 'description', 'tags')),\n ('Addressing', ('vrf', 'mac_address', 'wwn')),\n ('Operation', ('mtu', 'tx_power', 'enabled', 'mgmt_only', 'mark_connected')),\n ('Related Interfaces', ('parent', 'bridge', 'lag')),\n ('PoE', ('poe_mode', 'poe_type')),\n ('802.1Q Switching', ('mode', 'vlan_group', 'untagged_vlan', 'tagged_vlans')),\n ('Wireless', (\n 'rf_role', 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'wireless_lan_group', 'wireless_lans',\n )),\n )\n\n class Meta:\n model = Interface\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'speed', 'duplex', 'enabled', 'parent', 'bridge', 'lag',\n 'mac_address', 'wwn', 'mtu', 'mgmt_only', 'mark_connected', 'description', 'poe_mode', 'poe_type', 'mode',\n 'rf_role', 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'tx_power', 'wireless_lans',\n 'untagged_vlan', 'tagged_vlans', 'vrf', 'tags',\n ]\n widgets = {\n 'device': 
forms.HiddenInput(),\n 'type': StaticSelect(),\n 'speed': SelectSpeedWidget(),\n 'poe_mode': StaticSelect(),\n 'poe_type': StaticSelect(),\n 'duplex': StaticSelect(),\n 'mode': StaticSelect(),\n 'rf_role': StaticSelect(),\n 'rf_channel': StaticSelect(),\n }\n labels = {\n 'mode': '802.1Q Mode',\n }\n help_texts = {\n 'mode': INTERFACE_MODE_HELP_TEXT,\n 'rf_channel_frequency': \"Populated by selected channel (if set)\",\n 'rf_channel_width': \"Populated by selected channel (if set)\",\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Restrict LAG/bridge interface assignment by device/VC\n device_id = self.data['device'] if self.is_bound else self.initial.get('device')\n device = Device.objects.filter(pk=device_id).first()\n if device and device.virtual_chassis and device.virtual_chassis.master:\n self.fields['lag'].widget.add_query_param('device_id', device.virtual_chassis.master.pk)\n self.fields['bridge'].widget.add_query_param('device_id', device.virtual_chassis.master.pk)\n\n\nclass FrontPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n rear_port = DynamicModelChoiceField(\n queryset=RearPort.objects.all(),\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = FrontPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'color', 'rear_port', 'rear_port_position', 'mark_connected',\n 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass RearPortForm(NetBoxModelForm):\n module = DynamicModelChoiceField(\n queryset=Module.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device',\n }\n )\n\n class Meta:\n model = RearPort\n fields = [\n 'device', 'module', 'name', 'label', 'type', 'color', 'positions', 'mark_connected', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n 'type': StaticSelect(),\n }\n\n\nclass ModuleBayForm(NetBoxModelForm):\n\n class Meta:\n model = ModuleBay\n fields = [\n 'device', 'name', 'label', 'position', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n }\n\n\nclass DeviceBayForm(NetBoxModelForm):\n\n class Meta:\n model = DeviceBay\n fields = [\n 'device', 'name', 'label', 'description', 'tags',\n ]\n widgets = {\n 'device': forms.HiddenInput(),\n }\n\n\nclass PopulateDeviceBayForm(BootstrapMixin, forms.Form):\n installed_device = forms.ModelChoiceField(\n queryset=Device.objects.all(),\n label='Child Device',\n help_text=\"Child devices must first be created and assigned to the site/rack of the parent device.\",\n widget=StaticSelect(),\n )\n\n def __init__(self, device_bay, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['installed_device'].queryset = Device.objects.filter(\n site=device_bay.device.site,\n rack=device_bay.device.rack,\n parent_bay__isnull=True,\n device_type__u_height=0,\n device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD\n ).exclude(pk=device_bay.device.pk)\n\n\nclass InventoryItemForm(NetBoxModelForm):\n device = DynamicModelChoiceField(\n queryset=Device.objects.all()\n )\n parent = DynamicModelChoiceField(\n queryset=InventoryItem.objects.all(),\n required=False,\n query_params={\n 'device_id': '$device'\n }\n )\n role = DynamicModelChoiceField(\n queryset=InventoryItemRole.objects.all(),\n required=False\n )\n manufacturer = DynamicModelChoiceField(\n queryset=Manufacturer.objects.all(),\n 
required=False\n )\n component_type = ContentTypeChoiceField(\n queryset=ContentType.objects.all(),\n limit_choices_to=MODULAR_COMPONENT_MODELS,\n required=False,\n widget=forms.HiddenInput\n )\n component_id = forms.IntegerField(\n required=False,\n widget=forms.HiddenInput\n )\n\n fieldsets = (\n ('Inventory Item', ('device', 'parent', 'name', 'label', 'role', 'description', 'tags')),\n ('Hardware', ('manufacturer', 'part_id', 'serial', 'asset_tag')),\n )\n\n class Meta:\n model = InventoryItem\n fields = [\n 'device', 'parent', 'name', 'label', 'role', 'manufacturer', 'part_id', 'serial', 'asset_tag',\n 'description', 'component_type', 'component_id', 'tags',\n ]\n\n\n#\n# Device component roles\n#\n\nclass InventoryItemRoleForm(NetBoxModelForm):\n slug = SlugField()\n\n class Meta:\n model = InventoryItemRole\n fields = [\n 'name', 'slug', 'color', 'description', 'tags',\n ]\n", "path": "netbox/dcim/forms/models.py"}]} |
gh_patches_debug_1361 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-2017 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pickle Persistence Not Saving Bot_data[BUG]
<!--
Thanks for reporting issues of python-telegram-bot!
Use this template to notify us if you found a bug.
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. In line 282 of picklepersistence.py:
   [BUG] if self.user_data or self.chat_data or self.conversations:
   [FIX] if self.user_data or self.chat_data or self.conversations or self.bot_data:
2. Add self.bot_data in the if-check
### Expected behaviour
When you register the pickle persistence with (store_user_data=False, store_chat_data=False), the bot should save bot_data to the pickle file every time it calls flush().
### Actual behaviour
In the current version, the bot only checks whether user_data/chat_data/conversations are non-empty before saving the pickle file, so in the case (store_user_data=False, store_chat_data=False) bot_data is never checked and the file is never written. In my case, user_data/chat_data are always empty, so the bot doesn't save to the pickle file.
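For illustration, a minimal sketch that reproduces the missed write (the filename and the bot_data payload are made-up placeholders, not taken from this report):

```python
from telegram.ext import PicklePersistence

# Hypothetical reproduction -- "bot_state" and the payload are placeholders.
persistence = PicklePersistence(
    filename="bot_state",      # single_file=True is the default
    store_user_data=False,
    store_chat_data=False,
    on_flush=True,             # defer all writes until flush()
)

persistence.get_bot_data()                   # initialises empty user/chat/bot data
persistence.update_bot_data({"counter": 1})  # kept in memory because on_flush=True
persistence.flush()                          # user_data, chat_data and conversations are all
                                             # empty, so the check skips dump_singlefile() and
                                             # "bot_state" is never written to disk
```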
### Configuration
**Operating System:**
**Version of Python, python-telegram-bot & dependencies:**
``$ python -m telegram``
### Logs
Insert logs here (if necessary)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/ext/picklepersistence.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2020
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the PicklePersistence class."""
20 import pickle
21 from collections import defaultdict
22 from copy import deepcopy
23
24 from telegram.ext import BasePersistence
25
26
27 class PicklePersistence(BasePersistence):
28 """Using python's builtin pickle for making you bot persistent.
29
30 Attributes:
31 filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
32 is false this will be used as a prefix.
33 store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this
34 persistence class.
35 store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this
36 persistence class.
37 store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this
38 persistence class.
39 single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of
40 `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is
41 ``True``.
42 on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`
43 is called and keep data in memory until that happens. When ``False`` will store data
44 on any transaction *and* on call fo :meth:`flush`. Default is ``False``.
45
46 Args:
47 filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
48 is false this will be used as a prefix.
49 store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this
50 persistence class. Default is ``True``.
51 store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this
52 persistence class. Default is ``True``.
53 store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this
54 persistence class. Default is ``True`` .
55 single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of
56 `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is
57 ``True``.
58 on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`
59 is called and keep data in memory until that happens. When ``False`` will store data
60 on any transaction *and* on call fo :meth:`flush`. Default is ``False``.
61 """
62
63 def __init__(self, filename,
64 store_user_data=True,
65 store_chat_data=True,
66 store_bot_data=True,
67 single_file=True,
68 on_flush=False):
69 super().__init__(store_user_data=store_user_data,
70 store_chat_data=store_chat_data,
71 store_bot_data=store_bot_data)
72 self.filename = filename
73 self.single_file = single_file
74 self.on_flush = on_flush
75 self.user_data = None
76 self.chat_data = None
77 self.bot_data = None
78 self.conversations = None
79
80 def load_singlefile(self):
81 try:
82 filename = self.filename
83 with open(self.filename, "rb") as f:
84 data = pickle.load(f)
85 self.user_data = defaultdict(dict, data['user_data'])
86 self.chat_data = defaultdict(dict, data['chat_data'])
87 # For backwards compatibility with files not containing bot data
88 self.bot_data = data.get('bot_data', {})
89 self.conversations = data['conversations']
90 except IOError:
91 self.conversations = {}
92 self.user_data = defaultdict(dict)
93 self.chat_data = defaultdict(dict)
94 self.bot_data = {}
95 except pickle.UnpicklingError:
96 raise TypeError("File {} does not contain valid pickle data".format(filename))
97 except Exception:
98 raise TypeError("Something went wrong unpickling {}".format(filename))
99
100 def load_file(self, filename):
101 try:
102 with open(filename, "rb") as f:
103 return pickle.load(f)
104 except IOError:
105 return None
106 except pickle.UnpicklingError:
107 raise TypeError("File {} does not contain valid pickle data".format(filename))
108 except Exception:
109 raise TypeError("Something went wrong unpickling {}".format(filename))
110
111 def dump_singlefile(self):
112 with open(self.filename, "wb") as f:
113 data = {'conversations': self.conversations, 'user_data': self.user_data,
114 'chat_data': self.chat_data, 'bot_data': self.bot_data}
115 pickle.dump(data, f)
116
117 def dump_file(self, filename, data):
118 with open(filename, "wb") as f:
119 pickle.dump(data, f)
120
121 def get_user_data(self):
122 """Returns the user_data from the pickle file if it exsists or an empty defaultdict.
123
124 Returns:
125 :obj:`defaultdict`: The restored user data.
126 """
127 if self.user_data:
128 pass
129 elif not self.single_file:
130 filename = "{}_user_data".format(self.filename)
131 data = self.load_file(filename)
132 if not data:
133 data = defaultdict(dict)
134 else:
135 data = defaultdict(dict, data)
136 self.user_data = data
137 else:
138 self.load_singlefile()
139 return deepcopy(self.user_data)
140
141 def get_chat_data(self):
142 """Returns the chat_data from the pickle file if it exsists or an empty defaultdict.
143
144 Returns:
145 :obj:`defaultdict`: The restored chat data.
146 """
147 if self.chat_data:
148 pass
149 elif not self.single_file:
150 filename = "{}_chat_data".format(self.filename)
151 data = self.load_file(filename)
152 if not data:
153 data = defaultdict(dict)
154 else:
155 data = defaultdict(dict, data)
156 self.chat_data = data
157 else:
158 self.load_singlefile()
159 return deepcopy(self.chat_data)
160
161 def get_bot_data(self):
162 """Returns the bot_data from the pickle file if it exsists or an empty dict.
163
164 Returns:
165 :obj:`defaultdict`: The restored bot data.
166 """
167 if self.bot_data:
168 pass
169 elif not self.single_file:
170 filename = "{}_bot_data".format(self.filename)
171 data = self.load_file(filename)
172 if not data:
173 data = {}
174 self.bot_data = data
175 else:
176 self.load_singlefile()
177 return deepcopy(self.bot_data)
178
179 def get_conversations(self, name):
180 """Returns the conversations from the pickle file if it exsists or an empty defaultdict.
181
182 Args:
183 name (:obj:`str`): The handlers name.
184
185 Returns:
186 :obj:`dict`: The restored conversations for the handler.
187 """
188 if self.conversations:
189 pass
190 elif not self.single_file:
191 filename = "{}_conversations".format(self.filename)
192 data = self.load_file(filename)
193 if not data:
194 data = {name: {}}
195 self.conversations = data
196 else:
197 self.load_singlefile()
198 return self.conversations.get(name, {}).copy()
199
200 def update_conversation(self, name, key, new_state):
201 """Will update the conversations for the given handler and depending on :attr:`on_flush`
202 save the pickle file.
203
204 Args:
205 name (:obj:`str`): The handlers name.
206 key (:obj:`tuple`): The key the state is changed for.
207 new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.
208 """
209 if self.conversations.setdefault(name, {}).get(key) == new_state:
210 return
211 self.conversations[name][key] = new_state
212 if not self.on_flush:
213 if not self.single_file:
214 filename = "{}_conversations".format(self.filename)
215 self.dump_file(filename, self.conversations)
216 else:
217 self.dump_singlefile()
218
219 def update_user_data(self, user_id, data):
220 """Will update the user_data (if changed) and depending on :attr:`on_flush` save the
221 pickle file.
222
223 Args:
224 user_id (:obj:`int`): The user the data might have been changed for.
225 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].
226 """
227 if self.user_data is None:
228 self.user_data = defaultdict(dict)
229 if self.user_data.get(user_id) == data:
230 return
231 self.user_data[user_id] = data
232 if not self.on_flush:
233 if not self.single_file:
234 filename = "{}_user_data".format(self.filename)
235 self.dump_file(filename, self.user_data)
236 else:
237 self.dump_singlefile()
238
239 def update_chat_data(self, chat_id, data):
240 """Will update the chat_data (if changed) and depending on :attr:`on_flush` save the
241 pickle file.
242
243 Args:
244 chat_id (:obj:`int`): The chat the data might have been changed for.
245 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].
246 """
247 if self.chat_data is None:
248 self.chat_data = defaultdict(dict)
249 if self.chat_data.get(chat_id) == data:
250 return
251 self.chat_data[chat_id] = data
252 if not self.on_flush:
253 if not self.single_file:
254 filename = "{}_chat_data".format(self.filename)
255 self.dump_file(filename, self.chat_data)
256 else:
257 self.dump_singlefile()
258
259 def update_bot_data(self, data):
260 """Will update the bot_data (if changed) and depending on :attr:`on_flush` save the
261 pickle file.
262
263 Args:
264 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.
265 """
266 if self.bot_data == data:
267 return
268 self.bot_data = data.copy()
269 if not self.on_flush:
270 if not self.single_file:
271 filename = "{}_bot_data".format(self.filename)
272 self.dump_file(filename, self.bot_data)
273 else:
274 self.dump_singlefile()
275
276 def flush(self):
277 """ Will save all data in memory to pickle file(s).
278 """
279 if self.single_file:
280 if self.user_data or self.chat_data or self.conversations:
281 self.dump_singlefile()
282 else:
283 if self.user_data:
284 self.dump_file("{}_user_data".format(self.filename), self.user_data)
285 if self.chat_data:
286 self.dump_file("{}_chat_data".format(self.filename), self.chat_data)
287 if self.bot_data:
288 self.dump_file("{}_bot_data".format(self.filename), self.bot_data)
289 if self.conversations:
290 self.dump_file("{}_conversations".format(self.filename), self.conversations)
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/ext/picklepersistence.py b/telegram/ext/picklepersistence.py
--- a/telegram/ext/picklepersistence.py
+++ b/telegram/ext/picklepersistence.py
@@ -277,7 +277,7 @@
""" Will save all data in memory to pickle file(s).
"""
if self.single_file:
- if self.user_data or self.chat_data or self.conversations:
+ if self.user_data or self.chat_data or self.bot_data or self.conversations:
self.dump_singlefile()
else:
if self.user_data:
| {"golden_diff": "diff --git a/telegram/ext/picklepersistence.py b/telegram/ext/picklepersistence.py\n--- a/telegram/ext/picklepersistence.py\n+++ b/telegram/ext/picklepersistence.py\n@@ -277,7 +277,7 @@\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n- if self.user_data or self.chat_data or self.conversations:\n+ if self.user_data or self.chat_data or self.bot_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n", "issue": "Pickle Persistence Not Saving Bot_data[BUG]\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug.\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1.\r\nIn line 282 of picklepersistence.py:\r\n[BUG] if self.user_data or self.chat_data or self.conversations:\r\n[FIX] if self.user_data or self.chat_data or self.conversations or self.bot_data:\r\n\r\n2.\r\nAdd self.bot_data in the if-check\r\n3.\r\n\r\n### Expected behaviour\r\nWhen you register the pickle persistence with (store_user_data=False, store_chat_data=False), the bot should save bot_data to pickle file every time it calls flush().\r\n\r\n\r\n### Actual behaviour\r\nIn the current version, the bot only checks user_data/chat_data/conversation to be non-empty to save the pickle file, which when the case (store_user_data=False, store_chat_data=False), the bot never checks bot_data in order to save the pickle file. In my case, user_data/chat_data are always empty, so bot doesn't save to pickle file\r\n\r\n### Configuration\r\n**Operating System:**\r\n\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n\r\n### Logs\r\nInsert logs here (if necessary)\r\n\nPickle Persistence Not Saving Bot_data[BUG]\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug.\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1.\r\nIn line 282 of picklepersistence.py:\r\n[BUG] if self.user_data or self.chat_data or self.conversations:\r\n[FIX] if self.user_data or self.chat_data or self.conversations or self.bot_data:\r\n\r\n2.\r\nAdd self.bot_data in the if-check\r\n3.\r\n\r\n### Expected behaviour\r\nWhen you register the pickle persistence with (store_user_data=False, store_chat_data=False), the bot should save bot_data to pickle file every time it calls flush().\r\n\r\n\r\n### Actual behaviour\r\nIn the current version, the bot only checks user_data/chat_data/conversation to be non-empty to save the pickle file, which when the case (store_user_data=False, store_chat_data=False), the bot never checks bot_data in order to save the pickle file. 
In my case, user_data/chat_data are always empty, so bot doesn't save to pickle file\r\n\r\n### Configuration\r\n**Operating System:**\r\n\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n\r\n### Logs\r\nInsert logs here (if necessary)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the PicklePersistence class.\"\"\"\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import BasePersistence\n\n\nclass PicklePersistence(BasePersistence):\n \"\"\"Using python's builtin pickle for making you bot persistent.\n\n Attributes:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this\n persistence class.\n single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n\n Args:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this\n persistence class. Default is ``True`` .\n single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. 
Default is ``False``.\n \"\"\"\n\n def __init__(self, filename,\n store_user_data=True,\n store_chat_data=True,\n store_bot_data=True,\n single_file=True,\n on_flush=False):\n super().__init__(store_user_data=store_user_data,\n store_chat_data=store_chat_data,\n store_bot_data=store_bot_data)\n self.filename = filename\n self.single_file = single_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n self.bot_data = None\n self.conversations = None\n\n def load_singlefile(self):\n try:\n filename = self.filename\n with open(self.filename, \"rb\") as f:\n data = pickle.load(f)\n self.user_data = defaultdict(dict, data['user_data'])\n self.chat_data = defaultdict(dict, data['chat_data'])\n # For backwards compatibility with files not containing bot data\n self.bot_data = data.get('bot_data', {})\n self.conversations = data['conversations']\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n self.bot_data = {}\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def load_file(self, filename):\n try:\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n except IOError:\n return None\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def dump_singlefile(self):\n with open(self.filename, \"wb\") as f:\n data = {'conversations': self.conversations, 'user_data': self.user_data,\n 'chat_data': self.chat_data, 'bot_data': self.bot_data}\n pickle.dump(data, f)\n\n def dump_file(self, filename, data):\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n def get_user_data(self):\n \"\"\"Returns the user_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored user data.\n \"\"\"\n if self.user_data:\n pass\n elif not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.user_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.user_data)\n\n def get_chat_data(self):\n \"\"\"Returns the chat_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored chat data.\n \"\"\"\n if self.chat_data:\n pass\n elif not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.chat_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.chat_data)\n\n def get_bot_data(self):\n \"\"\"Returns the bot_data from the pickle file if it exsists or an empty dict.\n\n Returns:\n :obj:`defaultdict`: The restored bot data.\n \"\"\"\n if self.bot_data:\n pass\n elif not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {}\n self.bot_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.bot_data)\n\n def get_conversations(self, name):\n \"\"\"Returns the conversations from the pickle file if it exsists or an empty defaultdict.\n\n Args:\n name (:obj:`str`): The handlers name.\n\n Returns:\n :obj:`dict`: 
The restored conversations for the handler.\n \"\"\"\n if self.conversations:\n pass\n elif not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self.load_singlefile()\n return self.conversations.get(name, {}).copy()\n\n def update_conversation(self, name, key, new_state):\n \"\"\"Will update the conversations for the given handler and depending on :attr:`on_flush`\n save the pickle file.\n\n Args:\n name (:obj:`str`): The handlers name.\n key (:obj:`tuple`): The key the state is changed for.\n new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.\n \"\"\"\n if self.conversations.setdefault(name, {}).get(key) == new_state:\n return\n self.conversations[name][key] = new_state\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n self.dump_file(filename, self.conversations)\n else:\n self.dump_singlefile()\n\n def update_user_data(self, user_id, data):\n \"\"\"Will update the user_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n user_id (:obj:`int`): The user the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].\n \"\"\"\n if self.user_data is None:\n self.user_data = defaultdict(dict)\n if self.user_data.get(user_id) == data:\n return\n self.user_data[user_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n self.dump_file(filename, self.user_data)\n else:\n self.dump_singlefile()\n\n def update_chat_data(self, chat_id, data):\n \"\"\"Will update the chat_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n chat_id (:obj:`int`): The chat the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].\n \"\"\"\n if self.chat_data is None:\n self.chat_data = defaultdict(dict)\n if self.chat_data.get(chat_id) == data:\n return\n self.chat_data[chat_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n self.dump_file(filename, self.chat_data)\n else:\n self.dump_singlefile()\n\n def update_bot_data(self, data):\n \"\"\"Will update the bot_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.\n \"\"\"\n if self.bot_data == data:\n return\n self.bot_data = data.copy()\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n self.dump_file(filename, self.bot_data)\n else:\n self.dump_singlefile()\n\n def flush(self):\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(\"{}_user_data\".format(self.filename), self.user_data)\n if self.chat_data:\n self.dump_file(\"{}_chat_data\".format(self.filename), self.chat_data)\n if self.bot_data:\n self.dump_file(\"{}_bot_data\".format(self.filename), self.bot_data)\n if self.conversations:\n self.dump_file(\"{}_conversations\".format(self.filename), self.conversations)\n", "path": "telegram/ext/picklepersistence.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot 
API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the PicklePersistence class.\"\"\"\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import BasePersistence\n\n\nclass PicklePersistence(BasePersistence):\n \"\"\"Using python's builtin pickle for making you bot persistent.\n\n Attributes:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this\n persistence class.\n single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n\n Args:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this\n persistence class. Default is ``True`` .\n single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. 
Default is ``False``.\n \"\"\"\n\n def __init__(self, filename,\n store_user_data=True,\n store_chat_data=True,\n store_bot_data=True,\n single_file=True,\n on_flush=False):\n super().__init__(store_user_data=store_user_data,\n store_chat_data=store_chat_data,\n store_bot_data=store_bot_data)\n self.filename = filename\n self.single_file = single_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n self.bot_data = None\n self.conversations = None\n\n def load_singlefile(self):\n try:\n filename = self.filename\n with open(self.filename, \"rb\") as f:\n data = pickle.load(f)\n self.user_data = defaultdict(dict, data['user_data'])\n self.chat_data = defaultdict(dict, data['chat_data'])\n # For backwards compatibility with files not containing bot data\n self.bot_data = data.get('bot_data', {})\n self.conversations = data['conversations']\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n self.bot_data = {}\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def load_file(self, filename):\n try:\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n except IOError:\n return None\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def dump_singlefile(self):\n with open(self.filename, \"wb\") as f:\n data = {'conversations': self.conversations, 'user_data': self.user_data,\n 'chat_data': self.chat_data, 'bot_data': self.bot_data}\n pickle.dump(data, f)\n\n def dump_file(self, filename, data):\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n def get_user_data(self):\n \"\"\"Returns the user_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored user data.\n \"\"\"\n if self.user_data:\n pass\n elif not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.user_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.user_data)\n\n def get_chat_data(self):\n \"\"\"Returns the chat_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored chat data.\n \"\"\"\n if self.chat_data:\n pass\n elif not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.chat_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.chat_data)\n\n def get_bot_data(self):\n \"\"\"Returns the bot_data from the pickle file if it exsists or an empty dict.\n\n Returns:\n :obj:`defaultdict`: The restored bot data.\n \"\"\"\n if self.bot_data:\n pass\n elif not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {}\n self.bot_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.bot_data)\n\n def get_conversations(self, name):\n \"\"\"Returns the conversations from the pickle file if it exsists or an empty defaultdict.\n\n Args:\n name (:obj:`str`): The handlers name.\n\n Returns:\n :obj:`dict`: 
The restored conversations for the handler.\n \"\"\"\n if self.conversations:\n pass\n elif not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self.load_singlefile()\n return self.conversations.get(name, {}).copy()\n\n def update_conversation(self, name, key, new_state):\n \"\"\"Will update the conversations for the given handler and depending on :attr:`on_flush`\n save the pickle file.\n\n Args:\n name (:obj:`str`): The handlers name.\n key (:obj:`tuple`): The key the state is changed for.\n new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.\n \"\"\"\n if self.conversations.setdefault(name, {}).get(key) == new_state:\n return\n self.conversations[name][key] = new_state\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n self.dump_file(filename, self.conversations)\n else:\n self.dump_singlefile()\n\n def update_user_data(self, user_id, data):\n \"\"\"Will update the user_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n user_id (:obj:`int`): The user the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].\n \"\"\"\n if self.user_data is None:\n self.user_data = defaultdict(dict)\n if self.user_data.get(user_id) == data:\n return\n self.user_data[user_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n self.dump_file(filename, self.user_data)\n else:\n self.dump_singlefile()\n\n def update_chat_data(self, chat_id, data):\n \"\"\"Will update the chat_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n chat_id (:obj:`int`): The chat the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].\n \"\"\"\n if self.chat_data is None:\n self.chat_data = defaultdict(dict)\n if self.chat_data.get(chat_id) == data:\n return\n self.chat_data[chat_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n self.dump_file(filename, self.chat_data)\n else:\n self.dump_singlefile()\n\n def update_bot_data(self, data):\n \"\"\"Will update the bot_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.\n \"\"\"\n if self.bot_data == data:\n return\n self.bot_data = data.copy()\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n self.dump_file(filename, self.bot_data)\n else:\n self.dump_singlefile()\n\n def flush(self):\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.bot_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(\"{}_user_data\".format(self.filename), self.user_data)\n if self.chat_data:\n self.dump_file(\"{}_chat_data\".format(self.filename), self.chat_data)\n if self.bot_data:\n self.dump_file(\"{}_bot_data\".format(self.filename), self.bot_data)\n if self.conversations:\n self.dump_file(\"{}_conversations\".format(self.filename), self.conversations)\n", "path": "telegram/ext/picklepersistence.py"}]} |
gh_patches_debug_1362 | rasdani/github-patches | git_diff | scverse__scanpy-2913 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scale max value only cuts high values
### Please make sure these conditions are met
- [X] I have checked that this issue has not already been reported.
- [X] I have confirmed this bug exists on the latest version of scanpy.
- [X] (optional) I have confirmed this bug exists on the main branch of scanpy.
### What happened?
Shouldn't `max_value` for `scale` with `zero_center` also clip the negative values?
### Minimal code sample
```python
bdata = sc.datasets.pbmc3k()
sc.pp.scale(bdata,max_value= 1)
print(bdata.X.min(),bdata.X.max())
```
### Error output
```pytb
-2.62718 1.0
shouldn't this be -1,1
```
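For reference, a small sketch of the symmetric clipping the report expects when `zero_center=True`; the values below are placeholders and this is not the current scanpy implementation:

```python
import numpy as np

# Illustrative only: clip a zero-centered, scaled dense matrix to the symmetric
# range [-max_value, +max_value] instead of only capping the positive side.
max_value = 1.0
X_scaled = np.array([[-2.62718, 0.4], [0.9, 1.7]])  # stand-in for bdata.X after scaling
X_clipped = np.clip(X_scaled, -max_value, max_value)
print(X_clipped.min(), X_clipped.max())              # -1.0 1.0
```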
### Versions
scanpy built from the GitHub main branch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/preprocessing/_simple.py`
Content:
```
1 """Simple Preprocessing Functions
2
3 Compositions of these functions are found in sc.preprocess.recipes.
4 """
5 from __future__ import annotations
6
7 import warnings
8 from functools import singledispatch
9 from typing import TYPE_CHECKING, Literal
10
11 import numba
12 import numpy as np
13 import scipy as sp
14 from anndata import AnnData
15 from pandas.api.types import CategoricalDtype
16 from scipy.sparse import csr_matrix, issparse, isspmatrix_csr, spmatrix
17 from sklearn.utils import check_array, sparsefuncs
18
19 from .. import logging as logg
20 from .._compat import old_positionals
21 from .._settings import settings as sett
22 from .._utils import (
23 AnyRandom,
24 _check_array_function_arguments,
25 renamed_arg,
26 sanitize_anndata,
27 view_to_actual,
28 )
29 from ..get import _check_mask, _get_obs_rep, _set_obs_rep
30 from ._distributed import materialize_as_ndarray
31 from ._utils import _get_mean_var
32
33 # install dask if available
34 try:
35 import dask.array as da
36 except ImportError:
37 da = None
38
39 # backwards compat
40 from ._deprecated.highly_variable_genes import filter_genes_dispersion # noqa: F401
41
42 if TYPE_CHECKING:
43 from collections.abc import Collection, Iterable, Sequence
44 from numbers import Number
45
46 from numpy.typing import NDArray
47
48
49 @old_positionals(
50 "min_counts", "min_genes", "max_counts", "max_genes", "inplace", "copy"
51 )
52 def filter_cells(
53 data: AnnData | spmatrix | np.ndarray,
54 *,
55 min_counts: int | None = None,
56 min_genes: int | None = None,
57 max_counts: int | None = None,
58 max_genes: int | None = None,
59 inplace: bool = True,
60 copy: bool = False,
61 ) -> AnnData | tuple[np.ndarray, np.ndarray] | None:
62 """\
63 Filter cell outliers based on counts and numbers of genes expressed.
64
65 For instance, only keep cells with at least `min_counts` counts or
66 `min_genes` genes expressed. This is to filter measurement outliers,
67 i.e. “unreliable” observations.
68
69 Only provide one of the optional parameters `min_counts`, `min_genes`,
70 `max_counts`, `max_genes` per call.
71
72 Parameters
73 ----------
74 data
75 The (annotated) data matrix of shape `n_obs` × `n_vars`.
76 Rows correspond to cells and columns to genes.
77 min_counts
78 Minimum number of counts required for a cell to pass filtering.
79 min_genes
80 Minimum number of genes expressed required for a cell to pass filtering.
81 max_counts
82 Maximum number of counts required for a cell to pass filtering.
83 max_genes
84 Maximum number of genes expressed required for a cell to pass filtering.
85 inplace
86 Perform computation inplace or return result.
87
88 Returns
89 -------
90 Depending on `inplace`, returns the following arrays or directly subsets
91 and annotates the data matrix:
92
93 cells_subset
94 Boolean index mask that does filtering. `True` means that the
95 cell is kept. `False` means the cell is removed.
96 number_per_cell
97 Depending on what was thresholded (`counts` or `genes`),
98 the array stores `n_counts` or `n_cells` per gene.
99
100 Examples
101 --------
102 >>> import scanpy as sc
103 >>> adata = sc.datasets.krumsiek11()
104 UserWarning: Observation names are not unique. To make them unique, call `.obs_names_make_unique`.
105 utils.warn_names_duplicates("obs")
106 >>> adata.obs_names_make_unique()
107 >>> adata.n_obs
108 640
109 >>> adata.var_names.tolist() # doctest: +NORMALIZE_WHITESPACE
110 ['Gata2', 'Gata1', 'Fog1', 'EKLF', 'Fli1', 'SCL',
111 'Cebpa', 'Pu.1', 'cJun', 'EgrNab', 'Gfi1']
112 >>> # add some true zeros
113 >>> adata.X[adata.X < 0.3] = 0
114 >>> # simply compute the number of genes per cell
115 >>> sc.pp.filter_cells(adata, min_genes=0)
116 >>> adata.n_obs
117 640
118 >>> adata.obs['n_genes'].min()
119 1
120 >>> # filter manually
121 >>> adata_copy = adata[adata.obs['n_genes'] >= 3]
122 >>> adata_copy.n_obs
123 554
124 >>> adata_copy.obs['n_genes'].min()
125 3
126 >>> # actually do some filtering
127 >>> sc.pp.filter_cells(adata, min_genes=3)
128 >>> adata.n_obs
129 554
130 >>> adata.obs['n_genes'].min()
131 3
132 """
133 if copy:
134 logg.warning("`copy` is deprecated, use `inplace` instead.")
135 n_given_options = sum(
136 option is not None for option in [min_genes, min_counts, max_genes, max_counts]
137 )
138 if n_given_options != 1:
139 raise ValueError(
140 "Only provide one of the optional parameters `min_counts`, "
141 "`min_genes`, `max_counts`, `max_genes` per call."
142 )
143 if isinstance(data, AnnData):
144 adata = data.copy() if copy else data
145 cell_subset, number = materialize_as_ndarray(
146 filter_cells(
147 adata.X,
148 min_counts=min_counts,
149 min_genes=min_genes,
150 max_counts=max_counts,
151 max_genes=max_genes,
152 ),
153 )
154 if not inplace:
155 return cell_subset, number
156 if min_genes is None and max_genes is None:
157 adata.obs["n_counts"] = number
158 else:
159 adata.obs["n_genes"] = number
160 adata._inplace_subset_obs(cell_subset)
161 return adata if copy else None
162 X = data # proceed with processing the data matrix
163 min_number = min_counts if min_genes is None else min_genes
164 max_number = max_counts if max_genes is None else max_genes
165 number_per_cell = np.sum(
166 X if min_genes is None and max_genes is None else X > 0, axis=1
167 )
168 if issparse(X):
169 number_per_cell = number_per_cell.A1
170 if min_number is not None:
171 cell_subset = number_per_cell >= min_number
172 if max_number is not None:
173 cell_subset = number_per_cell <= max_number
174
175 s = materialize_as_ndarray(np.sum(~cell_subset))
176 if s > 0:
177 msg = f"filtered out {s} cells that have "
178 if min_genes is not None or min_counts is not None:
179 msg += "less than "
180 msg += (
181 f"{min_genes} genes expressed"
182 if min_counts is None
183 else f"{min_counts} counts"
184 )
185 if max_genes is not None or max_counts is not None:
186 msg += "more than "
187 msg += (
188 f"{max_genes} genes expressed"
189 if max_counts is None
190 else f"{max_counts} counts"
191 )
192 logg.info(msg)
193 return cell_subset, number_per_cell
194
195
196 @old_positionals(
197 "min_counts", "min_cells", "max_counts", "max_cells", "inplace", "copy"
198 )
199 def filter_genes(
200 data: AnnData | spmatrix | np.ndarray,
201 *,
202 min_counts: int | None = None,
203 min_cells: int | None = None,
204 max_counts: int | None = None,
205 max_cells: int | None = None,
206 inplace: bool = True,
207 copy: bool = False,
208 ) -> AnnData | tuple[np.ndarray, np.ndarray] | None:
209 """\
210 Filter genes based on number of cells or counts.
211
212 Keep genes that have at least `min_counts` counts or are expressed in at
213 least `min_cells` cells or have at most `max_counts` counts or are expressed
214 in at most `max_cells` cells.
215
216 Only provide one of the optional parameters `min_counts`, `min_cells`,
217 `max_counts`, `max_cells` per call.
218
219 Parameters
220 ----------
221 data
222 An annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond
223 to cells and columns to genes.
224 min_counts
225 Minimum number of counts required for a gene to pass filtering.
226 min_cells
227 Minimum number of cells expressed required for a gene to pass filtering.
228 max_counts
229 Maximum number of counts required for a gene to pass filtering.
230 max_cells
231 Maximum number of cells expressed required for a gene to pass filtering.
232 inplace
233 Perform computation inplace or return result.
234
235 Returns
236 -------
237 Depending on `inplace`, returns the following arrays or directly subsets
238 and annotates the data matrix
239
240 gene_subset
241 Boolean index mask that does filtering. `True` means that the
242 gene is kept. `False` means the gene is removed.
243 number_per_gene
244 Depending on what was thresholded (`counts` or `cells`), the array stores
245 `n_counts` or `n_cells` per gene.
246 """
247 if copy:
248 logg.warning("`copy` is deprecated, use `inplace` instead.")
249 n_given_options = sum(
250 option is not None for option in [min_cells, min_counts, max_cells, max_counts]
251 )
252 if n_given_options != 1:
253 raise ValueError(
254 "Only provide one of the optional parameters `min_counts`, "
255 "`min_cells`, `max_counts`, `max_cells` per call."
256 )
257
258 if isinstance(data, AnnData):
259 adata = data.copy() if copy else data
260 gene_subset, number = materialize_as_ndarray(
261 filter_genes(
262 adata.X,
263 min_cells=min_cells,
264 min_counts=min_counts,
265 max_cells=max_cells,
266 max_counts=max_counts,
267 )
268 )
269 if not inplace:
270 return gene_subset, number
271 if min_cells is None and max_cells is None:
272 adata.var["n_counts"] = number
273 else:
274 adata.var["n_cells"] = number
275 adata._inplace_subset_var(gene_subset)
276 return adata if copy else None
277
278 X = data # proceed with processing the data matrix
279 min_number = min_counts if min_cells is None else min_cells
280 max_number = max_counts if max_cells is None else max_cells
281 number_per_gene = np.sum(
282 X if min_cells is None and max_cells is None else X > 0, axis=0
283 )
284 if issparse(X):
285 number_per_gene = number_per_gene.A1
286 if min_number is not None:
287 gene_subset = number_per_gene >= min_number
288 if max_number is not None:
289 gene_subset = number_per_gene <= max_number
290
291 s = np.sum(~gene_subset)
292 if s > 0:
293 msg = f"filtered out {s} genes that are detected "
294 if min_cells is not None or min_counts is not None:
295 msg += "in less than "
296 msg += (
297 f"{min_cells} cells" if min_counts is None else f"{min_counts} counts"
298 )
299 if max_cells is not None or max_counts is not None:
300 msg += "in more than "
301 msg += (
302 f"{max_cells} cells" if max_counts is None else f"{max_counts} counts"
303 )
304 logg.info(msg)
305 return gene_subset, number_per_gene
306
307
308 @renamed_arg("X", "data", pos_0=True)
309 @singledispatch
310 def log1p(
311 data: AnnData | np.ndarray | spmatrix,
312 *,
313 base: Number | None = None,
314 copy: bool = False,
315 chunked: bool | None = None,
316 chunk_size: int | None = None,
317 layer: str | None = None,
318 obsm: str | None = None,
319 ) -> AnnData | np.ndarray | spmatrix | None:
320 """\
321 Logarithmize the data matrix.
322
323 Computes :math:`X = \\log(X + 1)`,
324 where :math:`log` denotes the natural logarithm unless a different base is given.
325
326 Parameters
327 ----------
328 data
329 The (annotated) data matrix of shape `n_obs` × `n_vars`.
330 Rows correspond to cells and columns to genes.
331 base
332 Base of the logarithm. Natural logarithm is used by default.
333 copy
334 If an :class:`~anndata.AnnData` is passed, determines whether a copy
335 is returned.
336 chunked
337 Process the data matrix in chunks, which will save memory.
338 Applies only to :class:`~anndata.AnnData`.
339 chunk_size
340 `n_obs` of the chunks to process the data in.
341 layer
342 Entry of layers to transform.
343 obsm
344 Entry of obsm to transform.
345
346 Returns
347 -------
348 Returns or updates `data`, depending on `copy`.
349 """
350 _check_array_function_arguments(
351 chunked=chunked, chunk_size=chunk_size, layer=layer, obsm=obsm
352 )
353 return log1p_array(data, copy=copy, base=base)
354
355
356 @log1p.register(spmatrix)
357 def log1p_sparse(X: spmatrix, *, base: Number | None = None, copy: bool = False):
358 X = check_array(
359 X, accept_sparse=("csr", "csc"), dtype=(np.float64, np.float32), copy=copy
360 )
361 X.data = log1p(X.data, copy=False, base=base)
362 return X
363
364
365 @log1p.register(np.ndarray)
366 def log1p_array(X: np.ndarray, *, base: Number | None = None, copy: bool = False):
367 # Can force arrays to be np.ndarrays, but would be useful to not
368 # X = check_array(X, dtype=(np.float64, np.float32), ensure_2d=False, copy=copy)
369 if copy:
370 if not np.issubdtype(X.dtype, np.floating):
371 X = X.astype(float)
372 else:
373 X = X.copy()
374 elif not (np.issubdtype(X.dtype, np.floating) or np.issubdtype(X.dtype, complex)):
375 X = X.astype(float)
376 np.log1p(X, out=X)
377 if base is not None:
378 np.divide(X, np.log(base), out=X)
379 return X
380
381
382 @log1p.register(AnnData)
383 def log1p_anndata(
384 adata: AnnData,
385 *,
386 base: Number | None = None,
387 copy: bool = False,
388 chunked: bool = False,
389 chunk_size: int | None = None,
390 layer: str | None = None,
391 obsm: str | None = None,
392 ) -> AnnData | None:
393 if "log1p" in adata.uns_keys():
394 logg.warning("adata.X seems to be already log-transformed.")
395
396 adata = adata.copy() if copy else adata
397 view_to_actual(adata)
398
399 if chunked:
400 if (layer is not None) or (obsm is not None):
401 raise NotImplementedError(
402 "Currently cannot perform chunked operations on arrays not stored in X."
403 )
404 for chunk, start, end in adata.chunked_X(chunk_size):
405 adata.X[start:end] = log1p(chunk, base=base, copy=False)
406 else:
407 X = _get_obs_rep(adata, layer=layer, obsm=obsm)
408 X = log1p(X, copy=False, base=base)
409 _set_obs_rep(adata, X, layer=layer, obsm=obsm)
410
411 adata.uns["log1p"] = {"base": base}
412 if copy:
413 return adata
414
415
416 @old_positionals("copy", "chunked", "chunk_size")
417 def sqrt(
418 data: AnnData | spmatrix | np.ndarray,
419 *,
420 copy: bool = False,
421 chunked: bool = False,
422 chunk_size: int | None = None,
423 ) -> AnnData | spmatrix | np.ndarray | None:
424 """\
425 Square root the data matrix.
426
427 Computes :math:`X = \\sqrt(X)`.
428
429 Parameters
430 ----------
431 data
432 The (annotated) data matrix of shape `n_obs` × `n_vars`.
433 Rows correspond to cells and columns to genes.
434 copy
435 If an :class:`~anndata.AnnData` object is passed,
436 determines whether a copy is returned.
437 chunked
438 Process the data matrix in chunks, which will save memory.
439 Applies only to :class:`~anndata.AnnData`.
440 chunk_size
441 `n_obs` of the chunks to process the data in.
442
443 Returns
444 -------
445 Returns or updates `data`, depending on `copy`.
446 """
447 if isinstance(data, AnnData):
448 adata = data.copy() if copy else data
449 if chunked:
450 for chunk, start, end in adata.chunked_X(chunk_size):
451 adata.X[start:end] = sqrt(chunk)
452 else:
453 adata.X = sqrt(data.X)
454 return adata if copy else None
455 X = data # proceed with data matrix
456 if not issparse(X):
457 return np.sqrt(X)
458 else:
459 return X.sqrt()
460
461
462 def normalize_per_cell( # noqa: PLR0917
463 data: AnnData | np.ndarray | spmatrix,
464 counts_per_cell_after: float | None = None,
465 counts_per_cell: np.ndarray | None = None,
466 key_n_counts: str = "n_counts",
467 copy: bool = False,
468 layers: Literal["all"] | Iterable[str] = (),
469 use_rep: Literal["after", "X"] | None = None,
470 min_counts: int = 1,
471 ) -> AnnData | np.ndarray | spmatrix | None:
472 """\
473 Normalize total counts per cell.
474
475 .. warning::
476 .. deprecated:: 1.3.7
477 Use :func:`~scanpy.pp.normalize_total` instead.
478 The new function is equivalent to the present
479 function, except that
480
481 * the new function doesn't filter cells based on `min_counts`,
482 use :func:`~scanpy.pp.filter_cells` if filtering is needed.
483 * some arguments were renamed
484 * `copy` is replaced by `inplace`
485
486 Normalize each cell by total counts over all genes, so that every cell has
487 the same total count after normalization.
488
489 Similar functions are used, for example, by Seurat [Satija15]_, Cell Ranger
490 [Zheng17]_ or SPRING [Weinreb17]_.
491
492 Parameters
493 ----------
494 data
495 The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
496 to cells and columns to genes.
497 counts_per_cell_after
498 If `None`, after normalization, each cell has a total count equal
499 to the median of the *counts_per_cell* before normalization.
500 counts_per_cell
501 Precomputed counts per cell.
502 key_n_counts
503 Name of the field in `adata.obs` where the total counts per cell are
504 stored.
505 copy
506 If an :class:`~anndata.AnnData` is passed, determines whether a copy
507 is returned.
508 min_counts
509 Cells with counts less than `min_counts` are filtered out during
510 normalization.
511
512 Returns
513 -------
514 Returns `None` if `copy=False`, else returns an updated `AnnData` object. Sets the following fields:
515
516 `adata.X` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)
517 Normalized count data matrix.
518
519 Examples
520 --------
521 >>> import scanpy as sc
522 >>> adata = AnnData(np.array([[1, 0], [3, 0], [5, 6]], dtype=np.float32))
523 >>> print(adata.X.sum(axis=1))
524 [ 1. 3. 11.]
525 >>> sc.pp.normalize_per_cell(adata)
526 >>> print(adata.obs)
527 n_counts
528 0 1.0
529 1 3.0
530 2 11.0
531 >>> print(adata.X.sum(axis=1))
532 [3. 3. 3.]
533 >>> sc.pp.normalize_per_cell(
534 ... adata, counts_per_cell_after=1,
535 ... key_n_counts='n_counts2',
536 ... )
537 >>> print(adata.obs)
538 n_counts n_counts2
539 0 1.0 3.0
540 1 3.0 3.0
541 2 11.0 3.0
542 >>> print(adata.X.sum(axis=1))
543 [1. 1. 1.]
544 """
545 if isinstance(data, AnnData):
546 start = logg.info("normalizing by total count per cell")
547 adata = data.copy() if copy else data
548 if counts_per_cell is None:
549 cell_subset, counts_per_cell = materialize_as_ndarray(
550 filter_cells(adata.X, min_counts=min_counts)
551 )
552 adata.obs[key_n_counts] = counts_per_cell
553 adata._inplace_subset_obs(cell_subset)
554 counts_per_cell = counts_per_cell[cell_subset]
555 normalize_per_cell(adata.X, counts_per_cell_after, counts_per_cell)
556
557 layers = adata.layers.keys() if layers == "all" else layers
558 if use_rep == "after":
559 after = counts_per_cell_after
560 elif use_rep == "X":
561 after = np.median(counts_per_cell[cell_subset])
562 elif use_rep is None:
563 after = None
564 else:
565 raise ValueError('use_rep should be "after", "X" or None')
566 for layer in layers:
567 _subset, counts = filter_cells(adata.layers[layer], min_counts=min_counts)
568 temp = normalize_per_cell(adata.layers[layer], after, counts, copy=True)
569 adata.layers[layer] = temp
570
571 logg.info(
572 " finished ({time_passed}): normalized adata.X and added"
573 f" {key_n_counts!r}, counts per cell before normalization (adata.obs)",
574 time=start,
575 )
576 return adata if copy else None
577 # proceed with data matrix
578 X = data.copy() if copy else data
579 if counts_per_cell is None:
580 if not copy:
581 raise ValueError("Can only be run with copy=True")
582 cell_subset, counts_per_cell = filter_cells(X, min_counts=min_counts)
583 X = X[cell_subset]
584 counts_per_cell = counts_per_cell[cell_subset]
585 if counts_per_cell_after is None:
586 counts_per_cell_after = np.median(counts_per_cell)
587 with warnings.catch_warnings():
588 warnings.simplefilter("ignore")
589 counts_per_cell += counts_per_cell == 0
590 counts_per_cell /= counts_per_cell_after
591 if not issparse(X):
592 X /= counts_per_cell[:, np.newaxis]
593 else:
594 sparsefuncs.inplace_row_scale(X, 1 / counts_per_cell)
595 return X if copy else None
596
597
598 @old_positionals("layer", "n_jobs", "copy")
599 def regress_out(
600 adata: AnnData,
601 keys: str | Sequence[str],
602 *,
603 layer: str | None = None,
604 n_jobs: int | None = None,
605 copy: bool = False,
606 ) -> AnnData | None:
607 """\
608 Regress out (mostly) unwanted sources of variation.
609
610 Uses simple linear regression. This is inspired by Seurat's `regressOut`
611 function in R [Satija15]. Note that this function tends to overcorrect
612 in certain circumstances as described in :issue:`526`.
613
614 Parameters
615 ----------
616 adata
617 The annotated data matrix.
618 keys
619 Keys for observation annotation on which to regress on.
620 layer
621 If provided, which element of layers to regress on.
622 n_jobs
623 Number of jobs for parallel computation.
624 `None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.
625 copy
626 Determines whether a copy of `adata` is returned.
627
628 Returns
629 -------
630 Returns `None` if `copy=False`, else returns an updated `AnnData` object. Sets the following fields:
631
632 `adata.X` | `adata.layers[layer]` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)
633 Corrected count data matrix.
634 """
635 start = logg.info(f"regressing out {keys}")
636 adata = adata.copy() if copy else adata
637
638 sanitize_anndata(adata)
639
640 view_to_actual(adata)
641
642 if isinstance(keys, str):
643 keys = [keys]
644
645 X = _get_obs_rep(adata, layer=layer)
646
647 if issparse(X):
648 logg.info(" sparse input is densified and may " "lead to high memory use")
649 X = X.toarray()
650
651 n_jobs = sett.n_jobs if n_jobs is None else n_jobs
652
653 # regress on a single categorical variable
654 variable_is_categorical = False
655 if keys[0] in adata.obs_keys() and isinstance(
656 adata.obs[keys[0]].dtype, CategoricalDtype
657 ):
658 if len(keys) > 1:
659 raise ValueError(
660 "If providing categorical variable, "
661 "only a single one is allowed. For this one "
662 "we regress on the mean for each category."
663 )
664 logg.debug("... regressing on per-gene means within categories")
665 regressors = np.zeros(X.shape, dtype="float32")
666 for category in adata.obs[keys[0]].cat.categories:
667 mask = (category == adata.obs[keys[0]]).values
668 for ix, x in enumerate(X.T):
669 regressors[mask, ix] = x[mask].mean()
670 variable_is_categorical = True
671 # regress on one or several ordinal variables
672 else:
673 # create data frame with selected keys (if given)
674 if keys:
675 regressors = adata.obs[keys]
676 else:
677 regressors = adata.obs.copy()
678
679 # add column of ones at index 0 (first column)
680 regressors.insert(0, "ones", 1.0)
681
682 len_chunk = np.ceil(min(1000, X.shape[1]) / n_jobs).astype(int)
683 n_chunks = np.ceil(X.shape[1] / len_chunk).astype(int)
684
685 tasks = []
686 # split the adata.X matrix by columns in chunks of size n_chunk
687 # (the last chunk could be of smaller size than the others)
688 chunk_list = np.array_split(X, n_chunks, axis=1)
689 if variable_is_categorical:
690 regressors_chunk = np.array_split(regressors, n_chunks, axis=1)
691 for idx, data_chunk in enumerate(chunk_list):
692 # each task is a tuple of a data_chunk eg. (adata.X[:,0:100]) and
693 # the regressors. This data will be passed to each of the jobs.
694 if variable_is_categorical:
695 regres = regressors_chunk[idx]
696 else:
697 regres = regressors
698 tasks.append(tuple((data_chunk, regres, variable_is_categorical)))
699
700 from joblib import Parallel, delayed
701
702 # TODO: figure out how to test that this doesn't oversubscribe resources
703 res = Parallel(n_jobs=n_jobs)(delayed(_regress_out_chunk)(task) for task in tasks)
704
705 # res is a list of vectors (each corresponding to a regressed gene column).
706 # The transpose is needed to get the matrix in the shape needed
707 _set_obs_rep(adata, np.vstack(res).T, layer=layer)
708 logg.info(" finished", time=start)
709 return adata if copy else None
710
711
712 def _regress_out_chunk(data):
713 # data is a tuple containing the selected columns from adata.X
714 # and the regressors dataFrame
715 data_chunk = data[0]
716 regressors = data[1]
717 variable_is_categorical = data[2]
718
719 responses_chunk_list = []
720 import statsmodels.api as sm
721 from statsmodels.tools.sm_exceptions import PerfectSeparationError
722
723 for col_index in range(data_chunk.shape[1]):
724 # if all values are identical, the statsmodel.api.GLM throws an error;
725 # but then no regression is necessary anyways...
726 if not (data_chunk[:, col_index] != data_chunk[0, col_index]).any():
727 responses_chunk_list.append(data_chunk[:, col_index])
728 continue
729
730 if variable_is_categorical:
731 regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]]
732 else:
733 regres = regressors
734 try:
735 result = sm.GLM(
736 data_chunk[:, col_index], regres, family=sm.families.Gaussian()
737 ).fit()
738 new_column = result.resid_response
739 except PerfectSeparationError: # this emulates R's behavior
740 logg.warning("Encountered PerfectSeparationError, setting to 0 as in R.")
741 new_column = np.zeros(data_chunk.shape[0])
742
743 responses_chunk_list.append(new_column)
744
745 return np.vstack(responses_chunk_list)
746
747
748 @renamed_arg("X", "data", pos_0=True)
749 @old_positionals("zero_center", "max_value", "copy", "layer", "obsm")
750 @singledispatch
751 def scale(
752 data: AnnData | spmatrix | np.ndarray,
753 *,
754 zero_center: bool = True,
755 max_value: float | None = None,
756 copy: bool = False,
757 layer: str | None = None,
758 obsm: str | None = None,
759 mask_obs: NDArray[np.bool_] | str | None = None,
760 ) -> AnnData | spmatrix | np.ndarray | None:
761 """\
762 Scale data to unit variance and zero mean.
763
764 .. note::
765 Variables (genes) that do not display any variation (are constant across
766 all observations) are retained and (for zero_center==True) set to 0
767 during this operation. In the future, they might be set to NaNs.
768
769 Parameters
770 ----------
771 data
772 The (annotated) data matrix of shape `n_obs` × `n_vars`.
773 Rows correspond to cells and columns to genes.
774 zero_center
775         If `False`, omit zero-centering variables, which allows handling sparse
776 input efficiently.
777 max_value
778 Clip (truncate) to this value after scaling. If `None`, do not clip.
779 copy
780 Whether this function should be performed inplace. If an AnnData object
781 is passed, this also determines if a copy is returned.
782 layer
783 If provided, which element of layers to scale.
784 obsm
785 If provided, which element of obsm to scale.
786 mask_obs
787 Restrict both the derivation of scaling parameters and the scaling itself
788 to a certain set of observations. The mask is specified as a boolean array
789 or a string referring to an array in :attr:`~anndata.AnnData.obs`.
790
791 Returns
792 -------
793 Returns `None` if `copy=False`, else returns an updated `AnnData` object. Sets the following fields:
794
795 `adata.X` | `adata.layers[layer]` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)
796 Scaled count data matrix.
797 `adata.var['mean']` : :class:`pandas.Series` (dtype `float`)
798 Means per gene before scaling.
799 `adata.var['std']` : :class:`pandas.Series` (dtype `float`)
800 Standard deviations per gene before scaling.
801 `adata.var['var']` : :class:`pandas.Series` (dtype `float`)
802 Variances per gene before scaling.
803 """
804 _check_array_function_arguments(layer=layer, obsm=obsm)
805 if layer is not None:
806 raise ValueError(
807 f"`layer` argument inappropriate for value of type {type(data)}"
808 )
809 if obsm is not None:
810 raise ValueError(
811 f"`obsm` argument inappropriate for value of type {type(data)}"
812 )
813 return scale_array(
814 data, zero_center=zero_center, max_value=max_value, copy=copy, mask_obs=mask_obs
815 )
816
817
818 @scale.register(np.ndarray)
819 def scale_array(
820 X: np.ndarray,
821 *,
822 zero_center: bool = True,
823 max_value: float | None = None,
824 copy: bool = False,
825 return_mean_std: bool = False,
826 mask_obs: NDArray[np.bool_] | None = None,
827 ) -> np.ndarray | tuple[np.ndarray, NDArray[np.float64], NDArray[np.float64]]:
828 if copy:
829 X = X.copy()
830 if mask_obs is not None:
831 mask_obs = _check_mask(X, mask_obs, "obs")
832 scale_rv = scale_array(
833 X[mask_obs, :],
834 zero_center=zero_center,
835 max_value=max_value,
836 copy=False,
837 return_mean_std=return_mean_std,
838 mask_obs=None,
839 )
840 if return_mean_std:
841 X[mask_obs, :], mean, std = scale_rv
842 return X, mean, std
843 else:
844 X[mask_obs, :] = scale_rv
845 return X
846
847 if not zero_center and max_value is not None:
848 logg.info( # Be careful of what? This should be more specific
849 "... be careful when using `max_value` " "without `zero_center`."
850 )
851
852 if np.issubdtype(X.dtype, np.integer):
853 logg.info(
854 "... as scaling leads to float results, integer "
855 "input is cast to float, returning copy."
856 )
857 X = X.astype(float)
858
859 mean, var = _get_mean_var(X)
860 std = np.sqrt(var)
861 std[std == 0] = 1
862 if issparse(X):
863 if zero_center:
864 raise ValueError("Cannot zero-center sparse matrix.")
865 sparsefuncs.inplace_column_scale(X, 1 / std)
866 else:
867 if zero_center:
868 X -= mean
869 X /= std
870
871 # do the clipping
872 if max_value is not None:
873 logg.debug(f"... clipping at max_value {max_value}")
874 X[X > max_value] = max_value
875 if return_mean_std:
876 return X, mean, std
877 else:
878 return X
879
880
881 @scale.register(spmatrix)
882 def scale_sparse(
883 X: spmatrix,
884 *,
885 zero_center: bool = True,
886 max_value: float | None = None,
887 copy: bool = False,
888 return_mean_std: bool = False,
889 mask_obs: NDArray[np.bool_] | None = None,
890 ) -> np.ndarray | tuple[np.ndarray, NDArray[np.float64], NDArray[np.float64]]:
891 # need to add the following here to make inplace logic work
892 if zero_center:
893 logg.info(
894 "... as `zero_center=True`, sparse input is "
895 "densified and may lead to large memory consumption"
896 )
897 X = X.toarray()
898 copy = False # Since the data has been copied
899 return scale_array(
900 X,
901 zero_center=zero_center,
902 copy=copy,
903 max_value=max_value,
904 return_mean_std=return_mean_std,
905 mask_obs=mask_obs,
906 )
907
908
909 @scale.register(AnnData)
910 def scale_anndata(
911 adata: AnnData,
912 *,
913 zero_center: bool = True,
914 max_value: float | None = None,
915 copy: bool = False,
916 layer: str | None = None,
917 obsm: str | None = None,
918 mask_obs: NDArray[np.bool_] | str | None = None,
919 ) -> AnnData | None:
920 adata = adata.copy() if copy else adata
921 str_mean_std = ("mean", "std")
922 if mask_obs is not None:
923 if isinstance(mask_obs, str):
924 str_mean_std = (f"mean of {mask_obs}", f"std of {mask_obs}")
925 else:
926 str_mean_std = ("mean with mask", "std with mask")
927 mask_obs = _check_mask(adata, mask_obs, "obs")
928 view_to_actual(adata)
929 X = _get_obs_rep(adata, layer=layer, obsm=obsm)
930 X, adata.var[str_mean_std[0]], adata.var[str_mean_std[1]] = scale(
931 X,
932 zero_center=zero_center,
933 max_value=max_value,
934 copy=False, # because a copy has already been made, if it were to be made
935 return_mean_std=True,
936 mask_obs=mask_obs,
937 )
938 _set_obs_rep(adata, X, layer=layer, obsm=obsm)
939 return adata if copy else None
940
941
942 @old_positionals("n_obs", "random_state", "copy")
943 def subsample(
944 data: AnnData | np.ndarray | spmatrix,
945 fraction: float | None = None,
946 *,
947 n_obs: int | None = None,
948 random_state: AnyRandom = 0,
949 copy: bool = False,
950 ) -> AnnData | tuple[np.ndarray | spmatrix, NDArray[np.int64]] | None:
951 """\
952 Subsample to a fraction of the number of observations.
953
954 Parameters
955 ----------
956 data
957 The (annotated) data matrix of shape `n_obs` × `n_vars`.
958 Rows correspond to cells and columns to genes.
959 fraction
960 Subsample to this `fraction` of the number of observations.
961 n_obs
962 Subsample to this number of observations.
963 random_state
964 Random seed to change subsampling.
965 copy
966 If an :class:`~anndata.AnnData` is passed,
967 determines whether a copy is returned.
968
969 Returns
970 -------
971 Returns `X[obs_indices], obs_indices` if data is array-like, otherwise
972 subsamples the passed :class:`~anndata.AnnData` (`copy == False`) or
973 returns a subsampled copy of it (`copy == True`).
974 """
975 np.random.seed(random_state)
976 old_n_obs = data.n_obs if isinstance(data, AnnData) else data.shape[0]
977 if n_obs is not None:
978 new_n_obs = n_obs
979 elif fraction is not None:
980 if fraction > 1 or fraction < 0:
981 raise ValueError(f"`fraction` needs to be within [0, 1], not {fraction}")
982 new_n_obs = int(fraction * old_n_obs)
983 logg.debug(f"... subsampled to {new_n_obs} data points")
984 else:
985 raise ValueError("Either pass `n_obs` or `fraction`.")
986 obs_indices = np.random.choice(old_n_obs, size=new_n_obs, replace=False)
987 if isinstance(data, AnnData):
988 if data.isbacked:
989 if copy:
990 return data[obs_indices].to_memory()
991 else:
992 raise NotImplementedError(
993 "Inplace subsampling is not implemented for backed objects."
994 )
995 else:
996 if copy:
997 return data[obs_indices].copy()
998 else:
999 data._inplace_subset_obs(obs_indices)
1000 else:
1001 X = data
1002 return X[obs_indices], obs_indices
1003
1004
1005 @renamed_arg("target_counts", "counts_per_cell")
1006 def downsample_counts(
1007 adata: AnnData,
1008 counts_per_cell: int | Collection[int] | None = None,
1009 total_counts: int | None = None,
1010 *,
1011 random_state: AnyRandom = 0,
1012 replace: bool = False,
1013 copy: bool = False,
1014 ) -> AnnData | None:
1015 """\
1016 Downsample counts from count matrix.
1017
1018     If `counts_per_cell` is specified, each cell will be downsampled.
1019 If `total_counts` is specified, expression matrix will be downsampled to
1020 contain at most `total_counts`.
1021
1022 Parameters
1023 ----------
1024 adata
1025 Annotated data matrix.
1026 counts_per_cell
1027 Target total counts per cell. If a cell has more than 'counts_per_cell',
1028 it will be downsampled to this number. Resulting counts can be specified
1029         on a per cell basis by passing an array. Should be an integer or integer
1030 ndarray with same length as number of obs.
1031 total_counts
1032 Target total counts. If the count matrix has more than `total_counts`
1033 it will be downsampled to have this number.
1034 random_state
1035 Random seed for subsampling.
1036 replace
1037 Whether to sample the counts with replacement.
1038 copy
1039 Determines whether a copy of `adata` is returned.
1040
1041 Returns
1042 -------
1043 Returns `None` if `copy=False`, else returns an `AnnData` object. Sets the following fields:
1044
1045 `adata.X` : :class:`numpy.ndarray` | :class:`scipy.sparse.spmatrix` (dtype `float`)
1046 Downsampled counts matrix.
1047 """
1048 # This logic is all dispatch
1049 total_counts_call = total_counts is not None
1050 counts_per_cell_call = counts_per_cell is not None
1051 if total_counts_call is counts_per_cell_call:
1052 raise ValueError(
1053 "Must specify exactly one of `total_counts` or `counts_per_cell`."
1054 )
1055 if copy:
1056 adata = adata.copy()
1057 if total_counts_call:
1058 adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace)
1059 elif counts_per_cell_call:
1060 adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace)
1061 if copy:
1062 return adata
1063
1064
1065 def _downsample_per_cell(X, counts_per_cell, random_state, replace):
1066 n_obs = X.shape[0]
1067 if isinstance(counts_per_cell, int):
1068 counts_per_cell = np.full(n_obs, counts_per_cell)
1069 else:
1070 counts_per_cell = np.asarray(counts_per_cell)
1071 # np.random.choice needs int arguments in numba code:
1072 counts_per_cell = counts_per_cell.astype(np.int_, copy=False)
1073 if not isinstance(counts_per_cell, np.ndarray) or len(counts_per_cell) != n_obs:
1074 raise ValueError(
1075 "If provided, 'counts_per_cell' must be either an integer, or "
1076 "coercible to an `np.ndarray` of length as number of observations"
1077 " by `np.asarray(counts_per_cell)`."
1078 )
1079 if issparse(X):
1080 original_type = type(X)
1081 if not isspmatrix_csr(X):
1082 X = csr_matrix(X)
1083 totals = np.ravel(X.sum(axis=1)) # Faster for csr matrix
1084 under_target = np.nonzero(totals > counts_per_cell)[0]
1085 rows = np.split(X.data, X.indptr[1:-1])
1086 for rowidx in under_target:
1087 row = rows[rowidx]
1088 _downsample_array(
1089 row,
1090 counts_per_cell[rowidx],
1091 random_state=random_state,
1092 replace=replace,
1093 inplace=True,
1094 )
1095 X.eliminate_zeros()
1096 if original_type is not csr_matrix: # Put it back
1097 X = original_type(X)
1098 else:
1099 totals = np.ravel(X.sum(axis=1))
1100 under_target = np.nonzero(totals > counts_per_cell)[0]
1101 for rowidx in under_target:
1102 row = X[rowidx, :]
1103 _downsample_array(
1104 row,
1105 counts_per_cell[rowidx],
1106 random_state=random_state,
1107 replace=replace,
1108 inplace=True,
1109 )
1110 return X
1111
1112
1113 def _downsample_total_counts(X, total_counts, random_state, replace):
1114 total_counts = int(total_counts)
1115 total = X.sum()
1116 if total < total_counts:
1117 return X
1118 if issparse(X):
1119 original_type = type(X)
1120 if not isspmatrix_csr(X):
1121 X = csr_matrix(X)
1122 _downsample_array(
1123 X.data,
1124 total_counts,
1125 random_state=random_state,
1126 replace=replace,
1127 inplace=True,
1128 )
1129 X.eliminate_zeros()
1130 if original_type is not csr_matrix:
1131 X = original_type(X)
1132 else:
1133 v = X.reshape(np.multiply(*X.shape))
1134 _downsample_array(v, total_counts, random_state, replace=replace, inplace=True)
1135 return X
1136
1137
1138 @numba.njit(cache=True)
1139 def _downsample_array(
1140 col: np.ndarray,
1141 target: int,
1142 random_state: AnyRandom = 0,
1143 replace: bool = True,
1144 inplace: bool = False,
1145 ):
1146 """\
1147 Evenly reduce counts in cell to target amount.
1148
1149 This is an internal function and has some restrictions:
1150
1151 * total counts in cell must be less than target
1152 """
1153 np.random.seed(random_state)
1154 cumcounts = col.cumsum()
1155 if inplace:
1156 col[:] = 0
1157 else:
1158 col = np.zeros_like(col)
1159 total = np.int_(cumcounts[-1])
1160 sample = np.random.choice(total, target, replace=replace)
1161 sample.sort()
1162 geneptr = 0
1163 for count in sample:
1164 while count >= cumcounts[geneptr]:
1165 geneptr += 1
1166 col[geneptr] += 1
1167 return col
1168
1169
1170 # --------------------------------------------------------------------------------
1171 # Helper Functions
1172 # --------------------------------------------------------------------------------
1173
1174
1175 def _pca_fallback(data, n_comps=2):
1176 # mean center the data
1177 data -= data.mean(axis=0)
1178 # calculate the covariance matrix
1179 C = np.cov(data, rowvar=False)
1180 # calculate eigenvectors & eigenvalues of the covariance matrix
1181 # use 'eigh' rather than 'eig' since C is symmetric,
1182 # the performance gain is substantial
1183 # evals, evecs = np.linalg.eigh(C)
1184 evals, evecs = sp.sparse.linalg.eigsh(C, k=n_comps)
1185 # sort eigenvalues in decreasing order
1186 idcs = np.argsort(evals)[::-1]
1187 evecs = evecs[:, idcs]
1188 evals = evals[idcs]
1189 # select the first n eigenvectors (n is desired dimension
1190 # of rescaled data array, or n_comps)
1191 evecs = evecs[:, :n_comps]
1192 # project data points on eigenvectors
1193 return np.dot(evecs.T, data.T).T
1194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/preprocessing/_simple.py b/scanpy/preprocessing/_simple.py
--- a/scanpy/preprocessing/_simple.py
+++ b/scanpy/preprocessing/_simple.py
@@ -871,7 +871,10 @@
# do the clipping
if max_value is not None:
logg.debug(f"... clipping at max_value {max_value}")
- X[X > max_value] = max_value
+ if zero_center:
+ X = np.clip(X, a_min=-max_value, a_max=max_value)
+ else:
+ X[X > max_value] = max_value
if return_mean_std:
return X, mean, std
else:
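
The patch above touches only the clipping step of `scale_array`: with `zero_center=True` the scaled values are centred around zero, so the old one-sided assignment `X[X > max_value] = max_value` truncated the upper tail but left large negative values untouched (the `-2.62718 1.0` result reported in the issue), whereas `np.clip` bounds both tails to `[-max_value, max_value]`. Below is a minimal sketch of the two behaviours on an invented, already-scaled toy matrix; the values and the threshold are made up purely for illustration and are not part of the repository or its tests.

```python
import numpy as np

# Toy zero-centered matrix with outliers in both tails (illustrative values only).
X = np.array([[-3.0, 0.5], [1.2, -0.2], [2.0, 2.6]])
max_value = 1.0

# Old behaviour: only values above max_value are truncated; negative outliers survive.
one_sided = X.copy()
one_sided[one_sided > max_value] = max_value
print(one_sided.min(), one_sided.max())  # -3.0 1.0

# Patched behaviour for zero_center=True: clip symmetrically to [-max_value, max_value].
symmetric = np.clip(X, a_min=-max_value, a_max=max_value)
print(symmetric.min(), symmetric.max())  # -1.0 1.0
```

The `else` branch keeps the one-sided truncation for `zero_center=False`, presumably because without centring the scaled counts remain non-negative, so only an upper bound is meaningful there.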
| {"golden_diff": "diff --git a/scanpy/preprocessing/_simple.py b/scanpy/preprocessing/_simple.py\n--- a/scanpy/preprocessing/_simple.py\n+++ b/scanpy/preprocessing/_simple.py\n@@ -871,7 +871,10 @@\n # do the clipping\n if max_value is not None:\n logg.debug(f\"... clipping at max_value {max_value}\")\n- X[X > max_value] = max_value\n+ if zero_center:\n+ X = np.clip(X, a_min=-max_value, a_max=max_value)\n+ else:\n+ X[X > max_value] = max_value\n if return_mean_std:\n return X, mean, std\n else:\n", "issue": "scale max value only cuts high values\n### Please make sure these conditions are met\n\n- [X] I have checked that this issue has not already been reported.\n- [X] I have confirmed this bug exists on the latest version of scanpy.\n- [X] (optional) I have confirmed this bug exists on the main branch of scanpy.\n\n### What happened?\n\nShouldn't max Value for `scale` and zero_center also clip the negative values?\n\n### Minimal code sample\n\n```python\nbdata = sc.datasets.pbmc3k()\r\nsc.pp.scale(bdata,max_value= 1)\r\nprint(bdata.X.min(),bdata.X.max())\n```\n\n\n### Error output\n\n```pytb\n-2.62718 1.0\r\n\r\nshouldn't this be -1,1\n```\n\n\n### Versions\n\nscanpy build from github main branch\n", "before_files": [{"content": "\"\"\"Simple Preprocessing Functions\n\nCompositions of these functions are found in sc.preprocess.recipes.\n\"\"\"\nfrom __future__ import annotations\n\nimport warnings\nfrom functools import singledispatch\nfrom typing import TYPE_CHECKING, Literal\n\nimport numba\nimport numpy as np\nimport scipy as sp\nfrom anndata import AnnData\nfrom pandas.api.types import CategoricalDtype\nfrom scipy.sparse import csr_matrix, issparse, isspmatrix_csr, spmatrix\nfrom sklearn.utils import check_array, sparsefuncs\n\nfrom .. import logging as logg\nfrom .._compat import old_positionals\nfrom .._settings import settings as sett\nfrom .._utils import (\n AnyRandom,\n _check_array_function_arguments,\n renamed_arg,\n sanitize_anndata,\n view_to_actual,\n)\nfrom ..get import _check_mask, _get_obs_rep, _set_obs_rep\nfrom ._distributed import materialize_as_ndarray\nfrom ._utils import _get_mean_var\n\n# install dask if available\ntry:\n import dask.array as da\nexcept ImportError:\n da = None\n\n# backwards compat\nfrom ._deprecated.highly_variable_genes import filter_genes_dispersion # noqa: F401\n\nif TYPE_CHECKING:\n from collections.abc import Collection, Iterable, Sequence\n from numbers import Number\n\n from numpy.typing import NDArray\n\n\n@old_positionals(\n \"min_counts\", \"min_genes\", \"max_counts\", \"max_genes\", \"inplace\", \"copy\"\n)\ndef filter_cells(\n data: AnnData | spmatrix | np.ndarray,\n *,\n min_counts: int | None = None,\n min_genes: int | None = None,\n max_counts: int | None = None,\n max_genes: int | None = None,\n inplace: bool = True,\n copy: bool = False,\n) -> AnnData | tuple[np.ndarray, np.ndarray] | None:\n \"\"\"\\\n Filter cell outliers based on counts and numbers of genes expressed.\n\n For instance, only keep cells with at least `min_counts` counts or\n `min_genes` genes expressed. This is to filter measurement outliers,\n i.e. 
\u201cunreliable\u201d observations.\n\n Only provide one of the optional parameters `min_counts`, `min_genes`,\n `max_counts`, `max_genes` per call.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n min_counts\n Minimum number of counts required for a cell to pass filtering.\n min_genes\n Minimum number of genes expressed required for a cell to pass filtering.\n max_counts\n Maximum number of counts required for a cell to pass filtering.\n max_genes\n Maximum number of genes expressed required for a cell to pass filtering.\n inplace\n Perform computation inplace or return result.\n\n Returns\n -------\n Depending on `inplace`, returns the following arrays or directly subsets\n and annotates the data matrix:\n\n cells_subset\n Boolean index mask that does filtering. `True` means that the\n cell is kept. `False` means the cell is removed.\n number_per_cell\n Depending on what was thresholded (`counts` or `genes`),\n the array stores `n_counts` or `n_cells` per gene.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = sc.datasets.krumsiek11()\n UserWarning: Observation names are not unique. To make them unique, call `.obs_names_make_unique`.\n utils.warn_names_duplicates(\"obs\")\n >>> adata.obs_names_make_unique()\n >>> adata.n_obs\n 640\n >>> adata.var_names.tolist() # doctest: +NORMALIZE_WHITESPACE\n ['Gata2', 'Gata1', 'Fog1', 'EKLF', 'Fli1', 'SCL',\n 'Cebpa', 'Pu.1', 'cJun', 'EgrNab', 'Gfi1']\n >>> # add some true zeros\n >>> adata.X[adata.X < 0.3] = 0\n >>> # simply compute the number of genes per cell\n >>> sc.pp.filter_cells(adata, min_genes=0)\n >>> adata.n_obs\n 640\n >>> adata.obs['n_genes'].min()\n 1\n >>> # filter manually\n >>> adata_copy = adata[adata.obs['n_genes'] >= 3]\n >>> adata_copy.n_obs\n 554\n >>> adata_copy.obs['n_genes'].min()\n 3\n >>> # actually do some filtering\n >>> sc.pp.filter_cells(adata, min_genes=3)\n >>> adata.n_obs\n 554\n >>> adata.obs['n_genes'].min()\n 3\n \"\"\"\n if copy:\n logg.warning(\"`copy` is deprecated, use `inplace` instead.\")\n n_given_options = sum(\n option is not None for option in [min_genes, min_counts, max_genes, max_counts]\n )\n if n_given_options != 1:\n raise ValueError(\n \"Only provide one of the optional parameters `min_counts`, \"\n \"`min_genes`, `max_counts`, `max_genes` per call.\"\n )\n if isinstance(data, AnnData):\n adata = data.copy() if copy else data\n cell_subset, number = materialize_as_ndarray(\n filter_cells(\n adata.X,\n min_counts=min_counts,\n min_genes=min_genes,\n max_counts=max_counts,\n max_genes=max_genes,\n ),\n )\n if not inplace:\n return cell_subset, number\n if min_genes is None and max_genes is None:\n adata.obs[\"n_counts\"] = number\n else:\n adata.obs[\"n_genes\"] = number\n adata._inplace_subset_obs(cell_subset)\n return adata if copy else None\n X = data # proceed with processing the data matrix\n min_number = min_counts if min_genes is None else min_genes\n max_number = max_counts if max_genes is None else max_genes\n number_per_cell = np.sum(\n X if min_genes is None and max_genes is None else X > 0, axis=1\n )\n if issparse(X):\n number_per_cell = number_per_cell.A1\n if min_number is not None:\n cell_subset = number_per_cell >= min_number\n if max_number is not None:\n cell_subset = number_per_cell <= max_number\n\n s = materialize_as_ndarray(np.sum(~cell_subset))\n if s > 0:\n msg = f\"filtered out {s} cells that have \"\n if min_genes is not None or min_counts is not None:\n msg 
+= \"less than \"\n msg += (\n f\"{min_genes} genes expressed\"\n if min_counts is None\n else f\"{min_counts} counts\"\n )\n if max_genes is not None or max_counts is not None:\n msg += \"more than \"\n msg += (\n f\"{max_genes} genes expressed\"\n if max_counts is None\n else f\"{max_counts} counts\"\n )\n logg.info(msg)\n return cell_subset, number_per_cell\n\n\n@old_positionals(\n \"min_counts\", \"min_cells\", \"max_counts\", \"max_cells\", \"inplace\", \"copy\"\n)\ndef filter_genes(\n data: AnnData | spmatrix | np.ndarray,\n *,\n min_counts: int | None = None,\n min_cells: int | None = None,\n max_counts: int | None = None,\n max_cells: int | None = None,\n inplace: bool = True,\n copy: bool = False,\n) -> AnnData | tuple[np.ndarray, np.ndarray] | None:\n \"\"\"\\\n Filter genes based on number of cells or counts.\n\n Keep genes that have at least `min_counts` counts or are expressed in at\n least `min_cells` cells or have at most `max_counts` counts or are expressed\n in at most `max_cells` cells.\n\n Only provide one of the optional parameters `min_counts`, `min_cells`,\n `max_counts`, `max_cells` per call.\n\n Parameters\n ----------\n data\n An annotated data matrix of shape `n_obs` \u00d7 `n_vars`. Rows correspond\n to cells and columns to genes.\n min_counts\n Minimum number of counts required for a gene to pass filtering.\n min_cells\n Minimum number of cells expressed required for a gene to pass filtering.\n max_counts\n Maximum number of counts required for a gene to pass filtering.\n max_cells\n Maximum number of cells expressed required for a gene to pass filtering.\n inplace\n Perform computation inplace or return result.\n\n Returns\n -------\n Depending on `inplace`, returns the following arrays or directly subsets\n and annotates the data matrix\n\n gene_subset\n Boolean index mask that does filtering. `True` means that the\n gene is kept. 
`False` means the gene is removed.\n number_per_gene\n Depending on what was thresholded (`counts` or `cells`), the array stores\n `n_counts` or `n_cells` per gene.\n \"\"\"\n if copy:\n logg.warning(\"`copy` is deprecated, use `inplace` instead.\")\n n_given_options = sum(\n option is not None for option in [min_cells, min_counts, max_cells, max_counts]\n )\n if n_given_options != 1:\n raise ValueError(\n \"Only provide one of the optional parameters `min_counts`, \"\n \"`min_cells`, `max_counts`, `max_cells` per call.\"\n )\n\n if isinstance(data, AnnData):\n adata = data.copy() if copy else data\n gene_subset, number = materialize_as_ndarray(\n filter_genes(\n adata.X,\n min_cells=min_cells,\n min_counts=min_counts,\n max_cells=max_cells,\n max_counts=max_counts,\n )\n )\n if not inplace:\n return gene_subset, number\n if min_cells is None and max_cells is None:\n adata.var[\"n_counts\"] = number\n else:\n adata.var[\"n_cells\"] = number\n adata._inplace_subset_var(gene_subset)\n return adata if copy else None\n\n X = data # proceed with processing the data matrix\n min_number = min_counts if min_cells is None else min_cells\n max_number = max_counts if max_cells is None else max_cells\n number_per_gene = np.sum(\n X if min_cells is None and max_cells is None else X > 0, axis=0\n )\n if issparse(X):\n number_per_gene = number_per_gene.A1\n if min_number is not None:\n gene_subset = number_per_gene >= min_number\n if max_number is not None:\n gene_subset = number_per_gene <= max_number\n\n s = np.sum(~gene_subset)\n if s > 0:\n msg = f\"filtered out {s} genes that are detected \"\n if min_cells is not None or min_counts is not None:\n msg += \"in less than \"\n msg += (\n f\"{min_cells} cells\" if min_counts is None else f\"{min_counts} counts\"\n )\n if max_cells is not None or max_counts is not None:\n msg += \"in more than \"\n msg += (\n f\"{max_cells} cells\" if max_counts is None else f\"{max_counts} counts\"\n )\n logg.info(msg)\n return gene_subset, number_per_gene\n\n\n@renamed_arg(\"X\", \"data\", pos_0=True)\n@singledispatch\ndef log1p(\n data: AnnData | np.ndarray | spmatrix,\n *,\n base: Number | None = None,\n copy: bool = False,\n chunked: bool | None = None,\n chunk_size: int | None = None,\n layer: str | None = None,\n obsm: str | None = None,\n) -> AnnData | np.ndarray | spmatrix | None:\n \"\"\"\\\n Logarithmize the data matrix.\n\n Computes :math:`X = \\\\log(X + 1)`,\n where :math:`log` denotes the natural logarithm unless a different base is given.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n base\n Base of the logarithm. 
Natural logarithm is used by default.\n copy\n If an :class:`~anndata.AnnData` is passed, determines whether a copy\n is returned.\n chunked\n Process the data matrix in chunks, which will save memory.\n Applies only to :class:`~anndata.AnnData`.\n chunk_size\n `n_obs` of the chunks to process the data in.\n layer\n Entry of layers to transform.\n obsm\n Entry of obsm to transform.\n\n Returns\n -------\n Returns or updates `data`, depending on `copy`.\n \"\"\"\n _check_array_function_arguments(\n chunked=chunked, chunk_size=chunk_size, layer=layer, obsm=obsm\n )\n return log1p_array(data, copy=copy, base=base)\n\n\[email protected](spmatrix)\ndef log1p_sparse(X: spmatrix, *, base: Number | None = None, copy: bool = False):\n X = check_array(\n X, accept_sparse=(\"csr\", \"csc\"), dtype=(np.float64, np.float32), copy=copy\n )\n X.data = log1p(X.data, copy=False, base=base)\n return X\n\n\[email protected](np.ndarray)\ndef log1p_array(X: np.ndarray, *, base: Number | None = None, copy: bool = False):\n # Can force arrays to be np.ndarrays, but would be useful to not\n # X = check_array(X, dtype=(np.float64, np.float32), ensure_2d=False, copy=copy)\n if copy:\n if not np.issubdtype(X.dtype, np.floating):\n X = X.astype(float)\n else:\n X = X.copy()\n elif not (np.issubdtype(X.dtype, np.floating) or np.issubdtype(X.dtype, complex)):\n X = X.astype(float)\n np.log1p(X, out=X)\n if base is not None:\n np.divide(X, np.log(base), out=X)\n return X\n\n\[email protected](AnnData)\ndef log1p_anndata(\n adata: AnnData,\n *,\n base: Number | None = None,\n copy: bool = False,\n chunked: bool = False,\n chunk_size: int | None = None,\n layer: str | None = None,\n obsm: str | None = None,\n) -> AnnData | None:\n if \"log1p\" in adata.uns_keys():\n logg.warning(\"adata.X seems to be already log-transformed.\")\n\n adata = adata.copy() if copy else adata\n view_to_actual(adata)\n\n if chunked:\n if (layer is not None) or (obsm is not None):\n raise NotImplementedError(\n \"Currently cannot perform chunked operations on arrays not stored in X.\"\n )\n for chunk, start, end in adata.chunked_X(chunk_size):\n adata.X[start:end] = log1p(chunk, base=base, copy=False)\n else:\n X = _get_obs_rep(adata, layer=layer, obsm=obsm)\n X = log1p(X, copy=False, base=base)\n _set_obs_rep(adata, X, layer=layer, obsm=obsm)\n\n adata.uns[\"log1p\"] = {\"base\": base}\n if copy:\n return adata\n\n\n@old_positionals(\"copy\", \"chunked\", \"chunk_size\")\ndef sqrt(\n data: AnnData | spmatrix | np.ndarray,\n *,\n copy: bool = False,\n chunked: bool = False,\n chunk_size: int | None = None,\n) -> AnnData | spmatrix | np.ndarray | None:\n \"\"\"\\\n Square root the data matrix.\n\n Computes :math:`X = \\\\sqrt(X)`.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n copy\n If an :class:`~anndata.AnnData` object is passed,\n determines whether a copy is returned.\n chunked\n Process the data matrix in chunks, which will save memory.\n Applies only to :class:`~anndata.AnnData`.\n chunk_size\n `n_obs` of the chunks to process the data in.\n\n Returns\n -------\n Returns or updates `data`, depending on `copy`.\n \"\"\"\n if isinstance(data, AnnData):\n adata = data.copy() if copy else data\n if chunked:\n for chunk, start, end in adata.chunked_X(chunk_size):\n adata.X[start:end] = sqrt(chunk)\n else:\n adata.X = sqrt(data.X)\n return adata if copy else None\n X = data # proceed with data matrix\n if not issparse(X):\n return np.sqrt(X)\n 
else:\n return X.sqrt()\n\n\ndef normalize_per_cell( # noqa: PLR0917\n data: AnnData | np.ndarray | spmatrix,\n counts_per_cell_after: float | None = None,\n counts_per_cell: np.ndarray | None = None,\n key_n_counts: str = \"n_counts\",\n copy: bool = False,\n layers: Literal[\"all\"] | Iterable[str] = (),\n use_rep: Literal[\"after\", \"X\"] | None = None,\n min_counts: int = 1,\n) -> AnnData | np.ndarray | spmatrix | None:\n \"\"\"\\\n Normalize total counts per cell.\n\n .. warning::\n .. deprecated:: 1.3.7\n Use :func:`~scanpy.pp.normalize_total` instead.\n The new function is equivalent to the present\n function, except that\n\n * the new function doesn't filter cells based on `min_counts`,\n use :func:`~scanpy.pp.filter_cells` if filtering is needed.\n * some arguments were renamed\n * `copy` is replaced by `inplace`\n\n Normalize each cell by total counts over all genes, so that every cell has\n the same total count after normalization.\n\n Similar functions are used, for example, by Seurat [Satija15]_, Cell Ranger\n [Zheng17]_ or SPRING [Weinreb17]_.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`. Rows correspond\n to cells and columns to genes.\n counts_per_cell_after\n If `None`, after normalization, each cell has a total count equal\n to the median of the *counts_per_cell* before normalization.\n counts_per_cell\n Precomputed counts per cell.\n key_n_counts\n Name of the field in `adata.obs` where the total counts per cell are\n stored.\n copy\n If an :class:`~anndata.AnnData` is passed, determines whether a copy\n is returned.\n min_counts\n Cells with counts less than `min_counts` are filtered out during\n normalization.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an updated `AnnData` object. Sets the following fields:\n\n `adata.X` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)\n Normalized count data matrix.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = AnnData(np.array([[1, 0], [3, 0], [5, 6]], dtype=np.float32))\n >>> print(adata.X.sum(axis=1))\n [ 1. 3. 11.]\n >>> sc.pp.normalize_per_cell(adata)\n >>> print(adata.obs)\n n_counts\n 0 1.0\n 1 3.0\n 2 11.0\n >>> print(adata.X.sum(axis=1))\n [3. 3. 3.]\n >>> sc.pp.normalize_per_cell(\n ... adata, counts_per_cell_after=1,\n ... key_n_counts='n_counts2',\n ... )\n >>> print(adata.obs)\n n_counts n_counts2\n 0 1.0 3.0\n 1 3.0 3.0\n 2 11.0 3.0\n >>> print(adata.X.sum(axis=1))\n [1. 1. 
1.]\n \"\"\"\n if isinstance(data, AnnData):\n start = logg.info(\"normalizing by total count per cell\")\n adata = data.copy() if copy else data\n if counts_per_cell is None:\n cell_subset, counts_per_cell = materialize_as_ndarray(\n filter_cells(adata.X, min_counts=min_counts)\n )\n adata.obs[key_n_counts] = counts_per_cell\n adata._inplace_subset_obs(cell_subset)\n counts_per_cell = counts_per_cell[cell_subset]\n normalize_per_cell(adata.X, counts_per_cell_after, counts_per_cell)\n\n layers = adata.layers.keys() if layers == \"all\" else layers\n if use_rep == \"after\":\n after = counts_per_cell_after\n elif use_rep == \"X\":\n after = np.median(counts_per_cell[cell_subset])\n elif use_rep is None:\n after = None\n else:\n raise ValueError('use_rep should be \"after\", \"X\" or None')\n for layer in layers:\n _subset, counts = filter_cells(adata.layers[layer], min_counts=min_counts)\n temp = normalize_per_cell(adata.layers[layer], after, counts, copy=True)\n adata.layers[layer] = temp\n\n logg.info(\n \" finished ({time_passed}): normalized adata.X and added\"\n f\" {key_n_counts!r}, counts per cell before normalization (adata.obs)\",\n time=start,\n )\n return adata if copy else None\n # proceed with data matrix\n X = data.copy() if copy else data\n if counts_per_cell is None:\n if not copy:\n raise ValueError(\"Can only be run with copy=True\")\n cell_subset, counts_per_cell = filter_cells(X, min_counts=min_counts)\n X = X[cell_subset]\n counts_per_cell = counts_per_cell[cell_subset]\n if counts_per_cell_after is None:\n counts_per_cell_after = np.median(counts_per_cell)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n counts_per_cell += counts_per_cell == 0\n counts_per_cell /= counts_per_cell_after\n if not issparse(X):\n X /= counts_per_cell[:, np.newaxis]\n else:\n sparsefuncs.inplace_row_scale(X, 1 / counts_per_cell)\n return X if copy else None\n\n\n@old_positionals(\"layer\", \"n_jobs\", \"copy\")\ndef regress_out(\n adata: AnnData,\n keys: str | Sequence[str],\n *,\n layer: str | None = None,\n n_jobs: int | None = None,\n copy: bool = False,\n) -> AnnData | None:\n \"\"\"\\\n Regress out (mostly) unwanted sources of variation.\n\n Uses simple linear regression. This is inspired by Seurat's `regressOut`\n function in R [Satija15]. Note that this function tends to overcorrect\n in certain circumstances as described in :issue:`526`.\n\n Parameters\n ----------\n adata\n The annotated data matrix.\n keys\n Keys for observation annotation on which to regress on.\n layer\n If provided, which element of layers to regress on.\n n_jobs\n Number of jobs for parallel computation.\n `None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.\n copy\n Determines whether a copy of `adata` is returned.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an updated `AnnData` object. 
Sets the following fields:\n\n `adata.X` | `adata.layers[layer]` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)\n Corrected count data matrix.\n \"\"\"\n start = logg.info(f\"regressing out {keys}\")\n adata = adata.copy() if copy else adata\n\n sanitize_anndata(adata)\n\n view_to_actual(adata)\n\n if isinstance(keys, str):\n keys = [keys]\n\n X = _get_obs_rep(adata, layer=layer)\n\n if issparse(X):\n logg.info(\" sparse input is densified and may \" \"lead to high memory use\")\n X = X.toarray()\n\n n_jobs = sett.n_jobs if n_jobs is None else n_jobs\n\n # regress on a single categorical variable\n variable_is_categorical = False\n if keys[0] in adata.obs_keys() and isinstance(\n adata.obs[keys[0]].dtype, CategoricalDtype\n ):\n if len(keys) > 1:\n raise ValueError(\n \"If providing categorical variable, \"\n \"only a single one is allowed. For this one \"\n \"we regress on the mean for each category.\"\n )\n logg.debug(\"... regressing on per-gene means within categories\")\n regressors = np.zeros(X.shape, dtype=\"float32\")\n for category in adata.obs[keys[0]].cat.categories:\n mask = (category == adata.obs[keys[0]]).values\n for ix, x in enumerate(X.T):\n regressors[mask, ix] = x[mask].mean()\n variable_is_categorical = True\n # regress on one or several ordinal variables\n else:\n # create data frame with selected keys (if given)\n if keys:\n regressors = adata.obs[keys]\n else:\n regressors = adata.obs.copy()\n\n # add column of ones at index 0 (first column)\n regressors.insert(0, \"ones\", 1.0)\n\n len_chunk = np.ceil(min(1000, X.shape[1]) / n_jobs).astype(int)\n n_chunks = np.ceil(X.shape[1] / len_chunk).astype(int)\n\n tasks = []\n # split the adata.X matrix by columns in chunks of size n_chunk\n # (the last chunk could be of smaller size than the others)\n chunk_list = np.array_split(X, n_chunks, axis=1)\n if variable_is_categorical:\n regressors_chunk = np.array_split(regressors, n_chunks, axis=1)\n for idx, data_chunk in enumerate(chunk_list):\n # each task is a tuple of a data_chunk eg. (adata.X[:,0:100]) and\n # the regressors. 
This data will be passed to each of the jobs.\n if variable_is_categorical:\n regres = regressors_chunk[idx]\n else:\n regres = regressors\n tasks.append(tuple((data_chunk, regres, variable_is_categorical)))\n\n from joblib import Parallel, delayed\n\n # TODO: figure out how to test that this doesn't oversubscribe resources\n res = Parallel(n_jobs=n_jobs)(delayed(_regress_out_chunk)(task) for task in tasks)\n\n # res is a list of vectors (each corresponding to a regressed gene column).\n # The transpose is needed to get the matrix in the shape needed\n _set_obs_rep(adata, np.vstack(res).T, layer=layer)\n logg.info(\" finished\", time=start)\n return adata if copy else None\n\n\ndef _regress_out_chunk(data):\n # data is a tuple containing the selected columns from adata.X\n # and the regressors dataFrame\n data_chunk = data[0]\n regressors = data[1]\n variable_is_categorical = data[2]\n\n responses_chunk_list = []\n import statsmodels.api as sm\n from statsmodels.tools.sm_exceptions import PerfectSeparationError\n\n for col_index in range(data_chunk.shape[1]):\n # if all values are identical, the statsmodel.api.GLM throws an error;\n # but then no regression is necessary anyways...\n if not (data_chunk[:, col_index] != data_chunk[0, col_index]).any():\n responses_chunk_list.append(data_chunk[:, col_index])\n continue\n\n if variable_is_categorical:\n regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]]\n else:\n regres = regressors\n try:\n result = sm.GLM(\n data_chunk[:, col_index], regres, family=sm.families.Gaussian()\n ).fit()\n new_column = result.resid_response\n except PerfectSeparationError: # this emulates R's behavior\n logg.warning(\"Encountered PerfectSeparationError, setting to 0 as in R.\")\n new_column = np.zeros(data_chunk.shape[0])\n\n responses_chunk_list.append(new_column)\n\n return np.vstack(responses_chunk_list)\n\n\n@renamed_arg(\"X\", \"data\", pos_0=True)\n@old_positionals(\"zero_center\", \"max_value\", \"copy\", \"layer\", \"obsm\")\n@singledispatch\ndef scale(\n data: AnnData | spmatrix | np.ndarray,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n layer: str | None = None,\n obsm: str | None = None,\n mask_obs: NDArray[np.bool_] | str | None = None,\n) -> AnnData | spmatrix | np.ndarray | None:\n \"\"\"\\\n Scale data to unit variance and zero mean.\n\n .. note::\n Variables (genes) that do not display any variation (are constant across\n all observations) are retained and (for zero_center==True) set to 0\n during this operation. In the future, they might be set to NaNs.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n zero_center\n If `False`, omit zero-centering variables, which allows to handle sparse\n input efficiently.\n max_value\n Clip (truncate) to this value after scaling. If `None`, do not clip.\n copy\n Whether this function should be performed inplace. If an AnnData object\n is passed, this also determines if a copy is returned.\n layer\n If provided, which element of layers to scale.\n obsm\n If provided, which element of obsm to scale.\n mask_obs\n Restrict both the derivation of scaling parameters and the scaling itself\n to a certain set of observations. The mask is specified as a boolean array\n or a string referring to an array in :attr:`~anndata.AnnData.obs`.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an updated `AnnData` object. 
Sets the following fields:\n\n `adata.X` | `adata.layers[layer]` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)\n Scaled count data matrix.\n `adata.var['mean']` : :class:`pandas.Series` (dtype `float`)\n Means per gene before scaling.\n `adata.var['std']` : :class:`pandas.Series` (dtype `float`)\n Standard deviations per gene before scaling.\n `adata.var['var']` : :class:`pandas.Series` (dtype `float`)\n Variances per gene before scaling.\n \"\"\"\n _check_array_function_arguments(layer=layer, obsm=obsm)\n if layer is not None:\n raise ValueError(\n f\"`layer` argument inappropriate for value of type {type(data)}\"\n )\n if obsm is not None:\n raise ValueError(\n f\"`obsm` argument inappropriate for value of type {type(data)}\"\n )\n return scale_array(\n data, zero_center=zero_center, max_value=max_value, copy=copy, mask_obs=mask_obs\n )\n\n\[email protected](np.ndarray)\ndef scale_array(\n X: np.ndarray,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n return_mean_std: bool = False,\n mask_obs: NDArray[np.bool_] | None = None,\n) -> np.ndarray | tuple[np.ndarray, NDArray[np.float64], NDArray[np.float64]]:\n if copy:\n X = X.copy()\n if mask_obs is not None:\n mask_obs = _check_mask(X, mask_obs, \"obs\")\n scale_rv = scale_array(\n X[mask_obs, :],\n zero_center=zero_center,\n max_value=max_value,\n copy=False,\n return_mean_std=return_mean_std,\n mask_obs=None,\n )\n if return_mean_std:\n X[mask_obs, :], mean, std = scale_rv\n return X, mean, std\n else:\n X[mask_obs, :] = scale_rv\n return X\n\n if not zero_center and max_value is not None:\n logg.info( # Be careful of what? This should be more specific\n \"... be careful when using `max_value` \" \"without `zero_center`.\"\n )\n\n if np.issubdtype(X.dtype, np.integer):\n logg.info(\n \"... as scaling leads to float results, integer \"\n \"input is cast to float, returning copy.\"\n )\n X = X.astype(float)\n\n mean, var = _get_mean_var(X)\n std = np.sqrt(var)\n std[std == 0] = 1\n if issparse(X):\n if zero_center:\n raise ValueError(\"Cannot zero-center sparse matrix.\")\n sparsefuncs.inplace_column_scale(X, 1 / std)\n else:\n if zero_center:\n X -= mean\n X /= std\n\n # do the clipping\n if max_value is not None:\n logg.debug(f\"... clipping at max_value {max_value}\")\n X[X > max_value] = max_value\n if return_mean_std:\n return X, mean, std\n else:\n return X\n\n\[email protected](spmatrix)\ndef scale_sparse(\n X: spmatrix,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n return_mean_std: bool = False,\n mask_obs: NDArray[np.bool_] | None = None,\n) -> np.ndarray | tuple[np.ndarray, NDArray[np.float64], NDArray[np.float64]]:\n # need to add the following here to make inplace logic work\n if zero_center:\n logg.info(\n \"... 
as `zero_center=True`, sparse input is \"\n \"densified and may lead to large memory consumption\"\n )\n X = X.toarray()\n copy = False # Since the data has been copied\n return scale_array(\n X,\n zero_center=zero_center,\n copy=copy,\n max_value=max_value,\n return_mean_std=return_mean_std,\n mask_obs=mask_obs,\n )\n\n\[email protected](AnnData)\ndef scale_anndata(\n adata: AnnData,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n layer: str | None = None,\n obsm: str | None = None,\n mask_obs: NDArray[np.bool_] | str | None = None,\n) -> AnnData | None:\n adata = adata.copy() if copy else adata\n str_mean_std = (\"mean\", \"std\")\n if mask_obs is not None:\n if isinstance(mask_obs, str):\n str_mean_std = (f\"mean of {mask_obs}\", f\"std of {mask_obs}\")\n else:\n str_mean_std = (\"mean with mask\", \"std with mask\")\n mask_obs = _check_mask(adata, mask_obs, \"obs\")\n view_to_actual(adata)\n X = _get_obs_rep(adata, layer=layer, obsm=obsm)\n X, adata.var[str_mean_std[0]], adata.var[str_mean_std[1]] = scale(\n X,\n zero_center=zero_center,\n max_value=max_value,\n copy=False, # because a copy has already been made, if it were to be made\n return_mean_std=True,\n mask_obs=mask_obs,\n )\n _set_obs_rep(adata, X, layer=layer, obsm=obsm)\n return adata if copy else None\n\n\n@old_positionals(\"n_obs\", \"random_state\", \"copy\")\ndef subsample(\n data: AnnData | np.ndarray | spmatrix,\n fraction: float | None = None,\n *,\n n_obs: int | None = None,\n random_state: AnyRandom = 0,\n copy: bool = False,\n) -> AnnData | tuple[np.ndarray | spmatrix, NDArray[np.int64]] | None:\n \"\"\"\\\n Subsample to a fraction of the number of observations.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n fraction\n Subsample to this `fraction` of the number of observations.\n n_obs\n Subsample to this number of observations.\n random_state\n Random seed to change subsampling.\n copy\n If an :class:`~anndata.AnnData` is passed,\n determines whether a copy is returned.\n\n Returns\n -------\n Returns `X[obs_indices], obs_indices` if data is array-like, otherwise\n subsamples the passed :class:`~anndata.AnnData` (`copy == False`) or\n returns a subsampled copy of it (`copy == True`).\n \"\"\"\n np.random.seed(random_state)\n old_n_obs = data.n_obs if isinstance(data, AnnData) else data.shape[0]\n if n_obs is not None:\n new_n_obs = n_obs\n elif fraction is not None:\n if fraction > 1 or fraction < 0:\n raise ValueError(f\"`fraction` needs to be within [0, 1], not {fraction}\")\n new_n_obs = int(fraction * old_n_obs)\n logg.debug(f\"... 
subsampled to {new_n_obs} data points\")\n else:\n raise ValueError(\"Either pass `n_obs` or `fraction`.\")\n obs_indices = np.random.choice(old_n_obs, size=new_n_obs, replace=False)\n if isinstance(data, AnnData):\n if data.isbacked:\n if copy:\n return data[obs_indices].to_memory()\n else:\n raise NotImplementedError(\n \"Inplace subsampling is not implemented for backed objects.\"\n )\n else:\n if copy:\n return data[obs_indices].copy()\n else:\n data._inplace_subset_obs(obs_indices)\n else:\n X = data\n return X[obs_indices], obs_indices\n\n\n@renamed_arg(\"target_counts\", \"counts_per_cell\")\ndef downsample_counts(\n adata: AnnData,\n counts_per_cell: int | Collection[int] | None = None,\n total_counts: int | None = None,\n *,\n random_state: AnyRandom = 0,\n replace: bool = False,\n copy: bool = False,\n) -> AnnData | None:\n \"\"\"\\\n Downsample counts from count matrix.\n\n If `counts_per_cell` is specified, each cell will downsampled.\n If `total_counts` is specified, expression matrix will be downsampled to\n contain at most `total_counts`.\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n counts_per_cell\n Target total counts per cell. If a cell has more than 'counts_per_cell',\n it will be downsampled to this number. Resulting counts can be specified\n on a per cell basis by passing an array.Should be an integer or integer\n ndarray with same length as number of obs.\n total_counts\n Target total counts. If the count matrix has more than `total_counts`\n it will be downsampled to have this number.\n random_state\n Random seed for subsampling.\n replace\n Whether to sample the counts with replacement.\n copy\n Determines whether a copy of `adata` is returned.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an `AnnData` object. 
Sets the following fields:\n\n `adata.X` : :class:`numpy.ndarray` | :class:`scipy.sparse.spmatrix` (dtype `float`)\n Downsampled counts matrix.\n \"\"\"\n # This logic is all dispatch\n total_counts_call = total_counts is not None\n counts_per_cell_call = counts_per_cell is not None\n if total_counts_call is counts_per_cell_call:\n raise ValueError(\n \"Must specify exactly one of `total_counts` or `counts_per_cell`.\"\n )\n if copy:\n adata = adata.copy()\n if total_counts_call:\n adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace)\n elif counts_per_cell_call:\n adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace)\n if copy:\n return adata\n\n\ndef _downsample_per_cell(X, counts_per_cell, random_state, replace):\n n_obs = X.shape[0]\n if isinstance(counts_per_cell, int):\n counts_per_cell = np.full(n_obs, counts_per_cell)\n else:\n counts_per_cell = np.asarray(counts_per_cell)\n # np.random.choice needs int arguments in numba code:\n counts_per_cell = counts_per_cell.astype(np.int_, copy=False)\n if not isinstance(counts_per_cell, np.ndarray) or len(counts_per_cell) != n_obs:\n raise ValueError(\n \"If provided, 'counts_per_cell' must be either an integer, or \"\n \"coercible to an `np.ndarray` of length as number of observations\"\n \" by `np.asarray(counts_per_cell)`.\"\n )\n if issparse(X):\n original_type = type(X)\n if not isspmatrix_csr(X):\n X = csr_matrix(X)\n totals = np.ravel(X.sum(axis=1)) # Faster for csr matrix\n under_target = np.nonzero(totals > counts_per_cell)[0]\n rows = np.split(X.data, X.indptr[1:-1])\n for rowidx in under_target:\n row = rows[rowidx]\n _downsample_array(\n row,\n counts_per_cell[rowidx],\n random_state=random_state,\n replace=replace,\n inplace=True,\n )\n X.eliminate_zeros()\n if original_type is not csr_matrix: # Put it back\n X = original_type(X)\n else:\n totals = np.ravel(X.sum(axis=1))\n under_target = np.nonzero(totals > counts_per_cell)[0]\n for rowidx in under_target:\n row = X[rowidx, :]\n _downsample_array(\n row,\n counts_per_cell[rowidx],\n random_state=random_state,\n replace=replace,\n inplace=True,\n )\n return X\n\n\ndef _downsample_total_counts(X, total_counts, random_state, replace):\n total_counts = int(total_counts)\n total = X.sum()\n if total < total_counts:\n return X\n if issparse(X):\n original_type = type(X)\n if not isspmatrix_csr(X):\n X = csr_matrix(X)\n _downsample_array(\n X.data,\n total_counts,\n random_state=random_state,\n replace=replace,\n inplace=True,\n )\n X.eliminate_zeros()\n if original_type is not csr_matrix:\n X = original_type(X)\n else:\n v = X.reshape(np.multiply(*X.shape))\n _downsample_array(v, total_counts, random_state, replace=replace, inplace=True)\n return X\n\n\[email protected](cache=True)\ndef _downsample_array(\n col: np.ndarray,\n target: int,\n random_state: AnyRandom = 0,\n replace: bool = True,\n inplace: bool = False,\n):\n \"\"\"\\\n Evenly reduce counts in cell to target amount.\n\n This is an internal function and has some restrictions:\n\n * total counts in cell must be less than target\n \"\"\"\n np.random.seed(random_state)\n cumcounts = col.cumsum()\n if inplace:\n col[:] = 0\n else:\n col = np.zeros_like(col)\n total = np.int_(cumcounts[-1])\n sample = np.random.choice(total, target, replace=replace)\n sample.sort()\n geneptr = 0\n for count in sample:\n while count >= cumcounts[geneptr]:\n geneptr += 1\n col[geneptr] += 1\n return col\n\n\n# --------------------------------------------------------------------------------\n# 
Helper Functions\n# --------------------------------------------------------------------------------\n\n\ndef _pca_fallback(data, n_comps=2):\n # mean center the data\n data -= data.mean(axis=0)\n # calculate the covariance matrix\n C = np.cov(data, rowvar=False)\n # calculate eigenvectors & eigenvalues of the covariance matrix\n # use 'eigh' rather than 'eig' since C is symmetric,\n # the performance gain is substantial\n # evals, evecs = np.linalg.eigh(C)\n evals, evecs = sp.sparse.linalg.eigsh(C, k=n_comps)\n # sort eigenvalues in decreasing order\n idcs = np.argsort(evals)[::-1]\n evecs = evecs[:, idcs]\n evals = evals[idcs]\n # select the first n eigenvectors (n is desired dimension\n # of rescaled data array, or n_comps)\n evecs = evecs[:, :n_comps]\n # project data points on eigenvectors\n return np.dot(evecs.T, data.T).T\n", "path": "scanpy/preprocessing/_simple.py"}], "after_files": [{"content": "\"\"\"Simple Preprocessing Functions\n\nCompositions of these functions are found in sc.preprocess.recipes.\n\"\"\"\nfrom __future__ import annotations\n\nimport warnings\nfrom functools import singledispatch\nfrom typing import TYPE_CHECKING, Literal\n\nimport numba\nimport numpy as np\nimport scipy as sp\nfrom anndata import AnnData\nfrom pandas.api.types import CategoricalDtype\nfrom scipy.sparse import csr_matrix, issparse, isspmatrix_csr, spmatrix\nfrom sklearn.utils import check_array, sparsefuncs\n\nfrom .. import logging as logg\nfrom .._compat import old_positionals\nfrom .._settings import settings as sett\nfrom .._utils import (\n AnyRandom,\n _check_array_function_arguments,\n renamed_arg,\n sanitize_anndata,\n view_to_actual,\n)\nfrom ..get import _check_mask, _get_obs_rep, _set_obs_rep\nfrom ._distributed import materialize_as_ndarray\nfrom ._utils import _get_mean_var\n\n# install dask if available\ntry:\n import dask.array as da\nexcept ImportError:\n da = None\n\n# backwards compat\nfrom ._deprecated.highly_variable_genes import filter_genes_dispersion # noqa: F401\n\nif TYPE_CHECKING:\n from collections.abc import Collection, Iterable, Sequence\n from numbers import Number\n\n from numpy.typing import NDArray\n\n\n@old_positionals(\n \"min_counts\", \"min_genes\", \"max_counts\", \"max_genes\", \"inplace\", \"copy\"\n)\ndef filter_cells(\n data: AnnData | spmatrix | np.ndarray,\n *,\n min_counts: int | None = None,\n min_genes: int | None = None,\n max_counts: int | None = None,\n max_genes: int | None = None,\n inplace: bool = True,\n copy: bool = False,\n) -> AnnData | tuple[np.ndarray, np.ndarray] | None:\n \"\"\"\\\n Filter cell outliers based on counts and numbers of genes expressed.\n\n For instance, only keep cells with at least `min_counts` counts or\n `min_genes` genes expressed. This is to filter measurement outliers,\n i.e. 
\u201cunreliable\u201d observations.\n\n Only provide one of the optional parameters `min_counts`, `min_genes`,\n `max_counts`, `max_genes` per call.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n min_counts\n Minimum number of counts required for a cell to pass filtering.\n min_genes\n Minimum number of genes expressed required for a cell to pass filtering.\n max_counts\n Maximum number of counts required for a cell to pass filtering.\n max_genes\n Maximum number of genes expressed required for a cell to pass filtering.\n inplace\n Perform computation inplace or return result.\n\n Returns\n -------\n Depending on `inplace`, returns the following arrays or directly subsets\n and annotates the data matrix:\n\n cells_subset\n Boolean index mask that does filtering. `True` means that the\n cell is kept. `False` means the cell is removed.\n number_per_cell\n Depending on what was thresholded (`counts` or `genes`),\n the array stores `n_counts` or `n_cells` per gene.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = sc.datasets.krumsiek11()\n UserWarning: Observation names are not unique. To make them unique, call `.obs_names_make_unique`.\n utils.warn_names_duplicates(\"obs\")\n >>> adata.obs_names_make_unique()\n >>> adata.n_obs\n 640\n >>> adata.var_names.tolist() # doctest: +NORMALIZE_WHITESPACE\n ['Gata2', 'Gata1', 'Fog1', 'EKLF', 'Fli1', 'SCL',\n 'Cebpa', 'Pu.1', 'cJun', 'EgrNab', 'Gfi1']\n >>> # add some true zeros\n >>> adata.X[adata.X < 0.3] = 0\n >>> # simply compute the number of genes per cell\n >>> sc.pp.filter_cells(adata, min_genes=0)\n >>> adata.n_obs\n 640\n >>> adata.obs['n_genes'].min()\n 1\n >>> # filter manually\n >>> adata_copy = adata[adata.obs['n_genes'] >= 3]\n >>> adata_copy.n_obs\n 554\n >>> adata_copy.obs['n_genes'].min()\n 3\n >>> # actually do some filtering\n >>> sc.pp.filter_cells(adata, min_genes=3)\n >>> adata.n_obs\n 554\n >>> adata.obs['n_genes'].min()\n 3\n \"\"\"\n if copy:\n logg.warning(\"`copy` is deprecated, use `inplace` instead.\")\n n_given_options = sum(\n option is not None for option in [min_genes, min_counts, max_genes, max_counts]\n )\n if n_given_options != 1:\n raise ValueError(\n \"Only provide one of the optional parameters `min_counts`, \"\n \"`min_genes`, `max_counts`, `max_genes` per call.\"\n )\n if isinstance(data, AnnData):\n adata = data.copy() if copy else data\n cell_subset, number = materialize_as_ndarray(\n filter_cells(\n adata.X,\n min_counts=min_counts,\n min_genes=min_genes,\n max_counts=max_counts,\n max_genes=max_genes,\n ),\n )\n if not inplace:\n return cell_subset, number\n if min_genes is None and max_genes is None:\n adata.obs[\"n_counts\"] = number\n else:\n adata.obs[\"n_genes\"] = number\n adata._inplace_subset_obs(cell_subset)\n return adata if copy else None\n X = data # proceed with processing the data matrix\n min_number = min_counts if min_genes is None else min_genes\n max_number = max_counts if max_genes is None else max_genes\n number_per_cell = np.sum(\n X if min_genes is None and max_genes is None else X > 0, axis=1\n )\n if issparse(X):\n number_per_cell = number_per_cell.A1\n if min_number is not None:\n cell_subset = number_per_cell >= min_number\n if max_number is not None:\n cell_subset = number_per_cell <= max_number\n\n s = materialize_as_ndarray(np.sum(~cell_subset))\n if s > 0:\n msg = f\"filtered out {s} cells that have \"\n if min_genes is not None or min_counts is not None:\n msg 
+= \"less than \"\n msg += (\n f\"{min_genes} genes expressed\"\n if min_counts is None\n else f\"{min_counts} counts\"\n )\n if max_genes is not None or max_counts is not None:\n msg += \"more than \"\n msg += (\n f\"{max_genes} genes expressed\"\n if max_counts is None\n else f\"{max_counts} counts\"\n )\n logg.info(msg)\n return cell_subset, number_per_cell\n\n\n@old_positionals(\n \"min_counts\", \"min_cells\", \"max_counts\", \"max_cells\", \"inplace\", \"copy\"\n)\ndef filter_genes(\n data: AnnData | spmatrix | np.ndarray,\n *,\n min_counts: int | None = None,\n min_cells: int | None = None,\n max_counts: int | None = None,\n max_cells: int | None = None,\n inplace: bool = True,\n copy: bool = False,\n) -> AnnData | tuple[np.ndarray, np.ndarray] | None:\n \"\"\"\\\n Filter genes based on number of cells or counts.\n\n Keep genes that have at least `min_counts` counts or are expressed in at\n least `min_cells` cells or have at most `max_counts` counts or are expressed\n in at most `max_cells` cells.\n\n Only provide one of the optional parameters `min_counts`, `min_cells`,\n `max_counts`, `max_cells` per call.\n\n Parameters\n ----------\n data\n An annotated data matrix of shape `n_obs` \u00d7 `n_vars`. Rows correspond\n to cells and columns to genes.\n min_counts\n Minimum number of counts required for a gene to pass filtering.\n min_cells\n Minimum number of cells expressed required for a gene to pass filtering.\n max_counts\n Maximum number of counts required for a gene to pass filtering.\n max_cells\n Maximum number of cells expressed required for a gene to pass filtering.\n inplace\n Perform computation inplace or return result.\n\n Returns\n -------\n Depending on `inplace`, returns the following arrays or directly subsets\n and annotates the data matrix\n\n gene_subset\n Boolean index mask that does filtering. `True` means that the\n gene is kept. 
`False` means the gene is removed.\n number_per_gene\n Depending on what was thresholded (`counts` or `cells`), the array stores\n `n_counts` or `n_cells` per gene.\n \"\"\"\n if copy:\n logg.warning(\"`copy` is deprecated, use `inplace` instead.\")\n n_given_options = sum(\n option is not None for option in [min_cells, min_counts, max_cells, max_counts]\n )\n if n_given_options != 1:\n raise ValueError(\n \"Only provide one of the optional parameters `min_counts`, \"\n \"`min_cells`, `max_counts`, `max_cells` per call.\"\n )\n\n if isinstance(data, AnnData):\n adata = data.copy() if copy else data\n gene_subset, number = materialize_as_ndarray(\n filter_genes(\n adata.X,\n min_cells=min_cells,\n min_counts=min_counts,\n max_cells=max_cells,\n max_counts=max_counts,\n )\n )\n if not inplace:\n return gene_subset, number\n if min_cells is None and max_cells is None:\n adata.var[\"n_counts\"] = number\n else:\n adata.var[\"n_cells\"] = number\n adata._inplace_subset_var(gene_subset)\n return adata if copy else None\n\n X = data # proceed with processing the data matrix\n min_number = min_counts if min_cells is None else min_cells\n max_number = max_counts if max_cells is None else max_cells\n number_per_gene = np.sum(\n X if min_cells is None and max_cells is None else X > 0, axis=0\n )\n if issparse(X):\n number_per_gene = number_per_gene.A1\n if min_number is not None:\n gene_subset = number_per_gene >= min_number\n if max_number is not None:\n gene_subset = number_per_gene <= max_number\n\n s = np.sum(~gene_subset)\n if s > 0:\n msg = f\"filtered out {s} genes that are detected \"\n if min_cells is not None or min_counts is not None:\n msg += \"in less than \"\n msg += (\n f\"{min_cells} cells\" if min_counts is None else f\"{min_counts} counts\"\n )\n if max_cells is not None or max_counts is not None:\n msg += \"in more than \"\n msg += (\n f\"{max_cells} cells\" if max_counts is None else f\"{max_counts} counts\"\n )\n logg.info(msg)\n return gene_subset, number_per_gene\n\n\n@renamed_arg(\"X\", \"data\", pos_0=True)\n@singledispatch\ndef log1p(\n data: AnnData | np.ndarray | spmatrix,\n *,\n base: Number | None = None,\n copy: bool = False,\n chunked: bool | None = None,\n chunk_size: int | None = None,\n layer: str | None = None,\n obsm: str | None = None,\n) -> AnnData | np.ndarray | spmatrix | None:\n \"\"\"\\\n Logarithmize the data matrix.\n\n Computes :math:`X = \\\\log(X + 1)`,\n where :math:`log` denotes the natural logarithm unless a different base is given.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n base\n Base of the logarithm. 
Natural logarithm is used by default.\n copy\n If an :class:`~anndata.AnnData` is passed, determines whether a copy\n is returned.\n chunked\n Process the data matrix in chunks, which will save memory.\n Applies only to :class:`~anndata.AnnData`.\n chunk_size\n `n_obs` of the chunks to process the data in.\n layer\n Entry of layers to transform.\n obsm\n Entry of obsm to transform.\n\n Returns\n -------\n Returns or updates `data`, depending on `copy`.\n \"\"\"\n _check_array_function_arguments(\n chunked=chunked, chunk_size=chunk_size, layer=layer, obsm=obsm\n )\n return log1p_array(data, copy=copy, base=base)\n\n\[email protected](spmatrix)\ndef log1p_sparse(X: spmatrix, *, base: Number | None = None, copy: bool = False):\n X = check_array(\n X, accept_sparse=(\"csr\", \"csc\"), dtype=(np.float64, np.float32), copy=copy\n )\n X.data = log1p(X.data, copy=False, base=base)\n return X\n\n\[email protected](np.ndarray)\ndef log1p_array(X: np.ndarray, *, base: Number | None = None, copy: bool = False):\n # Can force arrays to be np.ndarrays, but would be useful to not\n # X = check_array(X, dtype=(np.float64, np.float32), ensure_2d=False, copy=copy)\n if copy:\n if not np.issubdtype(X.dtype, np.floating):\n X = X.astype(float)\n else:\n X = X.copy()\n elif not (np.issubdtype(X.dtype, np.floating) or np.issubdtype(X.dtype, complex)):\n X = X.astype(float)\n np.log1p(X, out=X)\n if base is not None:\n np.divide(X, np.log(base), out=X)\n return X\n\n\[email protected](AnnData)\ndef log1p_anndata(\n adata: AnnData,\n *,\n base: Number | None = None,\n copy: bool = False,\n chunked: bool = False,\n chunk_size: int | None = None,\n layer: str | None = None,\n obsm: str | None = None,\n) -> AnnData | None:\n if \"log1p\" in adata.uns_keys():\n logg.warning(\"adata.X seems to be already log-transformed.\")\n\n adata = adata.copy() if copy else adata\n view_to_actual(adata)\n\n if chunked:\n if (layer is not None) or (obsm is not None):\n raise NotImplementedError(\n \"Currently cannot perform chunked operations on arrays not stored in X.\"\n )\n for chunk, start, end in adata.chunked_X(chunk_size):\n adata.X[start:end] = log1p(chunk, base=base, copy=False)\n else:\n X = _get_obs_rep(adata, layer=layer, obsm=obsm)\n X = log1p(X, copy=False, base=base)\n _set_obs_rep(adata, X, layer=layer, obsm=obsm)\n\n adata.uns[\"log1p\"] = {\"base\": base}\n if copy:\n return adata\n\n\n@old_positionals(\"copy\", \"chunked\", \"chunk_size\")\ndef sqrt(\n data: AnnData | spmatrix | np.ndarray,\n *,\n copy: bool = False,\n chunked: bool = False,\n chunk_size: int | None = None,\n) -> AnnData | spmatrix | np.ndarray | None:\n \"\"\"\\\n Square root the data matrix.\n\n Computes :math:`X = \\\\sqrt(X)`.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n copy\n If an :class:`~anndata.AnnData` object is passed,\n determines whether a copy is returned.\n chunked\n Process the data matrix in chunks, which will save memory.\n Applies only to :class:`~anndata.AnnData`.\n chunk_size\n `n_obs` of the chunks to process the data in.\n\n Returns\n -------\n Returns or updates `data`, depending on `copy`.\n \"\"\"\n if isinstance(data, AnnData):\n adata = data.copy() if copy else data\n if chunked:\n for chunk, start, end in adata.chunked_X(chunk_size):\n adata.X[start:end] = sqrt(chunk)\n else:\n adata.X = sqrt(data.X)\n return adata if copy else None\n X = data # proceed with data matrix\n if not issparse(X):\n return np.sqrt(X)\n 
else:\n return X.sqrt()\n\n\ndef normalize_per_cell( # noqa: PLR0917\n data: AnnData | np.ndarray | spmatrix,\n counts_per_cell_after: float | None = None,\n counts_per_cell: np.ndarray | None = None,\n key_n_counts: str = \"n_counts\",\n copy: bool = False,\n layers: Literal[\"all\"] | Iterable[str] = (),\n use_rep: Literal[\"after\", \"X\"] | None = None,\n min_counts: int = 1,\n) -> AnnData | np.ndarray | spmatrix | None:\n \"\"\"\\\n Normalize total counts per cell.\n\n .. warning::\n .. deprecated:: 1.3.7\n Use :func:`~scanpy.pp.normalize_total` instead.\n The new function is equivalent to the present\n function, except that\n\n * the new function doesn't filter cells based on `min_counts`,\n use :func:`~scanpy.pp.filter_cells` if filtering is needed.\n * some arguments were renamed\n * `copy` is replaced by `inplace`\n\n Normalize each cell by total counts over all genes, so that every cell has\n the same total count after normalization.\n\n Similar functions are used, for example, by Seurat [Satija15]_, Cell Ranger\n [Zheng17]_ or SPRING [Weinreb17]_.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`. Rows correspond\n to cells and columns to genes.\n counts_per_cell_after\n If `None`, after normalization, each cell has a total count equal\n to the median of the *counts_per_cell* before normalization.\n counts_per_cell\n Precomputed counts per cell.\n key_n_counts\n Name of the field in `adata.obs` where the total counts per cell are\n stored.\n copy\n If an :class:`~anndata.AnnData` is passed, determines whether a copy\n is returned.\n min_counts\n Cells with counts less than `min_counts` are filtered out during\n normalization.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an updated `AnnData` object. Sets the following fields:\n\n `adata.X` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)\n Normalized count data matrix.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = AnnData(np.array([[1, 0], [3, 0], [5, 6]], dtype=np.float32))\n >>> print(adata.X.sum(axis=1))\n [ 1. 3. 11.]\n >>> sc.pp.normalize_per_cell(adata)\n >>> print(adata.obs)\n n_counts\n 0 1.0\n 1 3.0\n 2 11.0\n >>> print(adata.X.sum(axis=1))\n [3. 3. 3.]\n >>> sc.pp.normalize_per_cell(\n ... adata, counts_per_cell_after=1,\n ... key_n_counts='n_counts2',\n ... )\n >>> print(adata.obs)\n n_counts n_counts2\n 0 1.0 3.0\n 1 3.0 3.0\n 2 11.0 3.0\n >>> print(adata.X.sum(axis=1))\n [1. 1. 
1.]\n \"\"\"\n if isinstance(data, AnnData):\n start = logg.info(\"normalizing by total count per cell\")\n adata = data.copy() if copy else data\n if counts_per_cell is None:\n cell_subset, counts_per_cell = materialize_as_ndarray(\n filter_cells(adata.X, min_counts=min_counts)\n )\n adata.obs[key_n_counts] = counts_per_cell\n adata._inplace_subset_obs(cell_subset)\n counts_per_cell = counts_per_cell[cell_subset]\n normalize_per_cell(adata.X, counts_per_cell_after, counts_per_cell)\n\n layers = adata.layers.keys() if layers == \"all\" else layers\n if use_rep == \"after\":\n after = counts_per_cell_after\n elif use_rep == \"X\":\n after = np.median(counts_per_cell[cell_subset])\n elif use_rep is None:\n after = None\n else:\n raise ValueError('use_rep should be \"after\", \"X\" or None')\n for layer in layers:\n _subset, counts = filter_cells(adata.layers[layer], min_counts=min_counts)\n temp = normalize_per_cell(adata.layers[layer], after, counts, copy=True)\n adata.layers[layer] = temp\n\n logg.info(\n \" finished ({time_passed}): normalized adata.X and added\"\n f\" {key_n_counts!r}, counts per cell before normalization (adata.obs)\",\n time=start,\n )\n return adata if copy else None\n # proceed with data matrix\n X = data.copy() if copy else data\n if counts_per_cell is None:\n if not copy:\n raise ValueError(\"Can only be run with copy=True\")\n cell_subset, counts_per_cell = filter_cells(X, min_counts=min_counts)\n X = X[cell_subset]\n counts_per_cell = counts_per_cell[cell_subset]\n if counts_per_cell_after is None:\n counts_per_cell_after = np.median(counts_per_cell)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n counts_per_cell += counts_per_cell == 0\n counts_per_cell /= counts_per_cell_after\n if not issparse(X):\n X /= counts_per_cell[:, np.newaxis]\n else:\n sparsefuncs.inplace_row_scale(X, 1 / counts_per_cell)\n return X if copy else None\n\n\n@old_positionals(\"layer\", \"n_jobs\", \"copy\")\ndef regress_out(\n adata: AnnData,\n keys: str | Sequence[str],\n *,\n layer: str | None = None,\n n_jobs: int | None = None,\n copy: bool = False,\n) -> AnnData | None:\n \"\"\"\\\n Regress out (mostly) unwanted sources of variation.\n\n Uses simple linear regression. This is inspired by Seurat's `regressOut`\n function in R [Satija15]. Note that this function tends to overcorrect\n in certain circumstances as described in :issue:`526`.\n\n Parameters\n ----------\n adata\n The annotated data matrix.\n keys\n Keys for observation annotation on which to regress on.\n layer\n If provided, which element of layers to regress on.\n n_jobs\n Number of jobs for parallel computation.\n `None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.\n copy\n Determines whether a copy of `adata` is returned.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an updated `AnnData` object. 
Sets the following fields:\n\n `adata.X` | `adata.layers[layer]` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)\n Corrected count data matrix.\n \"\"\"\n start = logg.info(f\"regressing out {keys}\")\n adata = adata.copy() if copy else adata\n\n sanitize_anndata(adata)\n\n view_to_actual(adata)\n\n if isinstance(keys, str):\n keys = [keys]\n\n X = _get_obs_rep(adata, layer=layer)\n\n if issparse(X):\n logg.info(\" sparse input is densified and may \" \"lead to high memory use\")\n X = X.toarray()\n\n n_jobs = sett.n_jobs if n_jobs is None else n_jobs\n\n # regress on a single categorical variable\n variable_is_categorical = False\n if keys[0] in adata.obs_keys() and isinstance(\n adata.obs[keys[0]].dtype, CategoricalDtype\n ):\n if len(keys) > 1:\n raise ValueError(\n \"If providing categorical variable, \"\n \"only a single one is allowed. For this one \"\n \"we regress on the mean for each category.\"\n )\n logg.debug(\"... regressing on per-gene means within categories\")\n regressors = np.zeros(X.shape, dtype=\"float32\")\n for category in adata.obs[keys[0]].cat.categories:\n mask = (category == adata.obs[keys[0]]).values\n for ix, x in enumerate(X.T):\n regressors[mask, ix] = x[mask].mean()\n variable_is_categorical = True\n # regress on one or several ordinal variables\n else:\n # create data frame with selected keys (if given)\n if keys:\n regressors = adata.obs[keys]\n else:\n regressors = adata.obs.copy()\n\n # add column of ones at index 0 (first column)\n regressors.insert(0, \"ones\", 1.0)\n\n len_chunk = np.ceil(min(1000, X.shape[1]) / n_jobs).astype(int)\n n_chunks = np.ceil(X.shape[1] / len_chunk).astype(int)\n\n tasks = []\n # split the adata.X matrix by columns in chunks of size n_chunk\n # (the last chunk could be of smaller size than the others)\n chunk_list = np.array_split(X, n_chunks, axis=1)\n if variable_is_categorical:\n regressors_chunk = np.array_split(regressors, n_chunks, axis=1)\n for idx, data_chunk in enumerate(chunk_list):\n # each task is a tuple of a data_chunk eg. (adata.X[:,0:100]) and\n # the regressors. 
This data will be passed to each of the jobs.\n if variable_is_categorical:\n regres = regressors_chunk[idx]\n else:\n regres = regressors\n tasks.append(tuple((data_chunk, regres, variable_is_categorical)))\n\n from joblib import Parallel, delayed\n\n # TODO: figure out how to test that this doesn't oversubscribe resources\n res = Parallel(n_jobs=n_jobs)(delayed(_regress_out_chunk)(task) for task in tasks)\n\n # res is a list of vectors (each corresponding to a regressed gene column).\n # The transpose is needed to get the matrix in the shape needed\n _set_obs_rep(adata, np.vstack(res).T, layer=layer)\n logg.info(\" finished\", time=start)\n return adata if copy else None\n\n\ndef _regress_out_chunk(data):\n # data is a tuple containing the selected columns from adata.X\n # and the regressors dataFrame\n data_chunk = data[0]\n regressors = data[1]\n variable_is_categorical = data[2]\n\n responses_chunk_list = []\n import statsmodels.api as sm\n from statsmodels.tools.sm_exceptions import PerfectSeparationError\n\n for col_index in range(data_chunk.shape[1]):\n # if all values are identical, the statsmodel.api.GLM throws an error;\n # but then no regression is necessary anyways...\n if not (data_chunk[:, col_index] != data_chunk[0, col_index]).any():\n responses_chunk_list.append(data_chunk[:, col_index])\n continue\n\n if variable_is_categorical:\n regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]]\n else:\n regres = regressors\n try:\n result = sm.GLM(\n data_chunk[:, col_index], regres, family=sm.families.Gaussian()\n ).fit()\n new_column = result.resid_response\n except PerfectSeparationError: # this emulates R's behavior\n logg.warning(\"Encountered PerfectSeparationError, setting to 0 as in R.\")\n new_column = np.zeros(data_chunk.shape[0])\n\n responses_chunk_list.append(new_column)\n\n return np.vstack(responses_chunk_list)\n\n\n@renamed_arg(\"X\", \"data\", pos_0=True)\n@old_positionals(\"zero_center\", \"max_value\", \"copy\", \"layer\", \"obsm\")\n@singledispatch\ndef scale(\n data: AnnData | spmatrix | np.ndarray,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n layer: str | None = None,\n obsm: str | None = None,\n mask_obs: NDArray[np.bool_] | str | None = None,\n) -> AnnData | spmatrix | np.ndarray | None:\n \"\"\"\\\n Scale data to unit variance and zero mean.\n\n .. note::\n Variables (genes) that do not display any variation (are constant across\n all observations) are retained and (for zero_center==True) set to 0\n during this operation. In the future, they might be set to NaNs.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n zero_center\n If `False`, omit zero-centering variables, which allows to handle sparse\n input efficiently.\n max_value\n Clip (truncate) to this value after scaling. If `None`, do not clip.\n copy\n Whether this function should be performed inplace. If an AnnData object\n is passed, this also determines if a copy is returned.\n layer\n If provided, which element of layers to scale.\n obsm\n If provided, which element of obsm to scale.\n mask_obs\n Restrict both the derivation of scaling parameters and the scaling itself\n to a certain set of observations. The mask is specified as a boolean array\n or a string referring to an array in :attr:`~anndata.AnnData.obs`.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an updated `AnnData` object. 
Sets the following fields:\n\n `adata.X` | `adata.layers[layer]` : :class:`numpy.ndarray` | :class:`scipy.sparse._csr.csr_matrix` (dtype `float`)\n Scaled count data matrix.\n `adata.var['mean']` : :class:`pandas.Series` (dtype `float`)\n Means per gene before scaling.\n `adata.var['std']` : :class:`pandas.Series` (dtype `float`)\n Standard deviations per gene before scaling.\n `adata.var['var']` : :class:`pandas.Series` (dtype `float`)\n Variances per gene before scaling.\n \"\"\"\n _check_array_function_arguments(layer=layer, obsm=obsm)\n if layer is not None:\n raise ValueError(\n f\"`layer` argument inappropriate for value of type {type(data)}\"\n )\n if obsm is not None:\n raise ValueError(\n f\"`obsm` argument inappropriate for value of type {type(data)}\"\n )\n return scale_array(\n data, zero_center=zero_center, max_value=max_value, copy=copy, mask_obs=mask_obs\n )\n\n\[email protected](np.ndarray)\ndef scale_array(\n X: np.ndarray,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n return_mean_std: bool = False,\n mask_obs: NDArray[np.bool_] | None = None,\n) -> np.ndarray | tuple[np.ndarray, NDArray[np.float64], NDArray[np.float64]]:\n if copy:\n X = X.copy()\n if mask_obs is not None:\n mask_obs = _check_mask(X, mask_obs, \"obs\")\n scale_rv = scale_array(\n X[mask_obs, :],\n zero_center=zero_center,\n max_value=max_value,\n copy=False,\n return_mean_std=return_mean_std,\n mask_obs=None,\n )\n if return_mean_std:\n X[mask_obs, :], mean, std = scale_rv\n return X, mean, std\n else:\n X[mask_obs, :] = scale_rv\n return X\n\n if not zero_center and max_value is not None:\n logg.info( # Be careful of what? This should be more specific\n \"... be careful when using `max_value` \" \"without `zero_center`.\"\n )\n\n if np.issubdtype(X.dtype, np.integer):\n logg.info(\n \"... as scaling leads to float results, integer \"\n \"input is cast to float, returning copy.\"\n )\n X = X.astype(float)\n\n mean, var = _get_mean_var(X)\n std = np.sqrt(var)\n std[std == 0] = 1\n if issparse(X):\n if zero_center:\n raise ValueError(\"Cannot zero-center sparse matrix.\")\n sparsefuncs.inplace_column_scale(X, 1 / std)\n else:\n if zero_center:\n X -= mean\n X /= std\n\n # do the clipping\n if max_value is not None:\n logg.debug(f\"... clipping at max_value {max_value}\")\n if zero_center:\n X = np.clip(X, a_min=-max_value, a_max=max_value)\n else:\n X[X > max_value] = max_value\n if return_mean_std:\n return X, mean, std\n else:\n return X\n\n\[email protected](spmatrix)\ndef scale_sparse(\n X: spmatrix,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n return_mean_std: bool = False,\n mask_obs: NDArray[np.bool_] | None = None,\n) -> np.ndarray | tuple[np.ndarray, NDArray[np.float64], NDArray[np.float64]]:\n # need to add the following here to make inplace logic work\n if zero_center:\n logg.info(\n \"... 
as `zero_center=True`, sparse input is \"\n \"densified and may lead to large memory consumption\"\n )\n X = X.toarray()\n copy = False # Since the data has been copied\n return scale_array(\n X,\n zero_center=zero_center,\n copy=copy,\n max_value=max_value,\n return_mean_std=return_mean_std,\n mask_obs=mask_obs,\n )\n\n\[email protected](AnnData)\ndef scale_anndata(\n adata: AnnData,\n *,\n zero_center: bool = True,\n max_value: float | None = None,\n copy: bool = False,\n layer: str | None = None,\n obsm: str | None = None,\n mask_obs: NDArray[np.bool_] | str | None = None,\n) -> AnnData | None:\n adata = adata.copy() if copy else adata\n str_mean_std = (\"mean\", \"std\")\n if mask_obs is not None:\n if isinstance(mask_obs, str):\n str_mean_std = (f\"mean of {mask_obs}\", f\"std of {mask_obs}\")\n else:\n str_mean_std = (\"mean with mask\", \"std with mask\")\n mask_obs = _check_mask(adata, mask_obs, \"obs\")\n view_to_actual(adata)\n X = _get_obs_rep(adata, layer=layer, obsm=obsm)\n X, adata.var[str_mean_std[0]], adata.var[str_mean_std[1]] = scale(\n X,\n zero_center=zero_center,\n max_value=max_value,\n copy=False, # because a copy has already been made, if it were to be made\n return_mean_std=True,\n mask_obs=mask_obs,\n )\n _set_obs_rep(adata, X, layer=layer, obsm=obsm)\n return adata if copy else None\n\n\n@old_positionals(\"n_obs\", \"random_state\", \"copy\")\ndef subsample(\n data: AnnData | np.ndarray | spmatrix,\n fraction: float | None = None,\n *,\n n_obs: int | None = None,\n random_state: AnyRandom = 0,\n copy: bool = False,\n) -> AnnData | tuple[np.ndarray | spmatrix, NDArray[np.int64]] | None:\n \"\"\"\\\n Subsample to a fraction of the number of observations.\n\n Parameters\n ----------\n data\n The (annotated) data matrix of shape `n_obs` \u00d7 `n_vars`.\n Rows correspond to cells and columns to genes.\n fraction\n Subsample to this `fraction` of the number of observations.\n n_obs\n Subsample to this number of observations.\n random_state\n Random seed to change subsampling.\n copy\n If an :class:`~anndata.AnnData` is passed,\n determines whether a copy is returned.\n\n Returns\n -------\n Returns `X[obs_indices], obs_indices` if data is array-like, otherwise\n subsamples the passed :class:`~anndata.AnnData` (`copy == False`) or\n returns a subsampled copy of it (`copy == True`).\n \"\"\"\n np.random.seed(random_state)\n old_n_obs = data.n_obs if isinstance(data, AnnData) else data.shape[0]\n if n_obs is not None:\n new_n_obs = n_obs\n elif fraction is not None:\n if fraction > 1 or fraction < 0:\n raise ValueError(f\"`fraction` needs to be within [0, 1], not {fraction}\")\n new_n_obs = int(fraction * old_n_obs)\n logg.debug(f\"... 
subsampled to {new_n_obs} data points\")\n else:\n raise ValueError(\"Either pass `n_obs` or `fraction`.\")\n obs_indices = np.random.choice(old_n_obs, size=new_n_obs, replace=False)\n if isinstance(data, AnnData):\n if data.isbacked:\n if copy:\n return data[obs_indices].to_memory()\n else:\n raise NotImplementedError(\n \"Inplace subsampling is not implemented for backed objects.\"\n )\n else:\n if copy:\n return data[obs_indices].copy()\n else:\n data._inplace_subset_obs(obs_indices)\n else:\n X = data\n return X[obs_indices], obs_indices\n\n\n@renamed_arg(\"target_counts\", \"counts_per_cell\")\ndef downsample_counts(\n adata: AnnData,\n counts_per_cell: int | Collection[int] | None = None,\n total_counts: int | None = None,\n *,\n random_state: AnyRandom = 0,\n replace: bool = False,\n copy: bool = False,\n) -> AnnData | None:\n \"\"\"\\\n Downsample counts from count matrix.\n\n If `counts_per_cell` is specified, each cell will downsampled.\n If `total_counts` is specified, expression matrix will be downsampled to\n contain at most `total_counts`.\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n counts_per_cell\n Target total counts per cell. If a cell has more than 'counts_per_cell',\n it will be downsampled to this number. Resulting counts can be specified\n on a per cell basis by passing an array.Should be an integer or integer\n ndarray with same length as number of obs.\n total_counts\n Target total counts. If the count matrix has more than `total_counts`\n it will be downsampled to have this number.\n random_state\n Random seed for subsampling.\n replace\n Whether to sample the counts with replacement.\n copy\n Determines whether a copy of `adata` is returned.\n\n Returns\n -------\n Returns `None` if `copy=False`, else returns an `AnnData` object. 
Sets the following fields:\n\n `adata.X` : :class:`numpy.ndarray` | :class:`scipy.sparse.spmatrix` (dtype `float`)\n Downsampled counts matrix.\n \"\"\"\n # This logic is all dispatch\n total_counts_call = total_counts is not None\n counts_per_cell_call = counts_per_cell is not None\n if total_counts_call is counts_per_cell_call:\n raise ValueError(\n \"Must specify exactly one of `total_counts` or `counts_per_cell`.\"\n )\n if copy:\n adata = adata.copy()\n if total_counts_call:\n adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace)\n elif counts_per_cell_call:\n adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace)\n if copy:\n return adata\n\n\ndef _downsample_per_cell(X, counts_per_cell, random_state, replace):\n n_obs = X.shape[0]\n if isinstance(counts_per_cell, int):\n counts_per_cell = np.full(n_obs, counts_per_cell)\n else:\n counts_per_cell = np.asarray(counts_per_cell)\n # np.random.choice needs int arguments in numba code:\n counts_per_cell = counts_per_cell.astype(np.int_, copy=False)\n if not isinstance(counts_per_cell, np.ndarray) or len(counts_per_cell) != n_obs:\n raise ValueError(\n \"If provided, 'counts_per_cell' must be either an integer, or \"\n \"coercible to an `np.ndarray` of length as number of observations\"\n \" by `np.asarray(counts_per_cell)`.\"\n )\n if issparse(X):\n original_type = type(X)\n if not isspmatrix_csr(X):\n X = csr_matrix(X)\n totals = np.ravel(X.sum(axis=1)) # Faster for csr matrix\n under_target = np.nonzero(totals > counts_per_cell)[0]\n rows = np.split(X.data, X.indptr[1:-1])\n for rowidx in under_target:\n row = rows[rowidx]\n _downsample_array(\n row,\n counts_per_cell[rowidx],\n random_state=random_state,\n replace=replace,\n inplace=True,\n )\n X.eliminate_zeros()\n if original_type is not csr_matrix: # Put it back\n X = original_type(X)\n else:\n totals = np.ravel(X.sum(axis=1))\n under_target = np.nonzero(totals > counts_per_cell)[0]\n for rowidx in under_target:\n row = X[rowidx, :]\n _downsample_array(\n row,\n counts_per_cell[rowidx],\n random_state=random_state,\n replace=replace,\n inplace=True,\n )\n return X\n\n\ndef _downsample_total_counts(X, total_counts, random_state, replace):\n total_counts = int(total_counts)\n total = X.sum()\n if total < total_counts:\n return X\n if issparse(X):\n original_type = type(X)\n if not isspmatrix_csr(X):\n X = csr_matrix(X)\n _downsample_array(\n X.data,\n total_counts,\n random_state=random_state,\n replace=replace,\n inplace=True,\n )\n X.eliminate_zeros()\n if original_type is not csr_matrix:\n X = original_type(X)\n else:\n v = X.reshape(np.multiply(*X.shape))\n _downsample_array(v, total_counts, random_state, replace=replace, inplace=True)\n return X\n\n\[email protected](cache=True)\ndef _downsample_array(\n col: np.ndarray,\n target: int,\n random_state: AnyRandom = 0,\n replace: bool = True,\n inplace: bool = False,\n):\n \"\"\"\\\n Evenly reduce counts in cell to target amount.\n\n This is an internal function and has some restrictions:\n\n * total counts in cell must be less than target\n \"\"\"\n np.random.seed(random_state)\n cumcounts = col.cumsum()\n if inplace:\n col[:] = 0\n else:\n col = np.zeros_like(col)\n total = np.int_(cumcounts[-1])\n sample = np.random.choice(total, target, replace=replace)\n sample.sort()\n geneptr = 0\n for count in sample:\n while count >= cumcounts[geneptr]:\n geneptr += 1\n col[geneptr] += 1\n return col\n\n\n# --------------------------------------------------------------------------------\n# 
Helper Functions\n# --------------------------------------------------------------------------------\n\n\ndef _pca_fallback(data, n_comps=2):\n # mean center the data\n data -= data.mean(axis=0)\n # calculate the covariance matrix\n C = np.cov(data, rowvar=False)\n # calculate eigenvectors & eigenvalues of the covariance matrix\n # use 'eigh' rather than 'eig' since C is symmetric,\n # the performance gain is substantial\n # evals, evecs = np.linalg.eigh(C)\n evals, evecs = sp.sparse.linalg.eigsh(C, k=n_comps)\n # sort eigenvalues in decreasing order\n idcs = np.argsort(evals)[::-1]\n evecs = evecs[:, idcs]\n evals = evals[idcs]\n # select the first n eigenvectors (n is desired dimension\n # of rescaled data array, or n_comps)\n evecs = evecs[:, :n_comps]\n # project data points on eigenvectors\n return np.dot(evecs.T, data.T).T\n", "path": "scanpy/preprocessing/_simple.py"}]} |
gh_patches_debug_1363 | rasdani/github-patches | git_diff | DataBiosphere__toil-2777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with --batchSystem Slurm option
I'm trying to run a CWL workflow on a Slurm queueing system.
I am able to successfully run a CWL workflow on a local machine (batchSystem singlemachine):
```
$ toil-cwl-runner --workDir /mnt/home/tjarosiewicz/tmp/ --jobStore test example.cwl example-job.yaml
INFO:cwltool:Resolved 'example.cwl' to 'file:///mnt/home/tjarosiewicz/workflows/example.cwl'
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (40).
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).
WARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203102003134464).
INFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.
INFO:toil.leader:Issued job 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo E/p/job8c_ibarm with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G
DEBUG:toil.jobStores.fileJobStore:Path to job store directory is '/tmp/tmptrn1ykx7'.
INFO:toil.worker:Redirecting logging to /mnt/home/tjarosiewicz/tmp/toil-cc520ff8-24a4-46d0-896d-479c31d802f8-7a1a9ca2-0214-47f8-b07e-6c3685a89053/tmp9bsoxv63/worker_log.txt
INFO:toil.leader:Job ended successfully: 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo E/p/job8c_ibarm
INFO:toil.leader:Finished toil run successfully.
INFO:toil.common:Successfully deleted the job store: FileJobStore(/tmp/tmptrn1ykx7)
```
I am able to successfully run a Toil workflow (using the Toil Python module) on a local machine:
```
$ python helloworld.py
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (40).
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).
WARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203103171248128).
INFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.
INFO:toil.leader:Issued job 'helloWorld' 6/n/job7cpnl9am with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G
DEBUG:toil.jobStores.fileJobStore:Path to job store directory is '/mnt/home/tjarosiewicz/workflows/toilRun'.
INFO:toil.worker:Redirecting logging to /mnt/home/tjarosiewicz/tmp/toil-dd5907f8-63ba-425c-ab15-eb9406d8dca1-7a1a9ca2-0214-47f8-b07e-6c3685a89053/tmpvmuy5qdy/worker_log.txt
INFO:toil.leader:Job ended successfully: 'helloWorld' 6/n/job7cpnl9am
INFO:toil.leader:Finished toil run successfully.
INFO:toil.common:Successfully deleted the job store: FileJobStore(/mnt/home/tjarosiewicz/workflows/toilRun)
Hello, world!, here's a message: You did it!!!!!
```
When I try to run the above workflows through the Slurm queueing system, I get errors about job submission.
1) With `toil-cwl-runner`:
```
$ toil-cwl-runner --batchSystem Slurm --workDir /mnt/home/tjarosiewicz/tmp/ --jobStore test example.cwl example-job.yaml
INFO:cwltool:Resolved 'example.cwl' to 'file:///mnt/home/tjarosiewicz/workflows/example.cwl'
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).
WARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203101904568320).
INFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.
INFO:toil.leader:Issued job 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo 8/X/job1slhcxw_ with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G
Exception in thread Thread-401:
Traceback (most recent call last):
File "/mnt/home/tjarosiewicz/anaconda3/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py", line 234, in run
activity |= self.createJobs(newJob)
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py", line 138, in createJobs
batchJobID = with_retries(self.submitJob, subLine)
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py", line 42, in with_retries
return operation(*args, **kwargs)
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/slurm.py", line 71, in submitJob
result = int(output.strip().split()[-1])
IndexError: list index out of range
```
2) With Toil as a Python module; I have added the following line to the hello world code:
`options.batchSystem = "Slurm"`
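For reference, `helloworld.py` here is presumably the quickstart hello-world script with that one override added — the sketch below is an assumption reconstructed from the log output (job name `helloWorld`, job store `toilRun`, 2 G memory / 1 core / 3 G disk), not the exact file from this report:

```python
from toil.common import Toil
from toil.job import Job


def helloWorld(message, memory="2G", cores=1, disk="3G"):
    return "Hello, world!, here's a message: %s" % message


if __name__ == "__main__":
    # File job store named "toilRun", as seen in the log output below
    options = Job.Runner.getDefaultOptions("./toilRun")
    options.batchSystem = "Slurm"  # the added line: route jobs through sbatch

    with Toil(options) as toil:
        output = toil.start(Job.wrapFn(helloWorld, "You did it!!!!!"))
    print(output)
```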
```
$ python helloworld.py
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).
WARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203102687854592).
WARNING:toil.common:Batch system does not support auto-deployment. The user script ModuleDescriptor(dirPath='/mnt/home/tjarosiewicz/workflows', name='agent', fromVirtualEnv=False) will have to be present at the same location on every worker.
INFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.
INFO:toil.leader:Issued job 'helloWorld' d/Q/jobwvjxxov7 with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G
Exception in thread Thread-401:
Traceback (most recent call last):
File "/mnt/home/tjarosiewicz/anaconda3/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py", line 234, in run
activity |= self.createJobs(newJob)
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py", line 138, in createJobs
batchJobID = with_retries(self.submitJob, subLine)
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py", line 42, in with_retries
return operation(*args, **kwargs)
File "/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/slurm.py", line 71, in submitJob
result = int(output.strip().split()[-1])
IndexError: list index out of range
```
I have managed to fix this issue by modifying line 173 in `.../toil/batchSystems/slurm.py`
from
`sbatch_line = ['sbatch', '-Q', '-J', 'toil_job_{}'.format(jobID)]`
to
`sbatch_line = ['sbatch', '-J', 'toil_job_{}'.format(jobID)]`
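That fix lines up with the traceback: `-Q` is sbatch's `--quiet` flag, and on this Slurm setup it also suppresses the informational `Submitted batch job <id>` line that Toil parses the job ID from in `submitJob` (see `slurm.py` below). A minimal illustration of the failure — not code from Toil itself, just its parsing expression applied to the two kinds of output:

```python
# Normal sbatch output: the job ID is the last whitespace-separated token
output = "Submitted batch job 2954103\n"
print(int(output.strip().split()[-1]))  # 2954103

# With 'sbatch -Q' nothing is printed, so there is no token to take
output = ""
int(output.strip().split()[-1])  # IndexError: list index out of range
```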
With Toil CWL runner:
```
$ toil-cwl-runner --batchSystem Slurm --workDir /mnt/home/tjarosiewicz/tmp/ --jobStore test example.cwl example-job.yaml
INFO:cwltool:Resolved 'example.cwl' to 'file:///mnt/home/tjarosiewicz/workflows/example.cwl'
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).
WARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203094966140928).
INFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.
INFO:toil.leader:Issued job 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo i/k/jobdhlfuse_ with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G
INFO:toil.leader:Job ended successfully: 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo i/k/jobdhlfuse_
INFO:toil.leader:Finished toil run successfully.
INFO:toil.common:Successfully deleted the job store: FileJobStore(/mnt/home/tjarosiewicz/workflows/test)
```
With Toil Python module:
```
$ python helloworld.py
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).
WARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203094390472704).
WARNING:toil.common:Batch system does not support auto-deployment. The user script ModuleDescriptor(dirPath='/mnt/home/tjarosiewicz/workflows', name='helloworld', fromVirtualEnv=False) will have to be present at the same location on every worker.
INFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.
INFO:toil.leader:Issued job 'helloWorld' 9/X/jobio7nz7hm with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G
INFO:toil.leader:Job ended successfully: 'helloWorld' 9/X/jobio7nz7hm
INFO:toil.leader:Finished toil run successfully.
INFO:toil.common:Successfully deleted the job store: FileJobStore(/mnt/home/tjarosiewicz/workflows/toilRun)
Hello, world!, here's a message: You did it!!!!!
```
Both examples are taken from the tutorial: https://toil.readthedocs.io/en/latest/gettingStarted/quickStart.html#running-a-basic-cwl-workflow
I haven't noticed other people having issues with the Slurm batch system, so maybe it is a problem with my Slurm configuration?
I will be happy to learn if there are other ways of dealing with this problem.
Cheers,
Tobiasz
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-411)
┆Issue Number: TOIL-411
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/batchSystems/slurm.py`
Content:
```
1 # Copyright (c) 2016 Duke Center for Genomic and Computational Biology
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from builtins import str
18 from past.utils import old_div
19 import logging
20 import os
21 from pipes import quote
22 from toil import subprocess
23 import time
24 import math
25
26 # Python 3 compatibility imports
27 from six.moves.queue import Empty, Queue
28 from six import iteritems
29
30 from toil.batchSystems import MemoryString
31 from toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem
32
33 logger = logging.getLogger(__name__)
34
35 class SlurmBatchSystem(AbstractGridEngineBatchSystem):
36
37 class Worker(AbstractGridEngineBatchSystem.Worker):
38
39 def getRunningJobIDs(self):
40 # Should return a dictionary of Job IDs and number of seconds
41 times = {}
42 with self.runningJobsLock:
43 currentjobs = dict((str(self.batchJobIDs[x][0]), x) for x in self.runningJobs)
44 # currentjobs is a dictionary that maps a slurm job id (string) to our own internal job id
45 # squeue arguments:
46 # -h for no header
47 # --format to get jobid i, state %t and time days-hours:minutes:seconds
48
49 lines = subprocess.check_output(['squeue', '-h', '--format', '%i %t %M']).decode('utf-8').split('\n')
50 for line in lines:
51 values = line.split()
52 if len(values) < 3:
53 continue
54 slurm_jobid, state, elapsed_time = values
55 if slurm_jobid in currentjobs and state == 'R':
56 seconds_running = self.parse_elapsed(elapsed_time)
57 times[currentjobs[slurm_jobid]] = seconds_running
58
59 return times
60
61 def killJob(self, jobID):
62 subprocess.check_call(['scancel', self.getBatchSystemID(jobID)])
63
64 def prepareSubmission(self, cpu, memory, jobID, command):
65 return self.prepareSbatch(cpu, memory, jobID) + ['--wrap={}'.format(command)]
66
67 def submitJob(self, subLine):
68 try:
69 output = subprocess.check_output(subLine, stderr=subprocess.STDOUT).decode('utf-8')
70 # sbatch prints a line like 'Submitted batch job 2954103'
71 result = int(output.strip().split()[-1])
72 logger.debug("sbatch submitted job %d", result)
73 return result
74 except OSError as e:
75 logger.error("sbatch command failed")
76 raise e
77
78 def getJobExitCode(self, slurmJobID):
79 logger.debug("Getting exit code for slurm job %d", int(slurmJobID))
80
81 state, rc = self._getJobDetailsFromSacct(slurmJobID)
82
83 if rc == -999:
84 state, rc = self._getJobDetailsFromScontrol(slurmJobID)
85
86 logger.debug("s job state is %s", state)
87 # If Job is in a running state, return None to indicate we don't have an update
88 if state in ('PENDING', 'RUNNING', 'CONFIGURING', 'COMPLETING', 'RESIZING', 'SUSPENDED'):
89 return None
90
91 return rc
92
93 def _getJobDetailsFromSacct(self, slurmJobID):
94 # SLURM job exit codes are obtained by running sacct.
95 args = ['sacct',
96 '-n', # no header
97 '-j', str(slurmJobID), # job
98 '--format', 'State,ExitCode', # specify output columns
99 '-P', # separate columns with pipes
100 '-S', '1970-01-01'] # override start time limit
101
102 process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
103 rc = process.returncode
104
105 if rc != 0:
106 # no accounting system or some other error
107 return (None, -999)
108
109 for line in process.stdout:
110 values = line.decode('utf-8').strip().split('|')
111 if len(values) < 2:
112 continue
113 state, exitcode = values
114 logger.debug("sacct job state is %s", state)
115 # If Job is in a running state, return None to indicate we don't have an update
116 status, signal = [int(n) for n in exitcode.split(':')]
117 if signal > 0:
118 # A non-zero signal may indicate e.g. an out-of-memory killed job
119 status = 128 + signal
120 logger.debug("sacct exit code is %s, returning status %d", exitcode, status)
121 return (state, status)
122 logger.debug("Did not find exit code for job in sacct output")
123 return None
124
125 def _getJobDetailsFromScontrol(self, slurmJobID):
126 args = ['scontrol',
127 'show',
128 'job',
129 str(slurmJobID)]
130
131 process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
132
133 job = dict()
134 for line in process.stdout:
135 values = line.decode('utf-8').strip().split()
136
137 # If job information is not available an error is issued:
138 # slurm_load_jobs error: Invalid job id specified
139 # There is no job information, so exit.
140 if len(values)>0 and values[0] == 'slurm_load_jobs':
141 return (None, None)
142
143 # Output is in the form of many key=value pairs, multiple pairs on each line
144 # and multiple lines in the output. Each pair is pulled out of each line and
145 # added to a dictionary
146 for v in values:
147 bits = v.split('=')
148 job[bits[0]] = bits[1]
149
150 state = job['JobState']
151 try:
152 exitcode = job['ExitCode']
153 if exitcode is not None:
154 status, signal = [int(n) for n in exitcode.split(':')]
155 if signal > 0:
156 # A non-zero signal may indicate e.g. an out-of-memory killed job
157 status = 128 + signal
158 logger.debug("scontrol exit code is %s, returning status %d", exitcode, status)
159 rc = status
160 else:
161 rc = None
162 except KeyError:
163 rc = None
164
165 return (state, rc)
166
167 """
168 Implementation-specific helper methods
169 """
170
171 def prepareSbatch(self, cpu, mem, jobID):
172 # Returns the sbatch command line before the script to run
173 sbatch_line = ['sbatch', '-Q', '-J', 'toil_job_{}'.format(jobID)]
174
175 if self.boss.environment:
176 argList = []
177
178 for k, v in self.boss.environment.items():
179 quoted_value = quote(os.environ[k] if v is None else v)
180 argList.append('{}={}'.format(k, quoted_value))
181
182 sbatch_line.append('--export=' + ','.join(argList))
183
184 if mem is not None:
185 # memory passed in is in bytes, but slurm expects megabytes
186 sbatch_line.append('--mem={}'.format(old_div(int(mem), 2 ** 20)))
187 if cpu is not None:
188 sbatch_line.append('--cpus-per-task={}'.format(int(math.ceil(cpu))))
189
190 stdoutfile = self.boss.formatStdOutErrPath(jobID, 'slurm', '%j', 'std_output')
191 stderrfile = self.boss.formatStdOutErrPath(jobID, 'slurm', '%j', 'std_error')
192 sbatch_line.extend(['-o', stdoutfile, '-e', stderrfile])
193
194 # "Native extensions" for SLURM (see DRMAA or SAGA)
195 nativeConfig = os.getenv('TOIL_SLURM_ARGS')
196 if nativeConfig is not None:
197 logger.debug("Native SLURM options appended to sbatch from TOIL_SLURM_ARGS env. variable: {}".format(nativeConfig))
198 if ("--mem" in nativeConfig) or ("--cpus-per-task" in nativeConfig):
199 raise ValueError("Some resource arguments are incompatible: {}".format(nativeConfig))
200
201 sbatch_line.extend(nativeConfig.split())
202
203 return sbatch_line
204
205 def parse_elapsed(self, elapsed):
206 # slurm returns elapsed time in days-hours:minutes:seconds format
207 # Sometimes it will only return minutes:seconds, so days may be omitted
208 # For ease of calculating, we'll make sure all the delimeters are ':'
209 # Then reverse the list so that we're always counting up from seconds -> minutes -> hours -> days
210 total_seconds = 0
211 try:
212 elapsed = elapsed.replace('-', ':').split(':')
213 elapsed.reverse()
214 seconds_per_unit = [1, 60, 3600, 86400]
215 for index, multiplier in enumerate(seconds_per_unit):
216 if index < len(elapsed):
217 total_seconds += multiplier * int(elapsed[index])
218 except ValueError:
219 pass # slurm may return INVALID instead of a time
220 return total_seconds
221
222 """
223 The interface for SLURM
224 """
225
226 @classmethod
227 def getWaitDuration(cls):
228 return 1
229
230 @classmethod
231 def obtainSystemConstants(cls):
232 # sinfo -Ne --format '%m,%c'
233 # sinfo arguments:
234 # -N for node-oriented
235 # -h for no header
236 # -e for exact values (e.g. don't return 32+)
237 # --format to get memory, cpu
238 max_cpu = 0
239 max_mem = MemoryString('0')
240 lines = subprocess.check_output(['sinfo', '-Nhe', '--format', '%m %c']).decode('utf-8').split('\n')
241 for line in lines:
242 values = line.split()
243 if len(values) < 2:
244 continue
245 mem, cpu = values
246 max_cpu = max(max_cpu, int(cpu))
247 max_mem = max(max_mem, MemoryString(mem + 'M'))
248 if max_cpu == 0 or max_mem.byteVal() == 0:
249 RuntimeError('sinfo did not return memory or cpu info')
250 return max_cpu, max_mem
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/toil/batchSystems/slurm.py b/src/toil/batchSystems/slurm.py
--- a/src/toil/batchSystems/slurm.py
+++ b/src/toil/batchSystems/slurm.py
@@ -170,7 +170,7 @@
def prepareSbatch(self, cpu, mem, jobID):
# Returns the sbatch command line before the script to run
- sbatch_line = ['sbatch', '-Q', '-J', 'toil_job_{}'.format(jobID)]
+ sbatch_line = ['sbatch', '-J', 'toil_job_{}'.format(jobID)]
if self.boss.environment:
argList = []
| {"golden_diff": "diff --git a/src/toil/batchSystems/slurm.py b/src/toil/batchSystems/slurm.py\n--- a/src/toil/batchSystems/slurm.py\n+++ b/src/toil/batchSystems/slurm.py\n@@ -170,7 +170,7 @@\n \n def prepareSbatch(self, cpu, mem, jobID):\n # Returns the sbatch command line before the script to run\n- sbatch_line = ['sbatch', '-Q', '-J', 'toil_job_{}'.format(jobID)]\n+ sbatch_line = ['sbatch', '-J', 'toil_job_{}'.format(jobID)]\n \n if self.boss.environment:\n argList = []\n", "issue": "Problem with --batchSystem Slurm option\nI'm trying to run a CWL workflow on a Slurm queueing system. \nI am able to succesfully run a CWL workflow on a local machine (batchSystem singlemachine):\n```\n$ toil-cwl-runner --workDir /mnt/home/tjarosiewicz/tmp/ --jobStore test example.cwl example-job.yaml \nINFO:cwltool:Resolved 'example.cwl' to 'file:///mnt/home/tjarosiewicz/workflows/example.cwl'\nWARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (40).\nWARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).\nWARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203102003134464).\nINFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.\nINFO:toil.leader:Issued job 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo E/p/job8c_ibarm with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G\nDEBUG:toil.jobStores.fileJobStore:Path to job store directory is '/tmp/tmptrn1ykx7'.\nINFO:toil.worker:Redirecting logging to /mnt/home/tjarosiewicz/tmp/toil-cc520ff8-24a4-46d0-896d-479c31d802f8-7a1a9ca2-0214-47f8-b07e-6c3685a89053/tmp9bsoxv63/worker_log.txt\nINFO:toil.leader:Job ended successfully: 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo E/p/job8c_ibarm\nINFO:toil.leader:Finished toil run successfully.\nINFO:toil.common:Successfully deleted the job store: FileJobStore(/tmp/tmptrn1ykx7)\n\n```\nI am able to succesfully run a Toil workflow (using Toil Python module) on a local machine:\n```\n$ python helloworld.py\nWARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (40).\nWARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).\nWARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203103171248128).\nINFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.\nINFO:toil.leader:Issued job 'helloWorld' 6/n/job7cpnl9am with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G\nDEBUG:toil.jobStores.fileJobStore:Path to job store directory is '/mnt/home/tjarosiewicz/workflows/toilRun'.\nINFO:toil.worker:Redirecting logging to /mnt/home/tjarosiewicz/tmp/toil-dd5907f8-63ba-425c-ab15-eb9406d8dca1-7a1a9ca2-0214-47f8-b07e-6c3685a89053/tmpvmuy5qdy/worker_log.txt\nINFO:toil.leader:Job ended successfully: 'helloWorld' 6/n/job7cpnl9am\nINFO:toil.leader:Finished toil run successfully.\nINFO:toil.common:Successfully deleted the job store: FileJobStore(/mnt/home/tjarosiewicz/workflows/toilRun)\nHello, world!, here's a message: You did it!!!!!\n```\n\nWhen I try to run above workflows through Slurm queueing system I get errors about job submission. 
\n\n1) With `toil-cwl-runner`:\n```\n$ toil-cwl-runner --batchSystem Slurm --workDir /mnt/home/tjarosiewicz/tmp/ --jobStore test example.cwl example-job.yaml \nINFO:cwltool:Resolved 'example.cwl' to 'file:///mnt/home/tjarosiewicz/workflows/example.cwl'\nWARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).\nWARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203101904568320).\nINFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.\nINFO:toil.leader:Issued job 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo 8/X/job1slhcxw_ with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G\nException in thread Thread-401:\nTraceback (most recent call last):\n File \"/mnt/home/tjarosiewicz/anaconda3/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\n self.run()\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py\", line 234, in run\n activity |= self.createJobs(newJob)\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py\", line 138, in createJobs\n batchJobID = with_retries(self.submitJob, subLine)\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py\", line 42, in with_retries\n return operation(*args, **kwargs)\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/slurm.py\", line 71, in submitJob\n result = int(output.strip().split()[-1])\nIndexError: list index out of range\n\n```\n2) With Toil as a Python module; I have added following line to the hello world code:\n`options.batchSystem = \"Slurm\"`\n```\n$ python helloworld.py\nWARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).\nWARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203102687854592).\nWARNING:toil.common:Batch system does not support auto-deployment. 
The user script ModuleDescriptor(dirPath='/mnt/home/tjarosiewicz/workflows', name='agent', fromVirtualEnv=False) will have to be present at the same location on every worker.\nINFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.\nINFO:toil.leader:Issued job 'helloWorld' d/Q/jobwvjxxov7 with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G\nException in thread Thread-401:\nTraceback (most recent call last):\n File \"/mnt/home/tjarosiewicz/anaconda3/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\n self.run()\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py\", line 234, in run\n activity |= self.createJobs(newJob)\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py\", line 138, in createJobs\n batchJobID = with_retries(self.submitJob, subLine)\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/abstractGridEngineBatchSystem.py\", line 42, in with_retries\n return operation(*args, **kwargs)\n File \"/mnt/home/tjarosiewicz/venv/lib/python3.6/site-packages/toil/batchSystems/slurm.py\", line 71, in submitJob\n result = int(output.strip().split()[-1])\nIndexError: list index out of range\n```\n\nI have managed to fix this issue by modifying the 173 line in `.../toil/batchSystems/slurm.py`\nfrom \n`sbatch_line = ['sbatch', '-Q', '-J', 'toil_job_{}'.format(jobID)]`\nto\n`sbatch_line = ['sbatch', '-J', 'toil_job_{}'.format(jobID)]`\n\nWith Toil CWL runner:\n```\n$ toil-cwl-runner --batchSystem Slurm --workDir /mnt/home/tjarosiewicz/tmp/ --jobStore test example.cwl example-job.yaml \nINFO:cwltool:Resolved 'example.cwl' to 'file:///mnt/home/tjarosiewicz/workflows/example.cwl'\nWARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).\nWARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203094966140928).\nINFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.\nINFO:toil.leader:Issued job 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo i/k/jobdhlfuse_ with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G\nINFO:toil.leader:Job ended successfully: 'file:///mnt/home/tjarosiewicz/workflows/example.cwl' echo i/k/jobdhlfuse_\nINFO:toil.leader:Finished toil run successfully.\nINFO:toil.common:Successfully deleted the job store: FileJobStore(/mnt/home/tjarosiewicz/workflows/test)\n```\n\nWith Toil Python module:\n```\n$ python helloworld.py \nWARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (201217310720).\nWARNING:toil.batchSystems.singleMachine:Limiting maxDisk to physically available disk (203094390472704).\nWARNING:toil.common:Batch system does not support auto-deployment. 
The user script ModuleDescriptor(dirPath='/mnt/home/tjarosiewicz/workflows', name='helloworld', fromVirtualEnv=False) will have to be present at the same location on every worker.\nINFO:toil:Running Toil version 3.20.0-cf34ca3416697f2abc816b2538f20ee29ba16932.\nINFO:toil.leader:Issued job 'helloWorld' 9/X/jobio7nz7hm with job batch system ID: 0 and cores: 1, disk: 3.0 G, and memory: 2.0 G\nINFO:toil.leader:Job ended successfully: 'helloWorld' 9/X/jobio7nz7hm\nINFO:toil.leader:Finished toil run successfully.\nINFO:toil.common:Successfully deleted the job store: FileJobStore(/mnt/home/tjarosiewicz/workflows/toilRun)\nHello, world!, here's a message: You did it!!!!!\n```\n\nBoth examples are taken from the tutorial; https://toil.readthedocs.io/en/latest/gettingStarted/quickStart.html#running-a-basic-cwl-workflow\n\nI haven't noticed other people having issues with Slurm batch system so maybe it is a problem with my Slurm configuration?\nI will be happy to learn if there are other ways of dealing with this problem.\n\nCheers, \nTobiasz\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-411)\n\u2506Issue Number: TOIL-411\n\n", "before_files": [{"content": "# Copyright (c) 2016 Duke Center for Genomic and Computational Biology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import str\nfrom past.utils import old_div\nimport logging\nimport os\nfrom pipes import quote\nfrom toil import subprocess\nimport time\nimport math\n\n# Python 3 compatibility imports\nfrom six.moves.queue import Empty, Queue\nfrom six import iteritems\n\nfrom toil.batchSystems import MemoryString\nfrom toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem\n\nlogger = logging.getLogger(__name__)\n\nclass SlurmBatchSystem(AbstractGridEngineBatchSystem):\n\n class Worker(AbstractGridEngineBatchSystem.Worker):\n\n def getRunningJobIDs(self):\n # Should return a dictionary of Job IDs and number of seconds\n times = {}\n with self.runningJobsLock:\n currentjobs = dict((str(self.batchJobIDs[x][0]), x) for x in self.runningJobs)\n # currentjobs is a dictionary that maps a slurm job id (string) to our own internal job id\n # squeue arguments:\n # -h for no header\n # --format to get jobid i, state %t and time days-hours:minutes:seconds\n\n lines = subprocess.check_output(['squeue', '-h', '--format', '%i %t %M']).decode('utf-8').split('\\n')\n for line in lines:\n values = line.split()\n if len(values) < 3:\n continue\n slurm_jobid, state, elapsed_time = values\n if slurm_jobid in currentjobs and state == 'R':\n seconds_running = self.parse_elapsed(elapsed_time)\n times[currentjobs[slurm_jobid]] = seconds_running\n\n return times\n\n def killJob(self, jobID):\n subprocess.check_call(['scancel', self.getBatchSystemID(jobID)])\n\n def prepareSubmission(self, cpu, memory, jobID, command):\n return self.prepareSbatch(cpu, memory, jobID) + ['--wrap={}'.format(command)]\n\n 
def submitJob(self, subLine):\n try:\n output = subprocess.check_output(subLine, stderr=subprocess.STDOUT).decode('utf-8')\n # sbatch prints a line like 'Submitted batch job 2954103'\n result = int(output.strip().split()[-1])\n logger.debug(\"sbatch submitted job %d\", result)\n return result\n except OSError as e:\n logger.error(\"sbatch command failed\")\n raise e\n\n def getJobExitCode(self, slurmJobID):\n logger.debug(\"Getting exit code for slurm job %d\", int(slurmJobID))\n \n state, rc = self._getJobDetailsFromSacct(slurmJobID)\n \n if rc == -999:\n state, rc = self._getJobDetailsFromScontrol(slurmJobID)\n \n logger.debug(\"s job state is %s\", state)\n # If Job is in a running state, return None to indicate we don't have an update \n if state in ('PENDING', 'RUNNING', 'CONFIGURING', 'COMPLETING', 'RESIZING', 'SUSPENDED'):\n return None\n \n return rc\n \n def _getJobDetailsFromSacct(self, slurmJobID):\n # SLURM job exit codes are obtained by running sacct.\n args = ['sacct',\n '-n', # no header\n '-j', str(slurmJobID), # job\n '--format', 'State,ExitCode', # specify output columns\n '-P', # separate columns with pipes\n '-S', '1970-01-01'] # override start time limit\n \n process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n rc = process.returncode\n \n if rc != 0:\n # no accounting system or some other error\n return (None, -999)\n \n for line in process.stdout:\n values = line.decode('utf-8').strip().split('|')\n if len(values) < 2:\n continue\n state, exitcode = values\n logger.debug(\"sacct job state is %s\", state)\n # If Job is in a running state, return None to indicate we don't have an update\n status, signal = [int(n) for n in exitcode.split(':')]\n if signal > 0:\n # A non-zero signal may indicate e.g. an out-of-memory killed job\n status = 128 + signal\n logger.debug(\"sacct exit code is %s, returning status %d\", exitcode, status)\n return (state, status)\n logger.debug(\"Did not find exit code for job in sacct output\")\n return None\n\n def _getJobDetailsFromScontrol(self, slurmJobID):\n args = ['scontrol',\n 'show',\n 'job',\n str(slurmJobID)]\n \n process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n \n job = dict()\n for line in process.stdout:\n values = line.decode('utf-8').strip().split()\n \n # If job information is not available an error is issued:\n # slurm_load_jobs error: Invalid job id specified\n # There is no job information, so exit.\n if len(values)>0 and values[0] == 'slurm_load_jobs':\n return (None, None)\n \n # Output is in the form of many key=value pairs, multiple pairs on each line\n # and multiple lines in the output. Each pair is pulled out of each line and\n # added to a dictionary\n for v in values:\n bits = v.split('=')\n job[bits[0]] = bits[1]\n \n state = job['JobState']\n try:\n exitcode = job['ExitCode']\n if exitcode is not None:\n status, signal = [int(n) for n in exitcode.split(':')]\n if signal > 0:\n # A non-zero signal may indicate e.g. 
an out-of-memory killed job\n status = 128 + signal\n logger.debug(\"scontrol exit code is %s, returning status %d\", exitcode, status)\n rc = status\n else:\n rc = None\n except KeyError:\n rc = None\n \n return (state, rc)\n\n \"\"\"\n Implementation-specific helper methods\n \"\"\"\n\n def prepareSbatch(self, cpu, mem, jobID):\n # Returns the sbatch command line before the script to run\n sbatch_line = ['sbatch', '-Q', '-J', 'toil_job_{}'.format(jobID)]\n\n if self.boss.environment:\n argList = []\n \n for k, v in self.boss.environment.items():\n quoted_value = quote(os.environ[k] if v is None else v)\n argList.append('{}={}'.format(k, quoted_value))\n \n sbatch_line.append('--export=' + ','.join(argList))\n \n if mem is not None:\n # memory passed in is in bytes, but slurm expects megabytes\n sbatch_line.append('--mem={}'.format(old_div(int(mem), 2 ** 20)))\n if cpu is not None:\n sbatch_line.append('--cpus-per-task={}'.format(int(math.ceil(cpu))))\n\n stdoutfile = self.boss.formatStdOutErrPath(jobID, 'slurm', '%j', 'std_output')\n stderrfile = self.boss.formatStdOutErrPath(jobID, 'slurm', '%j', 'std_error')\n sbatch_line.extend(['-o', stdoutfile, '-e', stderrfile])\n\n # \"Native extensions\" for SLURM (see DRMAA or SAGA)\n nativeConfig = os.getenv('TOIL_SLURM_ARGS')\n if nativeConfig is not None:\n logger.debug(\"Native SLURM options appended to sbatch from TOIL_SLURM_ARGS env. variable: {}\".format(nativeConfig))\n if (\"--mem\" in nativeConfig) or (\"--cpus-per-task\" in nativeConfig):\n raise ValueError(\"Some resource arguments are incompatible: {}\".format(nativeConfig))\n\n sbatch_line.extend(nativeConfig.split())\n\n return sbatch_line\n\n def parse_elapsed(self, elapsed):\n # slurm returns elapsed time in days-hours:minutes:seconds format\n # Sometimes it will only return minutes:seconds, so days may be omitted\n # For ease of calculating, we'll make sure all the delimeters are ':'\n # Then reverse the list so that we're always counting up from seconds -> minutes -> hours -> days\n total_seconds = 0\n try:\n elapsed = elapsed.replace('-', ':').split(':')\n elapsed.reverse()\n seconds_per_unit = [1, 60, 3600, 86400]\n for index, multiplier in enumerate(seconds_per_unit):\n if index < len(elapsed):\n total_seconds += multiplier * int(elapsed[index])\n except ValueError:\n pass # slurm may return INVALID instead of a time\n return total_seconds\n\n \"\"\"\n The interface for SLURM\n \"\"\"\n\n @classmethod\n def getWaitDuration(cls):\n return 1\n\n @classmethod\n def obtainSystemConstants(cls):\n # sinfo -Ne --format '%m,%c'\n # sinfo arguments:\n # -N for node-oriented\n # -h for no header\n # -e for exact values (e.g. 
don't return 32+)\n # --format to get memory, cpu\n max_cpu = 0\n max_mem = MemoryString('0')\n lines = subprocess.check_output(['sinfo', '-Nhe', '--format', '%m %c']).decode('utf-8').split('\\n')\n for line in lines:\n values = line.split()\n if len(values) < 2:\n continue\n mem, cpu = values\n max_cpu = max(max_cpu, int(cpu))\n max_mem = max(max_mem, MemoryString(mem + 'M'))\n if max_cpu == 0 or max_mem.byteVal() == 0:\n RuntimeError('sinfo did not return memory or cpu info')\n return max_cpu, max_mem\n", "path": "src/toil/batchSystems/slurm.py"}], "after_files": [{"content": "# Copyright (c) 2016 Duke Center for Genomic and Computational Biology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import str\nfrom past.utils import old_div\nimport logging\nimport os\nfrom pipes import quote\nfrom toil import subprocess\nimport time\nimport math\n\n# Python 3 compatibility imports\nfrom six.moves.queue import Empty, Queue\nfrom six import iteritems\n\nfrom toil.batchSystems import MemoryString\nfrom toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem\n\nlogger = logging.getLogger(__name__)\n\nclass SlurmBatchSystem(AbstractGridEngineBatchSystem):\n\n class Worker(AbstractGridEngineBatchSystem.Worker):\n\n def getRunningJobIDs(self):\n # Should return a dictionary of Job IDs and number of seconds\n times = {}\n with self.runningJobsLock:\n currentjobs = dict((str(self.batchJobIDs[x][0]), x) for x in self.runningJobs)\n # currentjobs is a dictionary that maps a slurm job id (string) to our own internal job id\n # squeue arguments:\n # -h for no header\n # --format to get jobid i, state %t and time days-hours:minutes:seconds\n\n lines = subprocess.check_output(['squeue', '-h', '--format', '%i %t %M']).decode('utf-8').split('\\n')\n for line in lines:\n values = line.split()\n if len(values) < 3:\n continue\n slurm_jobid, state, elapsed_time = values\n if slurm_jobid in currentjobs and state == 'R':\n seconds_running = self.parse_elapsed(elapsed_time)\n times[currentjobs[slurm_jobid]] = seconds_running\n\n return times\n\n def killJob(self, jobID):\n subprocess.check_call(['scancel', self.getBatchSystemID(jobID)])\n\n def prepareSubmission(self, cpu, memory, jobID, command):\n return self.prepareSbatch(cpu, memory, jobID) + ['--wrap={}'.format(command)]\n\n def submitJob(self, subLine):\n try:\n output = subprocess.check_output(subLine, stderr=subprocess.STDOUT).decode('utf-8')\n # sbatch prints a line like 'Submitted batch job 2954103'\n result = int(output.strip().split()[-1])\n logger.debug(\"sbatch submitted job %d\", result)\n return result\n except OSError as e:\n logger.error(\"sbatch command failed\")\n raise e\n\n def getJobExitCode(self, slurmJobID):\n logger.debug(\"Getting exit code for slurm job %d\", int(slurmJobID))\n \n state, rc = self._getJobDetailsFromSacct(slurmJobID)\n \n if rc == -999:\n state, rc = self._getJobDetailsFromScontrol(slurmJobID)\n \n 
logger.debug(\"s job state is %s\", state)\n # If Job is in a running state, return None to indicate we don't have an update \n if state in ('PENDING', 'RUNNING', 'CONFIGURING', 'COMPLETING', 'RESIZING', 'SUSPENDED'):\n return None\n \n return rc\n \n def _getJobDetailsFromSacct(self, slurmJobID):\n # SLURM job exit codes are obtained by running sacct.\n args = ['sacct',\n '-n', # no header\n '-j', str(slurmJobID), # job\n '--format', 'State,ExitCode', # specify output columns\n '-P', # separate columns with pipes\n '-S', '1970-01-01'] # override start time limit\n \n process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n rc = process.returncode\n \n if rc != 0:\n # no accounting system or some other error\n return (None, -999)\n \n for line in process.stdout:\n values = line.decode('utf-8').strip().split('|')\n if len(values) < 2:\n continue\n state, exitcode = values\n logger.debug(\"sacct job state is %s\", state)\n # If Job is in a running state, return None to indicate we don't have an update\n status, signal = [int(n) for n in exitcode.split(':')]\n if signal > 0:\n # A non-zero signal may indicate e.g. an out-of-memory killed job\n status = 128 + signal\n logger.debug(\"sacct exit code is %s, returning status %d\", exitcode, status)\n return (state, status)\n logger.debug(\"Did not find exit code for job in sacct output\")\n return None\n\n def _getJobDetailsFromScontrol(self, slurmJobID):\n args = ['scontrol',\n 'show',\n 'job',\n str(slurmJobID)]\n \n process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n \n job = dict()\n for line in process.stdout:\n values = line.decode('utf-8').strip().split()\n \n # If job information is not available an error is issued:\n # slurm_load_jobs error: Invalid job id specified\n # There is no job information, so exit.\n if len(values)>0 and values[0] == 'slurm_load_jobs':\n return (None, None)\n \n # Output is in the form of many key=value pairs, multiple pairs on each line\n # and multiple lines in the output. Each pair is pulled out of each line and\n # added to a dictionary\n for v in values:\n bits = v.split('=')\n job[bits[0]] = bits[1]\n \n state = job['JobState']\n try:\n exitcode = job['ExitCode']\n if exitcode is not None:\n status, signal = [int(n) for n in exitcode.split(':')]\n if signal > 0:\n # A non-zero signal may indicate e.g. 
an out-of-memory killed job\n status = 128 + signal\n logger.debug(\"scontrol exit code is %s, returning status %d\", exitcode, status)\n rc = status\n else:\n rc = None\n except KeyError:\n rc = None\n \n return (state, rc)\n\n \"\"\"\n Implementation-specific helper methods\n \"\"\"\n\n def prepareSbatch(self, cpu, mem, jobID):\n # Returns the sbatch command line before the script to run\n sbatch_line = ['sbatch', '-J', 'toil_job_{}'.format(jobID)]\n\n if self.boss.environment:\n argList = []\n \n for k, v in self.boss.environment.items():\n quoted_value = quote(os.environ[k] if v is None else v)\n argList.append('{}={}'.format(k, quoted_value))\n \n sbatch_line.append('--export=' + ','.join(argList))\n \n if mem is not None:\n # memory passed in is in bytes, but slurm expects megabytes\n sbatch_line.append('--mem={}'.format(old_div(int(mem), 2 ** 20)))\n if cpu is not None:\n sbatch_line.append('--cpus-per-task={}'.format(int(math.ceil(cpu))))\n\n stdoutfile = self.boss.formatStdOutErrPath(jobID, 'slurm', '%j', 'std_output')\n stderrfile = self.boss.formatStdOutErrPath(jobID, 'slurm', '%j', 'std_error')\n sbatch_line.extend(['-o', stdoutfile, '-e', stderrfile])\n\n # \"Native extensions\" for SLURM (see DRMAA or SAGA)\n nativeConfig = os.getenv('TOIL_SLURM_ARGS')\n if nativeConfig is not None:\n logger.debug(\"Native SLURM options appended to sbatch from TOIL_SLURM_ARGS env. variable: {}\".format(nativeConfig))\n if (\"--mem\" in nativeConfig) or (\"--cpus-per-task\" in nativeConfig):\n raise ValueError(\"Some resource arguments are incompatible: {}\".format(nativeConfig))\n\n sbatch_line.extend(nativeConfig.split())\n\n return sbatch_line\n\n def parse_elapsed(self, elapsed):\n # slurm returns elapsed time in days-hours:minutes:seconds format\n # Sometimes it will only return minutes:seconds, so days may be omitted\n # For ease of calculating, we'll make sure all the delimeters are ':'\n # Then reverse the list so that we're always counting up from seconds -> minutes -> hours -> days\n total_seconds = 0\n try:\n elapsed = elapsed.replace('-', ':').split(':')\n elapsed.reverse()\n seconds_per_unit = [1, 60, 3600, 86400]\n for index, multiplier in enumerate(seconds_per_unit):\n if index < len(elapsed):\n total_seconds += multiplier * int(elapsed[index])\n except ValueError:\n pass # slurm may return INVALID instead of a time\n return total_seconds\n\n \"\"\"\n The interface for SLURM\n \"\"\"\n\n @classmethod\n def getWaitDuration(cls):\n return 1\n\n @classmethod\n def obtainSystemConstants(cls):\n # sinfo -Ne --format '%m,%c'\n # sinfo arguments:\n # -N for node-oriented\n # -h for no header\n # -e for exact values (e.g. don't return 32+)\n # --format to get memory, cpu\n max_cpu = 0\n max_mem = MemoryString('0')\n lines = subprocess.check_output(['sinfo', '-Nhe', '--format', '%m %c']).decode('utf-8').split('\\n')\n for line in lines:\n values = line.split()\n if len(values) < 2:\n continue\n mem, cpu = values\n max_cpu = max(max_cpu, int(cpu))\n max_mem = max(max_mem, MemoryString(mem + 'M'))\n if max_cpu == 0 or max_mem.byteVal() == 0:\n RuntimeError('sinfo did not return memory or cpu info')\n return max_cpu, max_mem\n", "path": "src/toil/batchSystems/slurm.py"}]} |
gh_patches_debug_1364 | rasdani/github-patches | git_diff | rucio__rucio-2761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dark Reaper rse_id crash
Motivation
----------
The dark reaper raises an error in 1.20.3
Modification
------------
Fix the query in core/quarantined_replica.py -> list_rses()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/core/quarantined_replica.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 # Authors:
17 # - Vincent Garonne <[email protected]>, 2016-2017
18 # - Hannes Hansen <[email protected]>, 2018
19 # - Dimitrios Christidis <[email protected]>, 2018
20 # - Andrew Lister <[email protected]>, 2019
21 #
22 # PY3K COMPATIBLE
23
24 import datetime
25
26 from sqlalchemy import and_, or_, exists, not_
27 from sqlalchemy.sql.expression import bindparam, text, select, false
28
29 from rucio.common.utils import chunks
30 from rucio.db.sqla import models
31 from rucio.db.sqla.session import read_session, transactional_session
32
33
34 @transactional_session
35 def add_quarantined_replicas(rse_id, replicas, session=None):
36 """
37 Bulk add quarantined file replicas.
38
39 :param rse_id: The rse id.
40 :param replicas: A list of dicts with the replica information.
41 :param session: The database session in use.
42 """
43
44 for chunk in chunks(replicas, 100):
45 # Exlude files that have a registered replica. This is a
46 # safeguard against potential issues in the Auditor.
47 file_clause = []
48 for replica in chunk:
49 file_clause.append(and_(models.RSEFileAssociation.scope == replica.get('scope', None),
50 models.RSEFileAssociation.name == replica.get('name', None),
51 models.RSEFileAssociation.rse_id == rse_id))
52 file_query = session.query(models.RSEFileAssociation.scope,
53 models.RSEFileAssociation.name,
54 models.RSEFileAssociation.rse_id).\
55 with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
56 filter(or_(*file_clause))
57 existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]
58 chunk = [replica for replica in chunk if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]
59
60 # Exclude files that have already been added to the quarantined
61 # replica table.
62 quarantine_clause = []
63 for replica in chunk:
64 quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],
65 models.QuarantinedReplica.rse_id == rse_id))
66 quarantine_query = session.query(models.QuarantinedReplica.path,
67 models.QuarantinedReplica.rse_id).\
68 filter(or_(*quarantine_clause))
69 quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]
70 chunk = [replica for replica in chunk if (replica['path'], rse_id) not in quarantine_replicas]
71
72 session.bulk_insert_mappings(
73 models.QuarantinedReplica,
74 [{'rse_id': rse_id, 'path': file['path'],
75 'scope': file.get('scope'), 'name': file.get('name'),
76 'bytes': file.get('bytes')} for file in chunk])
77
78
79 @transactional_session
80 def delete_quarantined_replicas(rse_id, replicas, session=None):
81 """
82 Delete file replicas.
83
84 :param rse_id: the rse id.
85 :param files: the list of files to delete.
86 :param ignore_availability: Ignore the RSE blacklisting.
87 :param session: The database session in use.
88 """
89
90 conditions = []
91 for replica in replicas:
92 conditions.append(models.QuarantinedReplica.path == replica['path'])
93
94 if conditions:
95 session.query(models.QuarantinedReplica).\
96 filter(models.QuarantinedReplica.rse_id == rse_id).\
97 filter(or_(*conditions)).\
98 delete(synchronize_session=False)
99
100 session.\
101 bulk_insert_mappings(models.QuarantinedReplica.__history_mapper__.class_,
102 [{'rse_id': rse_id, 'path': replica['path'],
103 'bytes': replica.get('bytes'),
104 'created_at': replica.get('created_at'),
105 'deleted_at': datetime.datetime.utcnow()}
106 for replica in replicas])
107
108
109 @read_session
110 def list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):
111 """
112 List RSE Quarantined File replicas.
113
114 :param rse_id: the rse id.
115 :param limit: The maximum number of replicas returned.
116 :param worker_number: id of the executing worker.
117 :param total_workers: Number of total workers.
118 :param session: The database session in use.
119
120 :returns: a list of dictionary replica.
121 """
122
123 query = session.query(models.QuarantinedReplica.path,
124 models.QuarantinedReplica.bytes,
125 models.QuarantinedReplica.scope,
126 models.QuarantinedReplica.name,
127 models.QuarantinedReplica.created_at).\
128 filter(models.QuarantinedReplica.rse_id == rse_id)
129
130 # do no delete valid replicas
131 stmt = exists(select([1]).prefix_with("/*+ index(REPLICAS REPLICAS_PK) */", dialect='oracle')).\
132 where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,
133 models.RSEFileAssociation.name == models.QuarantinedReplica.name,
134 models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))
135 query = query.filter(not_(stmt))
136
137 if worker_number and total_workers and total_workers - 1 > 0:
138 if session.bind.dialect.name == 'oracle':
139 bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]
140 query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))
141 elif session.bind.dialect.name == 'mysql':
142 query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))
143 elif session.bind.dialect.name == 'postgresql':
144 query = query.filter('mod(abs((\'x\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))
145
146 return [{'path': path,
147 'rse_id': rse_id,
148 'created_at': created_at,
149 'scope': scope,
150 'name': name,
151 'bytes': bytes}
152 for path, bytes, scope, name, created_at in query.limit(limit)]
153
154
155 @read_session
156 def list_rses(session=None):
157 """
158 List RSEs in the Quarantined Queues.
159
160 :param session: The database session in use.
161
162 :returns: a list of RSEs.
163 """
164 query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\
165 filter(models.QuarantinedReplica.rse_id == models.RSE.id).\
166 filter(models.RSE.deleted == false())
167 return [rse for (rse,) in query]
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/core/quarantined_replica.py b/lib/rucio/core/quarantined_replica.py
--- a/lib/rucio/core/quarantined_replica.py
+++ b/lib/rucio/core/quarantined_replica.py
@@ -161,7 +161,7 @@
:returns: a list of RSEs.
"""
- query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\
+ query = session.query(models.RSE.id).distinct(models.RSE.id).\
filter(models.QuarantinedReplica.rse_id == models.RSE.id).\
filter(models.RSE.deleted == false())
return [rse for (rse,) in query]
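The one-line fix swaps `models.RSE.rse_id` for `models.RSE.id`: the join condition on the following line already uses `models.RSE.id`, since the RSE table's key column is `id` while `rse_id` is the foreign-key column on the quarantined-replica table, which is what the original query tripped over. Below is a self-contained SQLAlchemy sketch with simplified stand-in tables (not Rucio's real models; assumes SQLAlchemy 1.4 or newer) that mirrors the corrected join; it uses the plain `distinct()` form because the column-argument form targets PostgreSQL's `DISTINCT ON`.

```python
# Simplified stand-ins for Rucio's models, just to exercise the corrected join.
from sqlalchemy import Boolean, Column, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.sql.expression import false

Base = declarative_base()

class RSE(Base):
    __tablename__ = 'rses'
    id = Column(String(36), primary_key=True)    # the RSE key is `id`, not `rse_id`
    deleted = Column(Boolean, default=False)

class QuarantinedReplica(Base):
    __tablename__ = 'quarantined_replicas'
    path = Column(String(1024), primary_key=True)
    rse_id = Column(String(36))                   # `rse_id` lives on this table

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add_all([RSE(id='MOCK-RSE'),
                 QuarantinedReplica(path='/dark/file', rse_id='MOCK-RSE')])
session.commit()

# Same shape as the patched list_rses(); plain distinct() here because the
# column-argument form is PostgreSQL-specific.
query = session.query(RSE.id).distinct().\
    filter(QuarantinedReplica.rse_id == RSE.id).\
    filter(RSE.deleted == false())
print([rse for (rse,) in query])   # ['MOCK-RSE']
```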
| {"golden_diff": "diff --git a/lib/rucio/core/quarantined_replica.py b/lib/rucio/core/quarantined_replica.py\n--- a/lib/rucio/core/quarantined_replica.py\n+++ b/lib/rucio/core/quarantined_replica.py\n@@ -161,7 +161,7 @@\n \n :returns: a list of RSEs.\n \"\"\"\n- query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\\\n+ query = session.query(models.RSE.id).distinct(models.RSE.id).\\\n filter(models.QuarantinedReplica.rse_id == models.RSE.id).\\\n filter(models.RSE.deleted == false())\n return [rse for (rse,) in query]\n", "issue": "Dark Reaper rse_id crash\nMotivation\r\n----------\r\nThe dark reaper raises an error in 1.20.3\r\n\r\n\r\nModification\r\n------------\r\nFix the query in core/quarantined_replica.py -> list_rses()\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2016-2017\n# - Hannes Hansen <[email protected]>, 2018\n# - Dimitrios Christidis <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n#\n# PY3K COMPATIBLE\n\nimport datetime\n\nfrom sqlalchemy import and_, or_, exists, not_\nfrom sqlalchemy.sql.expression import bindparam, text, select, false\n\nfrom rucio.common.utils import chunks\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.session import read_session, transactional_session\n\n\n@transactional_session\ndef add_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Bulk add quarantined file replicas.\n\n :param rse_id: The rse id.\n :param replicas: A list of dicts with the replica information.\n :param session: The database session in use.\n \"\"\"\n\n for chunk in chunks(replicas, 100):\n # Exlude files that have a registered replica. 
This is a\n # safeguard against potential issues in the Auditor.\n file_clause = []\n for replica in chunk:\n file_clause.append(and_(models.RSEFileAssociation.scope == replica.get('scope', None),\n models.RSEFileAssociation.name == replica.get('name', None),\n models.RSEFileAssociation.rse_id == rse_id))\n file_query = session.query(models.RSEFileAssociation.scope,\n models.RSEFileAssociation.name,\n models.RSEFileAssociation.rse_id).\\\n with_hint(models.RSEFileAssociation, \"index(REPLICAS REPLICAS_PK)\", 'oracle').\\\n filter(or_(*file_clause))\n existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]\n chunk = [replica for replica in chunk if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]\n\n # Exclude files that have already been added to the quarantined\n # replica table.\n quarantine_clause = []\n for replica in chunk:\n quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],\n models.QuarantinedReplica.rse_id == rse_id))\n quarantine_query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.rse_id).\\\n filter(or_(*quarantine_clause))\n quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]\n chunk = [replica for replica in chunk if (replica['path'], rse_id) not in quarantine_replicas]\n\n session.bulk_insert_mappings(\n models.QuarantinedReplica,\n [{'rse_id': rse_id, 'path': file['path'],\n 'scope': file.get('scope'), 'name': file.get('name'),\n 'bytes': file.get('bytes')} for file in chunk])\n\n\n@transactional_session\ndef delete_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Delete file replicas.\n\n :param rse_id: the rse id.\n :param files: the list of files to delete.\n :param ignore_availability: Ignore the RSE blacklisting.\n :param session: The database session in use.\n \"\"\"\n\n conditions = []\n for replica in replicas:\n conditions.append(models.QuarantinedReplica.path == replica['path'])\n\n if conditions:\n session.query(models.QuarantinedReplica).\\\n filter(models.QuarantinedReplica.rse_id == rse_id).\\\n filter(or_(*conditions)).\\\n delete(synchronize_session=False)\n\n session.\\\n bulk_insert_mappings(models.QuarantinedReplica.__history_mapper__.class_,\n [{'rse_id': rse_id, 'path': replica['path'],\n 'bytes': replica.get('bytes'),\n 'created_at': replica.get('created_at'),\n 'deleted_at': datetime.datetime.utcnow()}\n for replica in replicas])\n\n\n@read_session\ndef list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):\n \"\"\"\n List RSE Quarantined File replicas.\n\n :param rse_id: the rse id.\n :param limit: The maximum number of replicas returned.\n :param worker_number: id of the executing worker.\n :param total_workers: Number of total workers.\n :param session: The database session in use.\n\n :returns: a list of dictionary replica.\n \"\"\"\n\n query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.bytes,\n models.QuarantinedReplica.scope,\n models.QuarantinedReplica.name,\n models.QuarantinedReplica.created_at).\\\n filter(models.QuarantinedReplica.rse_id == rse_id)\n\n # do no delete valid replicas\n stmt = exists(select([1]).prefix_with(\"/*+ index(REPLICAS REPLICAS_PK) */\", dialect='oracle')).\\\n where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,\n models.RSEFileAssociation.name == models.QuarantinedReplica.name,\n models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))\n query = 
query.filter(not_(stmt))\n\n if worker_number and total_workers and total_workers - 1 > 0:\n if session.bind.dialect.name == 'oracle':\n bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]\n query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))\n elif session.bind.dialect.name == 'mysql':\n query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))\n elif session.bind.dialect.name == 'postgresql':\n query = query.filter('mod(abs((\\'x\\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))\n\n return [{'path': path,\n 'rse_id': rse_id,\n 'created_at': created_at,\n 'scope': scope,\n 'name': name,\n 'bytes': bytes}\n for path, bytes, scope, name, created_at in query.limit(limit)]\n\n\n@read_session\ndef list_rses(session=None):\n \"\"\"\n List RSEs in the Quarantined Queues.\n\n :param session: The database session in use.\n\n :returns: a list of RSEs.\n \"\"\"\n query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\\\n filter(models.QuarantinedReplica.rse_id == models.RSE.id).\\\n filter(models.RSE.deleted == false())\n return [rse for (rse,) in query]\n", "path": "lib/rucio/core/quarantined_replica.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2016-2017\n# - Hannes Hansen <[email protected]>, 2018\n# - Dimitrios Christidis <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n#\n# PY3K COMPATIBLE\n\nimport datetime\n\nfrom sqlalchemy import and_, or_, exists, not_\nfrom sqlalchemy.sql.expression import bindparam, text, select, false\n\nfrom rucio.common.utils import chunks\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.session import read_session, transactional_session\n\n\n@transactional_session\ndef add_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Bulk add quarantined file replicas.\n\n :param rse_id: The rse id.\n :param replicas: A list of dicts with the replica information.\n :param session: The database session in use.\n \"\"\"\n\n for chunk in chunks(replicas, 100):\n # Exlude files that have a registered replica. 
This is a\n # safeguard against potential issues in the Auditor.\n file_clause = []\n for replica in chunk:\n file_clause.append(and_(models.RSEFileAssociation.scope == replica.get('scope', None),\n models.RSEFileAssociation.name == replica.get('name', None),\n models.RSEFileAssociation.rse_id == rse_id))\n file_query = session.query(models.RSEFileAssociation.scope,\n models.RSEFileAssociation.name,\n models.RSEFileAssociation.rse_id).\\\n with_hint(models.RSEFileAssociation, \"index(REPLICAS REPLICAS_PK)\", 'oracle').\\\n filter(or_(*file_clause))\n existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]\n chunk = [replica for replica in chunk if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]\n\n # Exclude files that have already been added to the quarantined\n # replica table.\n quarantine_clause = []\n for replica in chunk:\n quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],\n models.QuarantinedReplica.rse_id == rse_id))\n quarantine_query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.rse_id).\\\n filter(or_(*quarantine_clause))\n quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]\n chunk = [replica for replica in chunk if (replica['path'], rse_id) not in quarantine_replicas]\n\n session.bulk_insert_mappings(\n models.QuarantinedReplica,\n [{'rse_id': rse_id, 'path': file['path'],\n 'scope': file.get('scope'), 'name': file.get('name'),\n 'bytes': file.get('bytes')} for file in chunk])\n\n\n@transactional_session\ndef delete_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Delete file replicas.\n\n :param rse_id: the rse id.\n :param files: the list of files to delete.\n :param ignore_availability: Ignore the RSE blacklisting.\n :param session: The database session in use.\n \"\"\"\n\n conditions = []\n for replica in replicas:\n conditions.append(models.QuarantinedReplica.path == replica['path'])\n\n if conditions:\n session.query(models.QuarantinedReplica).\\\n filter(models.QuarantinedReplica.rse_id == rse_id).\\\n filter(or_(*conditions)).\\\n delete(synchronize_session=False)\n\n session.\\\n bulk_insert_mappings(models.QuarantinedReplica.__history_mapper__.class_,\n [{'rse_id': rse_id, 'path': replica['path'],\n 'bytes': replica.get('bytes'),\n 'created_at': replica.get('created_at'),\n 'deleted_at': datetime.datetime.utcnow()}\n for replica in replicas])\n\n\n@read_session\ndef list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):\n \"\"\"\n List RSE Quarantined File replicas.\n\n :param rse_id: the rse id.\n :param limit: The maximum number of replicas returned.\n :param worker_number: id of the executing worker.\n :param total_workers: Number of total workers.\n :param session: The database session in use.\n\n :returns: a list of dictionary replica.\n \"\"\"\n\n query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.bytes,\n models.QuarantinedReplica.scope,\n models.QuarantinedReplica.name,\n models.QuarantinedReplica.created_at).\\\n filter(models.QuarantinedReplica.rse_id == rse_id)\n\n # do no delete valid replicas\n stmt = exists(select([1]).prefix_with(\"/*+ index(REPLICAS REPLICAS_PK) */\", dialect='oracle')).\\\n where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,\n models.RSEFileAssociation.name == models.QuarantinedReplica.name,\n models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))\n query = 
query.filter(not_(stmt))\n\n if worker_number and total_workers and total_workers - 1 > 0:\n if session.bind.dialect.name == 'oracle':\n bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]\n query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))\n elif session.bind.dialect.name == 'mysql':\n query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))\n elif session.bind.dialect.name == 'postgresql':\n query = query.filter('mod(abs((\\'x\\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))\n\n return [{'path': path,\n 'rse_id': rse_id,\n 'created_at': created_at,\n 'scope': scope,\n 'name': name,\n 'bytes': bytes}\n for path, bytes, scope, name, created_at in query.limit(limit)]\n\n\n@read_session\ndef list_rses(session=None):\n \"\"\"\n List RSEs in the Quarantined Queues.\n\n :param session: The database session in use.\n\n :returns: a list of RSEs.\n \"\"\"\n query = session.query(models.RSE.id).distinct(models.RSE.id).\\\n filter(models.QuarantinedReplica.rse_id == models.RSE.id).\\\n filter(models.RSE.deleted == false())\n return [rse for (rse,) in query]\n", "path": "lib/rucio/core/quarantined_replica.py"}]} |
gh_patches_debug_1365 | rasdani/github-patches | git_diff | carltongibson__django-filter-402 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve docs on common pitfalls
Referencing this [discussion](https://github.com/carltongibson/django-filter/issues/394#issuecomment-202942471), docs could better explain some common misunderstandings about filter validation and data shape for various query expressions. eg, a CharFilter validates and outputs a single string, which is incompatible with 'in' lookups as it expects a list of strings.
[This](https://github.com/carltongibson/django-filter/blob/v0.13/docs/usage.txt#L82-L100) section would be a good spot to expand on it.
--- END ISSUE ---
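For the usage-docs section the issue points at, the clearest kind of example contrasts a plain `CharFilter` (which validates to a single string) with a CSV-style filter that hands the ORM the list an `in` lookup expects. The sketch below is only an illustration of that contrast: it uses names from recent django-filter releases (`BaseInFilter`, `field_name`, `lookup_expr`), assumes a configured Django project, and the `Product`, `name`, and `id` fields are placeholders rather than anything in this repository.

```python
import django_filters


class NumberInFilter(django_filters.BaseInFilter, django_filters.NumberFilter):
    """Splits comma-separated input and validates each item as a number."""


class ProductFilter(django_filters.FilterSet):
    # Pitfall: CharFilter validates to ONE string, so `?name_in=a,b` reaches
    # the ORM as name__in='a,b' and does not behave the way people expect.
    name_in = django_filters.CharFilter(field_name='name', lookup_expr='in')

    # A CSV-based filter instead yields a list of validated values,
    # which is the shape the `in` lookup actually wants.
    id_in = NumberInFilter(field_name='id', lookup_expr='in')
```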
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # django-filter documentation build configuration file, created by
4 # sphinx-quickstart on Mon Sep 17 11:25:20 2012.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys, os
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #sys.path.insert(0, os.path.abspath('.'))
20
21 # -- General configuration -----------------------------------------------------
22
23 # If your documentation needs a minimal Sphinx version, state it here.
24 #needs_sphinx = '1.0'
25
26 # Add any Sphinx extension module names here, as strings. They can be extensions
27 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
28 extensions = []
29
30 # Add any paths that contain templates here, relative to this directory.
31 templates_path = ['_templates']
32
33 # The suffix of source filenames.
34 source_suffix = '.txt'
35
36 # The encoding of source files.
37 #source_encoding = 'utf-8-sig'
38
39 # The master toctree document.
40 master_doc = 'index'
41
42 # General information about the project.
43 project = u'django-filter'
44 copyright = u'2013, Alex Gaynor and others.'
45
46 # The version info for the project you're documenting, acts as replacement for
47 # |version| and |release|, also used in various other places throughout the
48 # built documents.
49 #
50 # The short X.Y version.
51 version = '0.13.0'
52 # The full version, including alpha/beta/rc tags.
53 release = '0.13.0'
54
55 # The language for content autogenerated by Sphinx. Refer to documentation
56 # for a list of supported languages.
57 #language = None
58
59 # There are two options for replacing |today|: either, you set today to some
60 # non-false value, then it is used:
61 #today = ''
62 # Else, today_fmt is used as the format for a strftime call.
63 #today_fmt = '%B %d, %Y'
64
65 # List of patterns, relative to source directory, that match files and
66 # directories to ignore when looking for source files.
67 exclude_patterns = ['_build']
68
69 # The reST default role (used for this markup: `text`) to use for all documents.
70 #default_role = None
71
72 # If true, '()' will be appended to :func: etc. cross-reference text.
73 #add_function_parentheses = True
74
75 # If true, the current module name will be prepended to all description
76 # unit titles (such as .. function::).
77 #add_module_names = True
78
79 # If true, sectionauthor and moduleauthor directives will be shown in the
80 # output. They are ignored by default.
81 #show_authors = False
82
83 # The name of the Pygments (syntax highlighting) style to use.
84 pygments_style = 'sphinx'
85
86 # A list of ignored prefixes for module index sorting.
87 #modindex_common_prefix = []
88
89
90 # -- Options for HTML output ---------------------------------------------------
91
92 # The theme to use for HTML and HTML Help pages. See the documentation for
93 # a list of builtin themes.
94 html_theme = 'default'
95
96 # Theme options are theme-specific and customize the look and feel of a theme
97 # further. For a list of options available for each theme, see the
98 # documentation.
99 #html_theme_options = {}
100
101 # Add any paths that contain custom themes here, relative to this directory.
102 #html_theme_path = []
103
104 # The name for this set of Sphinx documents. If None, it defaults to
105 # "<project> v<release> documentation".
106 #html_title = None
107
108 # A shorter title for the navigation bar. Default is the same as html_title.
109 #html_short_title = None
110
111 # The name of an image file (relative to this directory) to place at the top
112 # of the sidebar.
113 #html_logo = None
114
115 # The name of an image file (within the static path) to use as favicon of the
116 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
117 # pixels large.
118 #html_favicon = None
119
120 # Add any paths that contain custom static files (such as style sheets) here,
121 # relative to this directory. They are copied after the builtin static files,
122 # so a file named "default.css" will overwrite the builtin "default.css".
123 #html_static_path = ['_static']
124
125 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
126 # using the given strftime format.
127 #html_last_updated_fmt = '%b %d, %Y'
128
129 # If true, SmartyPants will be used to convert quotes and dashes to
130 # typographically correct entities.
131 #html_use_smartypants = True
132
133 # Custom sidebar templates, maps document names to template names.
134 #html_sidebars = {}
135
136 # Additional templates that should be rendered to pages, maps page names to
137 # template names.
138 #html_additional_pages = {}
139
140 # If false, no module index is generated.
141 #html_domain_indices = True
142
143 # If false, no index is generated.
144 #html_use_index = True
145
146 # If true, the index is split into individual pages for each letter.
147 #html_split_index = False
148
149 # If true, links to the reST sources are added to the pages.
150 #html_show_sourcelink = True
151
152 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
153 #html_show_sphinx = True
154
155 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
156 #html_show_copyright = True
157
158 # If true, an OpenSearch description file will be output, and all pages will
159 # contain a <link> tag referring to it. The value of this option must be the
160 # base URL from which the finished HTML is served.
161 #html_use_opensearch = ''
162
163 # This is the file name suffix for HTML files (e.g. ".xhtml").
164 #html_file_suffix = None
165
166 # Output file base name for HTML help builder.
167 htmlhelp_basename = 'django-filterdoc'
168
169
170 # -- Options for LaTeX output --------------------------------------------------
171
172 latex_elements = {
173 # The paper size ('letterpaper' or 'a4paper').
174 #'papersize': 'letterpaper',
175
176 # The font size ('10pt', '11pt' or '12pt').
177 #'pointsize': '10pt',
178
179 # Additional stuff for the LaTeX preamble.
180 #'preamble': '',
181 }
182
183 # Grouping the document tree into LaTeX files. List of tuples
184 # (source start file, target name, title, author, documentclass [howto/manual]).
185 latex_documents = [
186 ('index', 'django-filter.tex', u'django-filter Documentation',
187 u'Alex Gaynor and others.', 'manual'),
188 ]
189
190 # The name of an image file (relative to this directory) to place at the top of
191 # the title page.
192 #latex_logo = None
193
194 # For "manual" documents, if this is true, then toplevel headings are parts,
195 # not chapters.
196 #latex_use_parts = False
197
198 # If true, show page references after internal links.
199 #latex_show_pagerefs = False
200
201 # If true, show URL addresses after external links.
202 #latex_show_urls = False
203
204 # Documents to append as an appendix to all manuals.
205 #latex_appendices = []
206
207 # If false, no module index is generated.
208 #latex_domain_indices = True
209
210
211 # -- Options for manual page output --------------------------------------------
212
213 # One entry per manual page. List of tuples
214 # (source start file, name, description, authors, manual section).
215 man_pages = [
216 ('index', 'django-filter', u'django-filter Documentation',
217 [u'Alex Gaynor and others.'], 1)
218 ]
219
220 # If true, show URL addresses after external links.
221 #man_show_urls = False
222
223
224 # -- Options for Texinfo output ------------------------------------------------
225
226 # Grouping the document tree into Texinfo files. List of tuples
227 # (source start file, target name, title, author,
228 # dir menu entry, description, category)
229 texinfo_documents = [
230 ('index', 'django-filter', u'django-filter Documentation',
231 u'Alex Gaynor and others.', 'django-filter', 'One line description of project.',
232 'Miscellaneous'),
233 ]
234
235 # Documents to append as an appendix to all manuals.
236 #texinfo_appendices = []
237
238 # If false, no module index is generated.
239 #texinfo_domain_indices = True
240
241 # How to display URL addresses: 'footnote', 'no', or 'inline'.
242 #texinfo_show_urls = 'footnote'
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -240,3 +240,15 @@
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
+
+
+# see:
+# https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+
+# only import and set the theme if we're building docs locally
+if not on_rtd:
+ import sphinx_rtd_theme
+
+ html_theme = 'sphinx_rtd_theme'
+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
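The appended block assumes `sphinx_rtd_theme` is importable whenever the docs are built outside Read the Docs; a slightly more forgiving variant (an illustrative alternative, not part of the patch) falls back to Sphinx's stock theme when the package is missing, so local `make html` runs still succeed:

```python
# Variant of the same idea with a graceful fallback; not part of the patch.
import os

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        html_theme = 'default'  # keep Sphinx's built-in theme if unavailable
```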
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -240,3 +240,15 @@\n \n # How to display URL addresses: 'footnote', 'no', or 'inline'.\n #texinfo_show_urls = 'footnote'\n+\n+\n+# see:\n+# https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs\n+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n+\n+# only import and set the theme if we're building docs locally\n+if not on_rtd:\n+ import sphinx_rtd_theme\n+\n+ html_theme = 'sphinx_rtd_theme'\n+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "issue": "Improve docs on common pitfalls\nReferencing this [discussion](https://github.com/carltongibson/django-filter/issues/394#issuecomment-202942471), docs could better explain some common misunderstandings about filter validation and data shape for various query expressions. eg, a CharFilter validates and outputs a single string, which is incompatible with 'in' lookups as it expects a list of strings. \n\n[This](https://github.com/carltongibson/django-filter/blob/v0.13/docs/usage.txt#L82-L100) section would be a good spot to expand on it. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# django-filter documentation build configuration file, created by\n# sphinx-quickstart on Mon Sep 17 11:25:20 2012.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.txt'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'django-filter'\ncopyright = u'2013, Alex Gaynor and others.'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.13.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.13.0'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'django-filterdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'django-filter.tex', u'django-filter Documentation',\n u'Alex Gaynor and others.', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'django-filter', u'django-filter Documentation',\n [u'Alex Gaynor and others.'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'django-filter', u'django-filter Documentation',\n u'Alex Gaynor and others.', 'django-filter', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# django-filter documentation build configuration file, created by\n# sphinx-quickstart on Mon Sep 17 11:25:20 2012.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.txt'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'django-filter'\ncopyright = u'2013, Alex Gaynor and others.'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.13.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.13.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'django-filterdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'django-filter.tex', u'django-filter Documentation',\n u'Alex Gaynor and others.', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'django-filter', u'django-filter Documentation',\n [u'Alex Gaynor and others.'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'django-filter', u'django-filter Documentation',\n u'Alex Gaynor and others.', 'django-filter', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n\n# see:\n# https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# only import and set the theme if we're building docs locally\nif not on_rtd:\n import sphinx_rtd_theme\n\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]} |
gh_patches_debug_1366 | rasdani/github-patches | git_diff | SeldonIO__MLServer-1171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OS constraint in PyPI
Mention MLServer's OS constraints as metadata in `setup.py`, so that they become visible on pypi.org. 
```
setup(...,
classifiers=[
'Operating System :: POSIX',
],
)
```
_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 include_package_data=True,
42 packages=find_packages(exclude=["tests", "tests.*"]),
43 install_requires=[
44 "click",
45 # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
46 "fastapi >=0.88.0, <=0.89.1, !=0.89.0",
47 "python-dotenv",
48 "grpcio",
49 # The importlib-resources backport is required to use some
50 # functionality added in Python 3.10
51 # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime
52 "importlib-resources",
53 "numpy",
54 "pandas",
55 "protobuf",
56 "uvicorn",
57 "starlette_exporter",
58 "py-grpc-prometheus",
59 "uvloop;" + env_marker_cpython,
60 "aiokafka",
61 "tritonclient[http]>=2.24",
62 "aiofiles",
63 "orjson",
64 ],
65 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
66 long_description=_load_description(),
67 long_description_content_type="text/markdown",
68 license="Apache 2.0",
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
url="https://github.com/SeldonIO/MLServer.git",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
+ classifiers=["Operating System :: POSIX", "Operating System :: MacOS"],
description="ML server",
include_package_data=True,
packages=find_packages(exclude=["tests", "tests.*"]),
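
(For context, not part of the recorded patch: once `classifiers` are passed to `setup()`, they end up in the package's core metadata, which is what PyPI renders. A minimal sketch of how one might double-check them from an installed build — it assumes `mlserver` is installed in the current environment and uses only the standard library:)

```python
# Hedged verification sketch: read the Operating System classifiers back from
# the installed package metadata (importlib.metadata ships with Python 3.8+).
from importlib.metadata import metadata

md = metadata("mlserver")  # assumes the patched package is installed locally
for classifier in md.get_all("Classifier") or []:
    if classifier.startswith("Operating System"):
        print(classifier)  # expect "Operating System :: POSIX" / ":: MacOS"
```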
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n+ classifiers=[\"Operating System :: POSIX\", \"Operating System :: MacOS\"],\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n", "issue": "Add OS constraint in PyPI\nMention MLServer's OS constraints as metadata in `setup.py`, so that it becomes visible in pypi.org. \r\n\r\n```\r\nsetup(...,\r\n classifiers=[\r\n 'Operating System :: POSIX',\r\n ],\r\n )\r\n```\r\n\r\n_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_\r\n \n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi >=0.88.0, <=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n # The importlib-resources backport is required to use some\n # functionality added in Python 3.10\n # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime\n \"importlib-resources\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n 
version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n classifiers=[\"Operating System :: POSIX\", \"Operating System :: MacOS\"],\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi >=0.88.0, <=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n # The importlib-resources backport is required to use some\n # functionality added in Python 3.10\n # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime\n \"importlib-resources\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1367 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-2023 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Async client not working when sync client is.
### Versions
- Python: 3.9.2
- OS: Raspbian 11 (Bullseye)
- Pymodbus: 3.6.4
- Modbus Hardware (if used): Raspberry Pi 4B GPIO Pins 14,15 (using UART port) to a non-pymodbus device.
### Pymodbus Specific
- Client: rtu - async serial
### Description
I have been running the synchronous client successfully, but when I switch to the asynchronous client it no longer works. Looking at the logs, there appears to be a mismatch between the transaction numbers, and the asynchronous client treats the response as an unrequested message. How can I fix this?
### Code
```python
# code and logs here.
client = ModbusClient.AsyncModbusSerialClient(
port="/dev/ttyS0",
method="rtu",
baudrate=9600,
bytesize=8,
parity="N",
stopbits=1
)
await client.connect()
response1 = await client.read_holding_registers(address=124, count=4, unit=0)
```
### Logs
```sh
2024-02-15 14:16:38,974 DEBUG logging:103 Connecting to /dev/ttyS0.
2024-02-15 14:16:38,975 DEBUG logging:103 Connecting comm
2024-02-15 14:16:38,976 DEBUG logging:103 Connected to comm
2024-02-15 14:16:38,976 DEBUG logging:103 callback_connected called
2024-02-15 14:16:38,977 DEBUG logging:103 Adding transaction 0
2024-02-15 14:16:38,977 DEBUG logging:103 send: 0x0 0x3 0x0 0x7c 0x0 0x4 0x84 0x0
2024-02-15 14:16:39,048 DEBUG logging:103 recv: 0x1 old_data: addr=None
2024-02-15 14:16:39,048 DEBUG logging:103 Processing: 0x1
2024-02-15 14:16:39,049 DEBUG logging:103 recv: 0x3 old_data: addr=None
2024-02-15 14:16:39,049 DEBUG logging:103 Processing: 0x3
2024-02-15 14:16:39,050 DEBUG logging:103 recv: 0x8 old_data: addr=None
2024-02-15 14:16:39,050 DEBUG logging:103 Processing: 0x8
2024-02-15 14:16:39,051 DEBUG logging:103 recv: 0x0 old_data: addr=None
2024-02-15 14:16:39,051 DEBUG logging:103 Processing: 0x0
2024-02-15 14:16:39,051 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,052 DEBUG logging:103 recv: 0x5 old_data: addr=None
2024-02-15 14:16:39,052 DEBUG logging:103 Processing: 0x5
2024-02-15 14:16:39,052 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,053 DEBUG logging:103 recv: 0x0 old_data: addr=None
2024-02-15 14:16:39,053 DEBUG logging:103 Processing: 0x0
2024-02-15 14:16:39,053 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,054 DEBUG logging:103 recv: 0x5 old_data: addr=None
2024-02-15 14:16:39,054 DEBUG logging:103 Processing: 0x5
2024-02-15 14:16:39,054 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,055 DEBUG logging:103 recv: 0x0 old_data: addr=None
2024-02-15 14:16:39,055 DEBUG logging:103 Processing: 0x0
2024-02-15 14:16:39,055 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,056 DEBUG logging:103 recv: 0x0 old_data: addr=None
2024-02-15 14:16:39,056 DEBUG logging:103 Processing: 0x0
2024-02-15 14:16:39,057 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,057 DEBUG logging:103 recv: 0x0 old_data: addr=None
2024-02-15 14:16:39,057 DEBUG logging:103 Processing: 0x0
2024-02-15 14:16:39,057 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,058 DEBUG logging:103 recv: 0x0 old_data: addr=None
2024-02-15 14:16:39,058 DEBUG logging:103 Processing: 0x0
2024-02-15 14:16:39,058 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,059 DEBUG logging:103 recv: 0xc old_data: addr=None
2024-02-15 14:16:39,059 DEBUG logging:103 Processing: 0xc
2024-02-15 14:16:39,060 DEBUG logging:103 Frame - not ready
2024-02-15 14:16:39,061 DEBUG logging:103 recv: 0xd7 old_data: addr=None
2024-02-15 14:16:39,061 DEBUG logging:103 Processing: 0xd7
2024-02-15 14:16:39,061 DEBUG logging:103 Getting Frame - 0x3 0x8 0x0 0x5 0x0 0x5 0x0 0x0 0x0 0x0
2024-02-15 14:16:39,062 DEBUG logging:103 Factory Response[ReadHoldingRegistersResponse': 3]
2024-02-15 14:16:39,062 DEBUG logging:103 Frame advanced, resetting header!!
2024-02-15 14:16:39,062 DEBUG logging:103 Getting transaction 1
2024-02-15 14:16:39,062 DEBUG logging:103 Unrequested message: ReadHoldingRegistersResponse (4)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/framer/rtu_framer.py`
Content:
```
1 """RTU framer."""
2 # pylint: disable=missing-type-doc
3 import struct
4 import time
5
6 from pymodbus.exceptions import (
7 InvalidMessageReceivedException,
8 ModbusIOException,
9 )
10 from pymodbus.framer.base import BYTE_ORDER, FRAME_HEADER, ModbusFramer
11 from pymodbus.logging import Log
12 from pymodbus.utilities import ModbusTransactionState, checkCRC, computeCRC
13
14
15 RTU_FRAME_HEADER = BYTE_ORDER + FRAME_HEADER
16
17
18 # --------------------------------------------------------------------------- #
19 # Modbus RTU Message
20 # --------------------------------------------------------------------------- #
21 class ModbusRtuFramer(ModbusFramer):
22 """Modbus RTU Frame controller.
23
24 [ Start Wait ] [Address ][ Function Code] [ Data ][ CRC ][ End Wait ]
25 3.5 chars 1b 1b Nb 2b 3.5 chars
26
27 Wait refers to the amount of time required to transmit at least x many
28 characters. In this case it is 3.5 characters. Also, if we receive a
29 wait of 1.5 characters at any point, we must trigger an error message.
30 Also, it appears as though this message is little endian. The logic is
31 simplified as the following::
32
33 block-on-read:
34 read until 3.5 delay
35 check for errors
36 decode
37
38 The following table is a listing of the baud wait times for the specified
39 baud rates::
40
41 ------------------------------------------------------------------
42 Baud 1.5c (18 bits) 3.5c (38 bits)
43 ------------------------------------------------------------------
44 1200 13333.3 us 31666.7 us
45 4800 3333.3 us 7916.7 us
46 9600 1666.7 us 3958.3 us
47 19200 833.3 us 1979.2 us
48 38400 416.7 us 989.6 us
49 ------------------------------------------------------------------
50 1 Byte = start + 8 bits + parity + stop = 11 bits
51 (1/Baud)(bits) = delay seconds
52 """
53
54 method = "rtu"
55
56 def __init__(self, decoder, client=None):
57 """Initialize a new instance of the framer.
58
59 :param decoder: The decoder factory implementation to use
60 """
61 super().__init__(decoder, client)
62 self._hsize = 0x01
63 self._end = b"\x0d\x0a"
64 self._min_frame_size = 4
65 self.function_codes = decoder.lookup.keys() if decoder else {}
66
67 # ----------------------------------------------------------------------- #
68 # Private Helper Functions
69 # ----------------------------------------------------------------------- #
70 def decode_data(self, data):
71 """Decode data."""
72 if len(data) > self._hsize:
73 uid = int(data[0])
74 fcode = int(data[1])
75 return {"slave": uid, "fcode": fcode}
76 return {}
77
78 def checkFrame(self):
79 """Check if the next frame is available.
80
81 Return True if we were successful.
82
83 1. Populate header
84 2. Discard frame if UID does not match
85 """
86 try:
87 self.populateHeader()
88 frame_size = self._header["len"]
89 data = self._buffer[: frame_size - 2]
90 crc = self._header["crc"]
91 crc_val = (int(crc[0]) << 8) + int(crc[1])
92 return checkCRC(data, crc_val)
93 except (IndexError, KeyError, struct.error):
94 return False
95
96 def advanceFrame(self):
97 """Skip over the current framed message.
98
99 This allows us to skip over the current message after we have processed
100 it or determined that it contains an error. It also has to reset the
101 current frame header handle
102 """
103 self._buffer = self._buffer[self._header["len"] :]
104 Log.debug("Frame advanced, resetting header!!")
105 self._header = {"uid": 0x00, "len": 0, "crc": b"\x00\x00"}
106
107 def resetFrame(self):
108 """Reset the entire message frame.
109
110 This allows us to skip over errors that may be in the stream.
111 It is hard to know if we are simply out of sync or if there is
112 an error in the stream as we have no way to check the start or
113 end of the message (python just doesn't have the resolution to
114 check for millisecond delays).
115 """
116 x = self._buffer
117 super().resetFrame()
118 self._buffer = x
119
120 def isFrameReady(self):
121 """Check if we should continue decode logic.
122
123 This is meant to be used in a while loop in the decoding phase to let
124 the decoder know that there is still data in the buffer.
125
126 :returns: True if ready, False otherwise
127 """
128 size = self._header.get("len", 0)
129 if not size and len(self._buffer) > self._hsize:
130 try:
131 # Frame is ready only if populateHeader() successfully
132 # populates crc field which finishes RTU frame otherwise,
133 # if buffer is not yet long enough, populateHeader() raises IndexError
134 size = self.populateHeader()
135 except IndexError:
136 return False
137
138 return len(self._buffer) >= size if size > 0 else False
139
140 def populateHeader(self, data=None):
141 """Try to set the headers `uid`, `len` and `crc`.
142
143 This method examines `self._buffer` and writes meta
144 information into `self._header`.
145
146 Beware that this method will raise an IndexError if
147 `self._buffer` is not yet long enough.
148 """
149 data = data if data is not None else self._buffer
150 self._header["uid"] = int(data[0])
151 self._header["tid"] = int(data[0])
152 size = self.get_expected_response_length(data)
153 self._header["len"] = size
154
155 if len(data) < size:
156 # crc yet not available
157 raise IndexError
158 self._header["crc"] = data[size - 2 : size]
159 return size
160
161 def getFrame(self):
162 """Get the next frame from the buffer.
163
164 :returns: The frame data or ""
165 """
166 start = self._hsize
167 end = self._header["len"] - 2
168 buffer = self._buffer[start:end]
169 if end > 0:
170 Log.debug("Getting Frame - {}", buffer, ":hex")
171 return buffer
172 return b""
173
174 def populateResult(self, result):
175 """Populate the modbus result header.
176
177 The serial packets do not have any header information
178 that is copied.
179
180 :param result: The response packet
181 """
182 result.slave_id = self._header["uid"]
183 result.transaction_id = self._header["tid"]
184
185 def getFrameStart(self, slaves, broadcast, skip_cur_frame):
186 """Scan buffer for a relevant frame start."""
187 start = 1 if skip_cur_frame else 0
188 if (buf_len := len(self._buffer)) < 4:
189 return False
190 for i in range(start, buf_len - 3): # <slave id><function code><crc 2 bytes>
191 if not broadcast and self._buffer[i] not in slaves:
192 continue
193 if (
194 self._buffer[i + 1] not in self.function_codes
195 and (self._buffer[i + 1] - 0x80) not in self.function_codes
196 ):
197 continue
198 if i:
199 self._buffer = self._buffer[i:] # remove preceding trash.
200 return True
201 if buf_len > 3:
202 self._buffer = self._buffer[-3:]
203 return False
204
205 # ----------------------------------------------------------------------- #
206 # Public Member Functions
207 # ----------------------------------------------------------------------- #
208 def frameProcessIncomingPacket(self, single, callback, slave, _tid=None, **kwargs):
209 """Process new packet pattern."""
210 broadcast = not slave[0]
211 skip_cur_frame = False
212 while self.getFrameStart(slave, broadcast, skip_cur_frame):
213 if not self.isFrameReady():
214 Log.debug("Frame - not ready")
215 break
216 if not self.checkFrame():
217 Log.debug("Frame check failed, ignoring!!")
218 self.resetFrame()
219 skip_cur_frame = True
220 continue
221 if not self._validate_slave_id(slave, single):
222 header_txt = self._header["uid"]
223 Log.debug("Not a valid slave id - {}, ignoring!!", header_txt)
224 self.resetFrame()
225 skip_cur_frame = True
226 continue
227 self._process(callback)
228
229 def buildPacket(self, message):
230 """Create a ready to send modbus packet.
231
232 :param message: The populated request/response to send
233 """
234 data = message.encode()
235 packet = (
236 struct.pack(RTU_FRAME_HEADER, message.slave_id, message.function_code)
237 + data
238 )
239 packet += struct.pack(">H", computeCRC(packet))
240 # Ensure that transaction is actually the slave id for serial comms
241 message.transaction_id = message.slave_id
242 return packet
243
244 def sendPacket(self, message):
245 """Send packets on the bus with 3.5char delay between frames.
246
247 :param message: Message to be sent over the bus
248 :return:
249 """
250 super().resetFrame()
251 start = time.time()
252 timeout = start + self.client.comm_params.timeout_connect
253 while self.client.state != ModbusTransactionState.IDLE:
254 if self.client.state == ModbusTransactionState.TRANSACTION_COMPLETE:
255 timestamp = round(time.time(), 6)
256 Log.debug(
257 "Changing state to IDLE - Last Frame End - {} Current Time stamp - {}",
258 self.client.last_frame_end,
259 timestamp,
260 )
261 if self.client.last_frame_end:
262 idle_time = self.client.idle_time()
263 if round(timestamp - idle_time, 6) <= self.client.silent_interval:
264 Log.debug(
265 "Waiting for 3.5 char before next send - {} ms",
266 self.client.silent_interval * 1000,
267 )
268 time.sleep(self.client.silent_interval)
269 else:
270 # Recovering from last error ??
271 time.sleep(self.client.silent_interval)
272 self.client.state = ModbusTransactionState.IDLE
273 elif self.client.state == ModbusTransactionState.RETRYING:
274 # Simple lets settle down!!!
275 # To check for higher baudrates
276 time.sleep(self.client.comm_params.timeout_connect)
277 break
278 elif time.time() > timeout:
279 Log.debug(
280 "Spent more time than the read time out, "
281 "resetting the transaction to IDLE"
282 )
283 self.client.state = ModbusTransactionState.IDLE
284 else:
285 Log.debug("Sleeping")
286 time.sleep(self.client.silent_interval)
287 size = self.client.send(message)
288 self.client.last_frame_end = round(time.time(), 6)
289 return size
290
291 def recvPacket(self, size):
292 """Receive packet from the bus with specified len.
293
294 :param size: Number of bytes to read
295 :return:
296 """
297 result = self.client.recv(size)
298 self.client.last_frame_end = round(time.time(), 6)
299 return result
300
301 def _process(self, callback, error=False):
302 """Process incoming packets irrespective error condition."""
303 data = self._buffer if error else self.getFrame()
304 if (result := self.decoder.decode(data)) is None:
305 raise ModbusIOException("Unable to decode request")
306 if error and result.function_code < 0x80:
307 raise InvalidMessageReceivedException(str(result))
308 self.populateResult(result)
309 self.advanceFrame()
310 callback(result) # defer or push to a thread?
311
312 def get_expected_response_length(self, data):
313 """Get the expected response length.
314
315 :param data: Message data read so far
316 :raises IndexError: If not enough data to read byte count
317 :return: Total frame size
318 """
319 func_code = int(data[1])
320 pdu_class = self.decoder.lookupPduClass(func_code)
321 return pdu_class.calculateRtuFrameSize(data)
322
323
324 # __END__
325
```
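
(The baud-wait table in the framer docstring above is plain bit arithmetic — 11 bits per character, roughly 38 bits for 3.5 characters. A small standalone sketch that reproduces those figures, using no pymodbus APIs at all:)

```python
# Reproduce the 3.5-character silent-interval column from the docstring table.
# One serial character = start + 8 data bits + parity + stop = 11 bits,
# so 3.5 characters is roughly 38 bits on the wire.
for baud in (1200, 4800, 9600, 19200, 38400):
    delay_us = 38 / baud * 1_000_000
    print(f"{baud:>6} baud -> {delay_us:8.1f} us")  # e.g. 9600 baud -> 3958.3 us
```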
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymodbus/framer/rtu_framer.py b/pymodbus/framer/rtu_framer.py
--- a/pymodbus/framer/rtu_framer.py
+++ b/pymodbus/framer/rtu_framer.py
@@ -238,7 +238,8 @@
)
packet += struct.pack(">H", computeCRC(packet))
# Ensure that transaction is actually the slave id for serial comms
- message.transaction_id = message.slave_id
+ if message.slave_id:
+ message.transaction_id = message.slave_id
return packet
def sendPacket(self, message):
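
(Reading this patch against the log above: the logs suggest the request is registered under transaction 0 — the slave id the caller passed — while the reply is looked up under transaction 1, taken from the address byte of the device that answered, so the lookup fails and the response is reported as unrequested. The added guard simply stops a zero slave id from clobbering the transaction id. On the usage side, a hedged sketch of the workaround most readers would try first — addressing the device id that actually answers (0x01 in the capture) rather than 0, which is the Modbus broadcast address. The `slave=` keyword is assumed from pymodbus 3.x conventions and not verified here:)

```python
# Hypothetical repro tweak (not from the report): target the responding device id.
# Assumption: pymodbus 3.x clients take the target address as `slave=`;
# treat the keyword name as unverified if your version differs.
response = await client.read_holding_registers(address=124, count=4, slave=1)
print(response.registers)  # holding registers 124..127, if the request succeeds
```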
| {"golden_diff": "diff --git a/pymodbus/framer/rtu_framer.py b/pymodbus/framer/rtu_framer.py\n--- a/pymodbus/framer/rtu_framer.py\n+++ b/pymodbus/framer/rtu_framer.py\n@@ -238,7 +238,8 @@\n )\n packet += struct.pack(\">H\", computeCRC(packet))\n # Ensure that transaction is actually the slave id for serial comms\n- message.transaction_id = message.slave_id\n+ if message.slave_id:\n+ message.transaction_id = message.slave_id\n return packet\n \n def sendPacket(self, message):\n", "issue": "Async client not working when sync client is. \n### Versions\r\n\r\n- Python: 3.9.2\r\n- OS: Raspbian 11 (Bullseye)\r\n- Pymodbus: 3.6.4\r\n- Modbus Hardware (if used): Raspberry Pi 4B GPIO Pins 14,15 (using UART port) to a non-pymodbus device. \r\n\r\n### Pymodbus Specific\r\n\r\n- Client: rtu - async serial \r\n\r\n### Description\r\n\r\nI have successfully run the client with the synchronous client but when I switch to the asynchronous client it no longer works. On viewing the logs, it looks like there's a mismatch between the transaction numbers and the asynchronous client is perceiving the response to be an unrequested message. How can I fix this \r\n\r\n\r\n### Code \r\n\r\n```python\r\n# code and logs here.\r\n```python\r\n\r\nclient = ModbusClient.AsyncModbusSerialClient(\r\n port=\"/dev/ttyS0\",\r\n method=\"rtu\", \r\n baudrate=9600,\r\n bytesize=8,\r\n parity=\"N\",\r\n stopbits=1\r\n)\r\n\r\nawait client.connect()\r\nresponse1 = await async_client.read_holding_registers(address=124, count=4,unit=0)\r\n\r\n```\r\n### Logs\r\n```sh\r\n2024-02-15 14:16:38,974 DEBUG logging:103 Connecting to /dev/ttyS0.\r\n2024-02-15 14:16:38,975 DEBUG logging:103 Connecting comm\r\n2024-02-15 14:16:38,976 DEBUG logging:103 Connected to comm\r\n2024-02-15 14:16:38,976 DEBUG logging:103 callback_connected called\r\n2024-02-15 14:16:38,977 DEBUG logging:103 Adding transaction 0\r\n2024-02-15 14:16:38,977 DEBUG logging:103 send: 0x0 0x3 0x0 0x7c 0x0 0x4 0x84 0x0\r\n2024-02-15 14:16:39,048 DEBUG logging:103 recv: 0x1 old_data: addr=None\r\n2024-02-15 14:16:39,048 DEBUG logging:103 Processing: 0x1\r\n2024-02-15 14:16:39,049 DEBUG logging:103 recv: 0x3 old_data: addr=None\r\n2024-02-15 14:16:39,049 DEBUG logging:103 Processing: 0x3\r\n2024-02-15 14:16:39,050 DEBUG logging:103 recv: 0x8 old_data: addr=None\r\n2024-02-15 14:16:39,050 DEBUG logging:103 Processing: 0x8\r\n2024-02-15 14:16:39,051 DEBUG logging:103 recv: 0x0 old_data: addr=None\r\n2024-02-15 14:16:39,051 DEBUG logging:103 Processing: 0x0\r\n2024-02-15 14:16:39,051 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,052 DEBUG logging:103 recv: 0x5 old_data: addr=None\r\n2024-02-15 14:16:39,052 DEBUG logging:103 Processing: 0x5\r\n2024-02-15 14:16:39,052 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,053 DEBUG logging:103 recv: 0x0 old_data: addr=None\r\n2024-02-15 14:16:39,053 DEBUG logging:103 Processing: 0x0\r\n2024-02-15 14:16:39,053 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,054 DEBUG logging:103 recv: 0x5 old_data: addr=None\r\n2024-02-15 14:16:39,054 DEBUG logging:103 Processing: 0x5\r\n2024-02-15 14:16:39,054 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,055 DEBUG logging:103 recv: 0x0 old_data: addr=None\r\n2024-02-15 14:16:39,055 DEBUG logging:103 Processing: 0x0\r\n2024-02-15 14:16:39,055 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,056 DEBUG logging:103 recv: 0x0 old_data: addr=None\r\n2024-02-15 14:16:39,056 DEBUG logging:103 Processing: 0x0\r\n2024-02-15 14:16:39,057 DEBUG logging:103 
Frame - not ready\r\n2024-02-15 14:16:39,057 DEBUG logging:103 recv: 0x0 old_data: addr=None\r\n2024-02-15 14:16:39,057 DEBUG logging:103 Processing: 0x0\r\n2024-02-15 14:16:39,057 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,058 DEBUG logging:103 recv: 0x0 old_data: addr=None\r\n2024-02-15 14:16:39,058 DEBUG logging:103 Processing: 0x0\r\n2024-02-15 14:16:39,058 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,059 DEBUG logging:103 recv: 0xc old_data: addr=None\r\n2024-02-15 14:16:39,059 DEBUG logging:103 Processing: 0xc\r\n2024-02-15 14:16:39,060 DEBUG logging:103 Frame - not ready\r\n2024-02-15 14:16:39,061 DEBUG logging:103 recv: 0xd7 old_data: addr=None\r\n2024-02-15 14:16:39,061 DEBUG logging:103 Processing: 0xd7\r\n2024-02-15 14:16:39,061 DEBUG logging:103 Getting Frame - 0x3 0x8 0x0 0x5 0x0 0x5 0x0 0x0 0x0 0x0\r\n2024-02-15 14:16:39,062 DEBUG logging:103 Factory Response[ReadHoldingRegistersResponse': 3]\r\n2024-02-15 14:16:39,062 DEBUG logging:103 Frame advanced, resetting header!!\r\n2024-02-15 14:16:39,062 DEBUG logging:103 Getting transaction 1\r\n2024-02-15 14:16:39,062 DEBUG logging:103 Unrequested message: ReadHoldingRegistersResponse (4)\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"RTU framer.\"\"\"\n# pylint: disable=missing-type-doc\nimport struct\nimport time\n\nfrom pymodbus.exceptions import (\n InvalidMessageReceivedException,\n ModbusIOException,\n)\nfrom pymodbus.framer.base import BYTE_ORDER, FRAME_HEADER, ModbusFramer\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import ModbusTransactionState, checkCRC, computeCRC\n\n\nRTU_FRAME_HEADER = BYTE_ORDER + FRAME_HEADER\n\n\n# --------------------------------------------------------------------------- #\n# Modbus RTU Message\n# --------------------------------------------------------------------------- #\nclass ModbusRtuFramer(ModbusFramer):\n \"\"\"Modbus RTU Frame controller.\n\n [ Start Wait ] [Address ][ Function Code] [ Data ][ CRC ][ End Wait ]\n 3.5 chars 1b 1b Nb 2b 3.5 chars\n\n Wait refers to the amount of time required to transmit at least x many\n characters. In this case it is 3.5 characters. Also, if we receive a\n wait of 1.5 characters at any point, we must trigger an error message.\n Also, it appears as though this message is little endian. 
The logic is\n simplified as the following::\n\n block-on-read:\n read until 3.5 delay\n check for errors\n decode\n\n The following table is a listing of the baud wait times for the specified\n baud rates::\n\n ------------------------------------------------------------------\n Baud 1.5c (18 bits) 3.5c (38 bits)\n ------------------------------------------------------------------\n 1200 13333.3 us 31666.7 us\n 4800 3333.3 us 7916.7 us\n 9600 1666.7 us 3958.3 us\n 19200 833.3 us 1979.2 us\n 38400 416.7 us 989.6 us\n ------------------------------------------------------------------\n 1 Byte = start + 8 bits + parity + stop = 11 bits\n (1/Baud)(bits) = delay seconds\n \"\"\"\n\n method = \"rtu\"\n\n def __init__(self, decoder, client=None):\n \"\"\"Initialize a new instance of the framer.\n\n :param decoder: The decoder factory implementation to use\n \"\"\"\n super().__init__(decoder, client)\n self._hsize = 0x01\n self._end = b\"\\x0d\\x0a\"\n self._min_frame_size = 4\n self.function_codes = decoder.lookup.keys() if decoder else {}\n\n # ----------------------------------------------------------------------- #\n # Private Helper Functions\n # ----------------------------------------------------------------------- #\n def decode_data(self, data):\n \"\"\"Decode data.\"\"\"\n if len(data) > self._hsize:\n uid = int(data[0])\n fcode = int(data[1])\n return {\"slave\": uid, \"fcode\": fcode}\n return {}\n\n def checkFrame(self):\n \"\"\"Check if the next frame is available.\n\n Return True if we were successful.\n\n 1. Populate header\n 2. Discard frame if UID does not match\n \"\"\"\n try:\n self.populateHeader()\n frame_size = self._header[\"len\"]\n data = self._buffer[: frame_size - 2]\n crc = self._header[\"crc\"]\n crc_val = (int(crc[0]) << 8) + int(crc[1])\n return checkCRC(data, crc_val)\n except (IndexError, KeyError, struct.error):\n return False\n\n def advanceFrame(self):\n \"\"\"Skip over the current framed message.\n\n This allows us to skip over the current message after we have processed\n it or determined that it contains an error. 
It also has to reset the\n current frame header handle\n \"\"\"\n self._buffer = self._buffer[self._header[\"len\"] :]\n Log.debug(\"Frame advanced, resetting header!!\")\n self._header = {\"uid\": 0x00, \"len\": 0, \"crc\": b\"\\x00\\x00\"}\n\n def resetFrame(self):\n \"\"\"Reset the entire message frame.\n\n This allows us to skip over errors that may be in the stream.\n It is hard to know if we are simply out of sync or if there is\n an error in the stream as we have no way to check the start or\n end of the message (python just doesn't have the resolution to\n check for millisecond delays).\n \"\"\"\n x = self._buffer\n super().resetFrame()\n self._buffer = x\n\n def isFrameReady(self):\n \"\"\"Check if we should continue decode logic.\n\n This is meant to be used in a while loop in the decoding phase to let\n the decoder know that there is still data in the buffer.\n\n :returns: True if ready, False otherwise\n \"\"\"\n size = self._header.get(\"len\", 0)\n if not size and len(self._buffer) > self._hsize:\n try:\n # Frame is ready only if populateHeader() successfully\n # populates crc field which finishes RTU frame otherwise,\n # if buffer is not yet long enough, populateHeader() raises IndexError\n size = self.populateHeader()\n except IndexError:\n return False\n\n return len(self._buffer) >= size if size > 0 else False\n\n def populateHeader(self, data=None):\n \"\"\"Try to set the headers `uid`, `len` and `crc`.\n\n This method examines `self._buffer` and writes meta\n information into `self._header`.\n\n Beware that this method will raise an IndexError if\n `self._buffer` is not yet long enough.\n \"\"\"\n data = data if data is not None else self._buffer\n self._header[\"uid\"] = int(data[0])\n self._header[\"tid\"] = int(data[0])\n size = self.get_expected_response_length(data)\n self._header[\"len\"] = size\n\n if len(data) < size:\n # crc yet not available\n raise IndexError\n self._header[\"crc\"] = data[size - 2 : size]\n return size\n\n def getFrame(self):\n \"\"\"Get the next frame from the buffer.\n\n :returns: The frame data or \"\"\n \"\"\"\n start = self._hsize\n end = self._header[\"len\"] - 2\n buffer = self._buffer[start:end]\n if end > 0:\n Log.debug(\"Getting Frame - {}\", buffer, \":hex\")\n return buffer\n return b\"\"\n\n def populateResult(self, result):\n \"\"\"Populate the modbus result header.\n\n The serial packets do not have any header information\n that is copied.\n\n :param result: The response packet\n \"\"\"\n result.slave_id = self._header[\"uid\"]\n result.transaction_id = self._header[\"tid\"]\n\n def getFrameStart(self, slaves, broadcast, skip_cur_frame):\n \"\"\"Scan buffer for a relevant frame start.\"\"\"\n start = 1 if skip_cur_frame else 0\n if (buf_len := len(self._buffer)) < 4:\n return False\n for i in range(start, buf_len - 3): # <slave id><function code><crc 2 bytes>\n if not broadcast and self._buffer[i] not in slaves:\n continue\n if (\n self._buffer[i + 1] not in self.function_codes\n and (self._buffer[i + 1] - 0x80) not in self.function_codes\n ):\n continue\n if i:\n self._buffer = self._buffer[i:] # remove preceding trash.\n return True\n if buf_len > 3:\n self._buffer = self._buffer[-3:]\n return False\n\n # ----------------------------------------------------------------------- #\n # Public Member Functions\n # ----------------------------------------------------------------------- #\n def frameProcessIncomingPacket(self, single, callback, slave, _tid=None, **kwargs):\n \"\"\"Process new packet pattern.\"\"\"\n broadcast = not 
slave[0]\n skip_cur_frame = False\n while self.getFrameStart(slave, broadcast, skip_cur_frame):\n if not self.isFrameReady():\n Log.debug(\"Frame - not ready\")\n break\n if not self.checkFrame():\n Log.debug(\"Frame check failed, ignoring!!\")\n self.resetFrame()\n skip_cur_frame = True\n continue\n if not self._validate_slave_id(slave, single):\n header_txt = self._header[\"uid\"]\n Log.debug(\"Not a valid slave id - {}, ignoring!!\", header_txt)\n self.resetFrame()\n skip_cur_frame = True\n continue\n self._process(callback)\n\n def buildPacket(self, message):\n \"\"\"Create a ready to send modbus packet.\n\n :param message: The populated request/response to send\n \"\"\"\n data = message.encode()\n packet = (\n struct.pack(RTU_FRAME_HEADER, message.slave_id, message.function_code)\n + data\n )\n packet += struct.pack(\">H\", computeCRC(packet))\n # Ensure that transaction is actually the slave id for serial comms\n message.transaction_id = message.slave_id\n return packet\n\n def sendPacket(self, message):\n \"\"\"Send packets on the bus with 3.5char delay between frames.\n\n :param message: Message to be sent over the bus\n :return:\n \"\"\"\n super().resetFrame()\n start = time.time()\n timeout = start + self.client.comm_params.timeout_connect\n while self.client.state != ModbusTransactionState.IDLE:\n if self.client.state == ModbusTransactionState.TRANSACTION_COMPLETE:\n timestamp = round(time.time(), 6)\n Log.debug(\n \"Changing state to IDLE - Last Frame End - {} Current Time stamp - {}\",\n self.client.last_frame_end,\n timestamp,\n )\n if self.client.last_frame_end:\n idle_time = self.client.idle_time()\n if round(timestamp - idle_time, 6) <= self.client.silent_interval:\n Log.debug(\n \"Waiting for 3.5 char before next send - {} ms\",\n self.client.silent_interval * 1000,\n )\n time.sleep(self.client.silent_interval)\n else:\n # Recovering from last error ??\n time.sleep(self.client.silent_interval)\n self.client.state = ModbusTransactionState.IDLE\n elif self.client.state == ModbusTransactionState.RETRYING:\n # Simple lets settle down!!!\n # To check for higher baudrates\n time.sleep(self.client.comm_params.timeout_connect)\n break\n elif time.time() > timeout:\n Log.debug(\n \"Spent more time than the read time out, \"\n \"resetting the transaction to IDLE\"\n )\n self.client.state = ModbusTransactionState.IDLE\n else:\n Log.debug(\"Sleeping\")\n time.sleep(self.client.silent_interval)\n size = self.client.send(message)\n self.client.last_frame_end = round(time.time(), 6)\n return size\n\n def recvPacket(self, size):\n \"\"\"Receive packet from the bus with specified len.\n\n :param size: Number of bytes to read\n :return:\n \"\"\"\n result = self.client.recv(size)\n self.client.last_frame_end = round(time.time(), 6)\n return result\n\n def _process(self, callback, error=False):\n \"\"\"Process incoming packets irrespective error condition.\"\"\"\n data = self._buffer if error else self.getFrame()\n if (result := self.decoder.decode(data)) is None:\n raise ModbusIOException(\"Unable to decode request\")\n if error and result.function_code < 0x80:\n raise InvalidMessageReceivedException(str(result))\n self.populateResult(result)\n self.advanceFrame()\n callback(result) # defer or push to a thread?\n\n def get_expected_response_length(self, data):\n \"\"\"Get the expected response length.\n\n :param data: Message data read so far\n :raises IndexError: If not enough data to read byte count\n :return: Total frame size\n \"\"\"\n func_code = int(data[1])\n pdu_class = 
self.decoder.lookupPduClass(func_code)\n return pdu_class.calculateRtuFrameSize(data)\n\n\n# __END__\n", "path": "pymodbus/framer/rtu_framer.py"}], "after_files": [{"content": "\"\"\"RTU framer.\"\"\"\n# pylint: disable=missing-type-doc\nimport struct\nimport time\n\nfrom pymodbus.exceptions import (\n InvalidMessageReceivedException,\n ModbusIOException,\n)\nfrom pymodbus.framer.base import BYTE_ORDER, FRAME_HEADER, ModbusFramer\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import ModbusTransactionState, checkCRC, computeCRC\n\n\nRTU_FRAME_HEADER = BYTE_ORDER + FRAME_HEADER\n\n\n# --------------------------------------------------------------------------- #\n# Modbus RTU Message\n# --------------------------------------------------------------------------- #\nclass ModbusRtuFramer(ModbusFramer):\n \"\"\"Modbus RTU Frame controller.\n\n [ Start Wait ] [Address ][ Function Code] [ Data ][ CRC ][ End Wait ]\n 3.5 chars 1b 1b Nb 2b 3.5 chars\n\n Wait refers to the amount of time required to transmit at least x many\n characters. In this case it is 3.5 characters. Also, if we receive a\n wait of 1.5 characters at any point, we must trigger an error message.\n Also, it appears as though this message is little endian. The logic is\n simplified as the following::\n\n block-on-read:\n read until 3.5 delay\n check for errors\n decode\n\n The following table is a listing of the baud wait times for the specified\n baud rates::\n\n ------------------------------------------------------------------\n Baud 1.5c (18 bits) 3.5c (38 bits)\n ------------------------------------------------------------------\n 1200 13333.3 us 31666.7 us\n 4800 3333.3 us 7916.7 us\n 9600 1666.7 us 3958.3 us\n 19200 833.3 us 1979.2 us\n 38400 416.7 us 989.6 us\n ------------------------------------------------------------------\n 1 Byte = start + 8 bits + parity + stop = 11 bits\n (1/Baud)(bits) = delay seconds\n \"\"\"\n\n method = \"rtu\"\n\n def __init__(self, decoder, client=None):\n \"\"\"Initialize a new instance of the framer.\n\n :param decoder: The decoder factory implementation to use\n \"\"\"\n super().__init__(decoder, client)\n self._hsize = 0x01\n self._end = b\"\\x0d\\x0a\"\n self._min_frame_size = 4\n self.function_codes = decoder.lookup.keys() if decoder else {}\n\n # ----------------------------------------------------------------------- #\n # Private Helper Functions\n # ----------------------------------------------------------------------- #\n def decode_data(self, data):\n \"\"\"Decode data.\"\"\"\n if len(data) > self._hsize:\n uid = int(data[0])\n fcode = int(data[1])\n return {\"slave\": uid, \"fcode\": fcode}\n return {}\n\n def checkFrame(self):\n \"\"\"Check if the next frame is available.\n\n Return True if we were successful.\n\n 1. Populate header\n 2. Discard frame if UID does not match\n \"\"\"\n try:\n self.populateHeader()\n frame_size = self._header[\"len\"]\n data = self._buffer[: frame_size - 2]\n crc = self._header[\"crc\"]\n crc_val = (int(crc[0]) << 8) + int(crc[1])\n return checkCRC(data, crc_val)\n except (IndexError, KeyError, struct.error):\n return False\n\n def advanceFrame(self):\n \"\"\"Skip over the current framed message.\n\n This allows us to skip over the current message after we have processed\n it or determined that it contains an error. 
It also has to reset the\n current frame header handle\n \"\"\"\n self._buffer = self._buffer[self._header[\"len\"] :]\n Log.debug(\"Frame advanced, resetting header!!\")\n self._header = {\"uid\": 0x00, \"len\": 0, \"crc\": b\"\\x00\\x00\"}\n\n def resetFrame(self):\n \"\"\"Reset the entire message frame.\n\n This allows us to skip over errors that may be in the stream.\n It is hard to know if we are simply out of sync or if there is\n an error in the stream as we have no way to check the start or\n end of the message (python just doesn't have the resolution to\n check for millisecond delays).\n \"\"\"\n x = self._buffer\n super().resetFrame()\n self._buffer = x\n\n def isFrameReady(self):\n \"\"\"Check if we should continue decode logic.\n\n This is meant to be used in a while loop in the decoding phase to let\n the decoder know that there is still data in the buffer.\n\n :returns: True if ready, False otherwise\n \"\"\"\n size = self._header.get(\"len\", 0)\n if not size and len(self._buffer) > self._hsize:\n try:\n # Frame is ready only if populateHeader() successfully\n # populates crc field which finishes RTU frame otherwise,\n # if buffer is not yet long enough, populateHeader() raises IndexError\n size = self.populateHeader()\n except IndexError:\n return False\n\n return len(self._buffer) >= size if size > 0 else False\n\n def populateHeader(self, data=None):\n \"\"\"Try to set the headers `uid`, `len` and `crc`.\n\n This method examines `self._buffer` and writes meta\n information into `self._header`.\n\n Beware that this method will raise an IndexError if\n `self._buffer` is not yet long enough.\n \"\"\"\n data = data if data is not None else self._buffer\n self._header[\"uid\"] = int(data[0])\n self._header[\"tid\"] = int(data[0])\n size = self.get_expected_response_length(data)\n self._header[\"len\"] = size\n\n if len(data) < size:\n # crc yet not available\n raise IndexError\n self._header[\"crc\"] = data[size - 2 : size]\n return size\n\n def getFrame(self):\n \"\"\"Get the next frame from the buffer.\n\n :returns: The frame data or \"\"\n \"\"\"\n start = self._hsize\n end = self._header[\"len\"] - 2\n buffer = self._buffer[start:end]\n if end > 0:\n Log.debug(\"Getting Frame - {}\", buffer, \":hex\")\n return buffer\n return b\"\"\n\n def populateResult(self, result):\n \"\"\"Populate the modbus result header.\n\n The serial packets do not have any header information\n that is copied.\n\n :param result: The response packet\n \"\"\"\n result.slave_id = self._header[\"uid\"]\n result.transaction_id = self._header[\"tid\"]\n\n def getFrameStart(self, slaves, broadcast, skip_cur_frame):\n \"\"\"Scan buffer for a relevant frame start.\"\"\"\n start = 1 if skip_cur_frame else 0\n if (buf_len := len(self._buffer)) < 4:\n return False\n for i in range(start, buf_len - 3): # <slave id><function code><crc 2 bytes>\n if not broadcast and self._buffer[i] not in slaves:\n continue\n if (\n self._buffer[i + 1] not in self.function_codes\n and (self._buffer[i + 1] - 0x80) not in self.function_codes\n ):\n continue\n if i:\n self._buffer = self._buffer[i:] # remove preceding trash.\n return True\n if buf_len > 3:\n self._buffer = self._buffer[-3:]\n return False\n\n # ----------------------------------------------------------------------- #\n # Public Member Functions\n # ----------------------------------------------------------------------- #\n def frameProcessIncomingPacket(self, single, callback, slave, _tid=None, **kwargs):\n \"\"\"Process new packet pattern.\"\"\"\n broadcast = not 
slave[0]\n skip_cur_frame = False\n while self.getFrameStart(slave, broadcast, skip_cur_frame):\n if not self.isFrameReady():\n Log.debug(\"Frame - not ready\")\n break\n if not self.checkFrame():\n Log.debug(\"Frame check failed, ignoring!!\")\n self.resetFrame()\n skip_cur_frame = True\n continue\n if not self._validate_slave_id(slave, single):\n header_txt = self._header[\"uid\"]\n Log.debug(\"Not a valid slave id - {}, ignoring!!\", header_txt)\n self.resetFrame()\n skip_cur_frame = True\n continue\n self._process(callback)\n\n def buildPacket(self, message):\n \"\"\"Create a ready to send modbus packet.\n\n :param message: The populated request/response to send\n \"\"\"\n data = message.encode()\n packet = (\n struct.pack(RTU_FRAME_HEADER, message.slave_id, message.function_code)\n + data\n )\n packet += struct.pack(\">H\", computeCRC(packet))\n # Ensure that transaction is actually the slave id for serial comms\n if message.slave_id:\n message.transaction_id = message.slave_id\n return packet\n\n def sendPacket(self, message):\n \"\"\"Send packets on the bus with 3.5char delay between frames.\n\n :param message: Message to be sent over the bus\n :return:\n \"\"\"\n super().resetFrame()\n start = time.time()\n timeout = start + self.client.comm_params.timeout_connect\n while self.client.state != ModbusTransactionState.IDLE:\n if self.client.state == ModbusTransactionState.TRANSACTION_COMPLETE:\n timestamp = round(time.time(), 6)\n Log.debug(\n \"Changing state to IDLE - Last Frame End - {} Current Time stamp - {}\",\n self.client.last_frame_end,\n timestamp,\n )\n if self.client.last_frame_end:\n idle_time = self.client.idle_time()\n if round(timestamp - idle_time, 6) <= self.client.silent_interval:\n Log.debug(\n \"Waiting for 3.5 char before next send - {} ms\",\n self.client.silent_interval * 1000,\n )\n time.sleep(self.client.silent_interval)\n else:\n # Recovering from last error ??\n time.sleep(self.client.silent_interval)\n self.client.state = ModbusTransactionState.IDLE\n elif self.client.state == ModbusTransactionState.RETRYING:\n # Simple lets settle down!!!\n # To check for higher baudrates\n time.sleep(self.client.comm_params.timeout_connect)\n break\n elif time.time() > timeout:\n Log.debug(\n \"Spent more time than the read time out, \"\n \"resetting the transaction to IDLE\"\n )\n self.client.state = ModbusTransactionState.IDLE\n else:\n Log.debug(\"Sleeping\")\n time.sleep(self.client.silent_interval)\n size = self.client.send(message)\n self.client.last_frame_end = round(time.time(), 6)\n return size\n\n def recvPacket(self, size):\n \"\"\"Receive packet from the bus with specified len.\n\n :param size: Number of bytes to read\n :return:\n \"\"\"\n result = self.client.recv(size)\n self.client.last_frame_end = round(time.time(), 6)\n return result\n\n def _process(self, callback, error=False):\n \"\"\"Process incoming packets irrespective error condition.\"\"\"\n data = self._buffer if error else self.getFrame()\n if (result := self.decoder.decode(data)) is None:\n raise ModbusIOException(\"Unable to decode request\")\n if error and result.function_code < 0x80:\n raise InvalidMessageReceivedException(str(result))\n self.populateResult(result)\n self.advanceFrame()\n callback(result) # defer or push to a thread?\n\n def get_expected_response_length(self, data):\n \"\"\"Get the expected response length.\n\n :param data: Message data read so far\n :raises IndexError: If not enough data to read byte count\n :return: Total frame size\n \"\"\"\n func_code = int(data[1])\n 
pdu_class = self.decoder.lookupPduClass(func_code)\n return pdu_class.calculateRtuFrameSize(data)\n\n\n# __END__\n", "path": "pymodbus/framer/rtu_framer.py"}]} |
gh_patches_debug_1368 | rasdani/github-patches | git_diff | chainer__chainer-781 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support numpy 1.10
numpy 1.10.0 was released on 2015/10/07
https://pypi.python.org/pypi/numpy/1.10.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/creation/ranges.py`
Content:
```
1 import numpy
2
3 import cupy
4 from cupy import core
5
6
7 def arange(start, stop=None, step=1, dtype=None):
8 """Rerurns an array with evenly spaced values within a given interval.
9
10 Values are generated within the half-open interval [start, stop). The first
11 three arguments are mapped like the ``range`` built-in function, i.e. start
12 and step are optional.
13
14 Args:
15 start: Start of the interval.
16 stop: End of the interval.
17 step: Step width between each pair of consecutive values.
18 dtype: Data type specifier. It is inferred from other arguments by
19 default.
20
21 Returns:
22 cupy.ndarray: The 1-D array of range values.
23
24 .. seealso:: :func:`numpy.arange`
25
26 """
27 if dtype is None:
28 if any(numpy.dtype(type(val)).kind == 'f'
29 for val in (start, stop, step)):
30 dtype = float
31 else:
32 dtype = int
33
34 if stop is None:
35 stop = start
36 start = 0
37 size = int(numpy.ceil((stop - start) / step))
38 if size <= 0:
39 return cupy.empty((0,), dtype=dtype)
40
41 ret = cupy.empty((size,), dtype=dtype)
42 typ = numpy.dtype(dtype).type
43 _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)
44 return ret
45
46
47 def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
48 """Returns an array with evenly-spaced values within a given interval.
49
50 Instead of specifying the step width like :func:`cupy.arange`, this
51 function requires the total number of elements specified.
52
53 Args:
54 start: Start of the interval.
55 stop: End of the interval.
56 num: Number of elements.
57 endpoint (bool): If True, the stop value is included as the last
58 element. Otherwise, the stop value is omitted.
59 retstep (bool): If True, this function returns (array, step).
60 Otherwise, it returns only the array.
61 dtype: Data type specifier. It is inferred from the start and stop
62 arguments by default.
63
64 Returns:
65 cupy.ndarray: The 1-D array of ranged values.
66
67 """
68 if num < 0:
69 raise ValueError('linspace with num<0 is not supported')
70
71 if dtype is None:
72 # In actual implementation, only float is used
73 dtype = float
74
75 ret = cupy.empty((num,), dtype=dtype)
76 if num == 0:
77 step = float('nan')
78 elif num == 1:
79 ret.fill(start)
80 step = float('nan')
81 else:
82 div = (num - 1) if endpoint else num
83 step = float(stop - start) / div
84 stop = float(stop)
85
86 if step == 0.0:
87 # for underflow
88 _linspace_ufunc_underflow(start, stop - start, div, ret)
89 else:
90 _linspace_ufunc(start, step, ret)
91
92 if endpoint:
93 ret[-1] = stop
94
95 if retstep:
96 return ret, step
97 else:
98 return ret
99
100
101 # TODO(okuta): Implement logspace
102
103
104 # TODO(okuta): Implement meshgrid
105
106
107 # mgrid
108 # ogrid
109
110
111 _arange_ufunc = core.create_ufunc(
112 'cupy_arange',
113 ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
114 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
115 'out0 = in0 + i * in1')
116
117
118 _linspace_ufunc = core.create_ufunc(
119 'cupy_linspace',
120 ('dd->d',),
121 'out0 = in0 + i * in1')
122
123 _linspace_ufunc_underflow = core.create_ufunc(
124 'cupy_linspace',
125 ('ddd->d',),
126 'out0 = in0 + i * in1 / in2')
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/creation/ranges.py b/cupy/creation/ranges.py
--- a/cupy/creation/ranges.py
+++ b/cupy/creation/ranges.py
@@ -85,9 +85,10 @@
if step == 0.0:
# for underflow
- _linspace_ufunc_underflow(start, stop - start, div, ret)
+ _linspace_ufunc_underflow(start, stop - start, div, ret,
+ casting='unsafe')
else:
- _linspace_ufunc(start, step, ret)
+ _linspace_ufunc(start, step, ret, casting='unsafe')
if endpoint:
ret[-1] = stop
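
The `casting='unsafe'` argument added by this patch lines up with a NumPy 1.10 behaviour change: the `same_kind` default casting rule for writing ufunc results into an existing output array, previously only a deprecation warning, became a hard error. A minimal NumPy-only sketch of that failure mode follows; the array and variable names are illustrative and not taken from the repository:

```python
import numpy as np

ret = np.empty(5, dtype=np.int64)  # integer output buffer
step = 1.5                         # float input forces a float64 result

# Under NumPy >= 1.10 the default ufunc casting rule ('same_kind') rejects
# writing float64 results into an int64 output array.
try:
    np.multiply(np.arange(5), step, out=ret)
except TypeError as exc:
    print("rejected under same_kind casting:", exc)

# Requesting 'unsafe' casting restores the older behaviour; this is the same
# keyword the patch forwards to the _linspace_ufunc* calls above.
np.multiply(np.arange(5), step, out=ret, casting="unsafe")
print(ret)  # truncated integer results: [0 1 3 4 6]
```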
| {"golden_diff": "diff --git a/cupy/creation/ranges.py b/cupy/creation/ranges.py\n--- a/cupy/creation/ranges.py\n+++ b/cupy/creation/ranges.py\n@@ -85,9 +85,10 @@\n \n if step == 0.0:\n # for underflow\n- _linspace_ufunc_underflow(start, stop - start, div, ret)\n+ _linspace_ufunc_underflow(start, stop - start, div, ret,\n+ casting='unsafe')\n else:\n- _linspace_ufunc(start, step, ret)\n+ _linspace_ufunc(start, step, ret, casting='unsafe')\n \n if endpoint:\n ret[-1] = stop\n", "issue": "Support numpy 1.10\nnumpy 1.10.0 is released on 2015/10/07\n\nhttps://pypi.python.org/pypi/numpy/1.10.0\n\n", "before_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n \"\"\"Rerurns an array with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop). The first\n three arguments are mapped like the ``range`` built-in function, i.e. start\n and step are optional.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n step: Step width between each pair of consecutive values.\n dtype: Data type specifier. It is inferred from other arguments by\n default.\n\n Returns:\n cupy.ndarray: The 1-D array of range values.\n\n .. seealso:: :func:`numpy.arange`\n\n \"\"\"\n if dtype is None:\n if any(numpy.dtype(type(val)).kind == 'f'\n for val in (start, stop, step)):\n dtype = float\n else:\n dtype = int\n\n if stop is None:\n stop = start\n start = 0\n size = int(numpy.ceil((stop - start) / step))\n if size <= 0:\n return cupy.empty((0,), dtype=dtype)\n\n ret = cupy.empty((size,), dtype=dtype)\n typ = numpy.dtype(dtype).type\n _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)\n return ret\n\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):\n \"\"\"Returns an array with evenly-spaced values within a given interval.\n\n Instead of specifying the step width like :func:`cupy.arange`, this\n function requires the total number of elements specified.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n num: Number of elements.\n endpoint (bool): If True, the stop value is included as the last\n element. Otherwise, the stop value is omitted.\n retstep (bool): If True, this function returns (array, step).\n Otherwise, it returns only the array.\n dtype: Data type specifier. 
It is inferred from the start and stop\n arguments by default.\n\n Returns:\n cupy.ndarray: The 1-D array of ranged values.\n\n \"\"\"\n if num < 0:\n raise ValueError('linspace with num<0 is not supported')\n\n if dtype is None:\n # In actual implementation, only float is used\n dtype = float\n\n ret = cupy.empty((num,), dtype=dtype)\n if num == 0:\n step = float('nan')\n elif num == 1:\n ret.fill(start)\n step = float('nan')\n else:\n div = (num - 1) if endpoint else num\n step = float(stop - start) / div\n stop = float(stop)\n\n if step == 0.0:\n # for underflow\n _linspace_ufunc_underflow(start, stop - start, div, ret)\n else:\n _linspace_ufunc(start, step, ret)\n\n if endpoint:\n ret[-1] = stop\n\n if retstep:\n return ret, step\n else:\n return ret\n\n\n# TODO(okuta): Implement logspace\n\n\n# TODO(okuta): Implement meshgrid\n\n\n# mgrid\n# ogrid\n\n\n_arange_ufunc = core.create_ufunc(\n 'cupy_arange',\n ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',\n 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = in0 + i * in1')\n\n\n_linspace_ufunc = core.create_ufunc(\n 'cupy_linspace',\n ('dd->d',),\n 'out0 = in0 + i * in1')\n\n_linspace_ufunc_underflow = core.create_ufunc(\n 'cupy_linspace',\n ('ddd->d',),\n 'out0 = in0 + i * in1 / in2')\n", "path": "cupy/creation/ranges.py"}], "after_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n \"\"\"Rerurns an array with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop). The first\n three arguments are mapped like the ``range`` built-in function, i.e. start\n and step are optional.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n step: Step width between each pair of consecutive values.\n dtype: Data type specifier. It is inferred from other arguments by\n default.\n\n Returns:\n cupy.ndarray: The 1-D array of range values.\n\n .. seealso:: :func:`numpy.arange`\n\n \"\"\"\n if dtype is None:\n if any(numpy.dtype(type(val)).kind == 'f'\n for val in (start, stop, step)):\n dtype = float\n else:\n dtype = int\n\n if stop is None:\n stop = start\n start = 0\n size = int(numpy.ceil((stop - start) / step))\n if size <= 0:\n return cupy.empty((0,), dtype=dtype)\n\n ret = cupy.empty((size,), dtype=dtype)\n typ = numpy.dtype(dtype).type\n _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)\n return ret\n\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):\n \"\"\"Returns an array with evenly-spaced values within a given interval.\n\n Instead of specifying the step width like :func:`cupy.arange`, this\n function requires the total number of elements specified.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n num: Number of elements.\n endpoint (bool): If True, the stop value is included as the last\n element. Otherwise, the stop value is omitted.\n retstep (bool): If True, this function returns (array, step).\n Otherwise, it returns only the array.\n dtype: Data type specifier. 
It is inferred from the start and stop\n arguments by default.\n\n Returns:\n cupy.ndarray: The 1-D array of ranged values.\n\n \"\"\"\n if num < 0:\n raise ValueError('linspace with num<0 is not supported')\n\n if dtype is None:\n # In actual implementation, only float is used\n dtype = float\n\n ret = cupy.empty((num,), dtype=dtype)\n if num == 0:\n step = float('nan')\n elif num == 1:\n ret.fill(start)\n step = float('nan')\n else:\n div = (num - 1) if endpoint else num\n step = float(stop - start) / div\n stop = float(stop)\n\n if step == 0.0:\n # for underflow\n _linspace_ufunc_underflow(start, stop - start, div, ret,\n casting='unsafe')\n else:\n _linspace_ufunc(start, step, ret, casting='unsafe')\n\n if endpoint:\n ret[-1] = stop\n\n if retstep:\n return ret, step\n else:\n return ret\n\n\n# TODO(okuta): Implement logspace\n\n\n# TODO(okuta): Implement meshgrid\n\n\n# mgrid\n# ogrid\n\n\n_arange_ufunc = core.create_ufunc(\n 'cupy_arange',\n ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',\n 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = in0 + i * in1')\n\n\n_linspace_ufunc = core.create_ufunc(\n 'cupy_linspace',\n ('dd->d',),\n 'out0 = in0 + i * in1')\n\n_linspace_ufunc_underflow = core.create_ufunc(\n 'cupy_linspace',\n ('ddd->d',),\n 'out0 = in0 + i * in1 / in2')\n", "path": "cupy/creation/ranges.py"}]} |
gh_patches_debug_1369 | rasdani/github-patches | git_diff | redis__redis-py-1780 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Module installation fails due to missing dependency
https://github.com/redis/redis-py/blob/039488d97ec545b37e903d1b791a88bac8f77973/redis/connection.py#L1
The deprecated distutils module was replaced with the packaging module as part of release v4.0.0b1.
packaging is not a built-in Python module, but it was not added to setup.py as a dependency, which causes applications that require redis-py to fail if packaging isn't already installed on the machine.
The packaging module should probably be added as a dependency in setup.py to resolve this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import find_packages, setup
3
4 import redis
5
6 setup(
7 name="redis",
8 description="Python client for Redis database and key-value store",
9 long_description=open("README.md").read().strip(),
10 long_description_content_type="text/markdown",
11 keywords=["Redis", "key-value store", "database"],
12 license="MIT",
13 version=redis.__version__,
14 packages=find_packages(
15 include=[
16 "redis",
17 "redis.commands",
18 "redis.commands.bf",
19 "redis.commands.json",
20 "redis.commands.search",
21 "redis.commands.timeseries",
22 "redis.commands.graph",
23 ]
24 ),
25 url="https://github.com/redis/redis-py",
26 author="Redis Inc.",
27 author_email="[email protected]",
28 python_requires=">=3.6",
29 install_requires=[
30 "deprecated==1.2.3",
31 "packaging==21.3",
32 ],
33 classifiers=[
34 "Development Status :: 5 - Production/Stable",
35 "Environment :: Console",
36 "Intended Audience :: Developers",
37 "License :: OSI Approved :: MIT License",
38 "Operating System :: OS Independent",
39 "Programming Language :: Python",
40 "Programming Language :: Python :: 3",
41 "Programming Language :: Python :: 3 :: Only",
42 "Programming Language :: Python :: 3.6",
43 "Programming Language :: Python :: 3.7",
44 "Programming Language :: Python :: 3.8",
45 "Programming Language :: Python :: 3.9",
46 "Programming Language :: Python :: 3.10",
47 "Programming Language :: Python :: Implementation :: CPython",
48 "Programming Language :: Python :: Implementation :: PyPy",
49 ],
50 extras_require={
51 "hiredis": ["hiredis>=1.0.0"],
52 },
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,9 +26,12 @@
author="Redis Inc.",
author_email="[email protected]",
python_requires=">=3.6",
+ setup_requires=[
+ "packaging>=21.3",
+ ],
install_requires=[
- "deprecated==1.2.3",
- "packaging==21.3",
+ "deprecated>=1.2.3",
+ "packaging>=21.3",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,9 +26,12 @@\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n+ setup_requires=[\n+ \"packaging>=21.3\",\n+ ],\n install_requires=[\n- \"deprecated==1.2.3\",\n- \"packaging==21.3\",\n+ \"deprecated>=1.2.3\",\n+ \"packaging>=21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "Module installation fails due to missing dependency\nhttps://github.com/redis/redis-py/blob/039488d97ec545b37e903d1b791a88bac8f77973/redis/connection.py#L1\r\nthe deprecated distutils was replaced with the packaging module as part of release v4.0.0b1\r\npackaging is not a builtin python module but was not added to setup.py as a dependency which causes applications that require redis-py to fail if packaging isn't already installed on the machine.\r\nthe packaging module should probably be added as a dependency in setup.py to resolve this\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nimport redis\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=redis.__version__,\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated==1.2.3\",\n \"packaging==21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nimport redis\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=redis.__version__,\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n setup_requires=[\n \"packaging>=21.3\",\n ],\n install_requires=[\n \"deprecated>=1.2.3\",\n 
\"packaging>=21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n },\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1370 | rasdani/github-patches | git_diff | encode__uvicorn-279 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multipart form handling weird behavior
### Background
Hey everyone!
I'm writing an app that processes a multipart form with a file upload. When I serve it with `uvicorn` and run cURL against the endpoint, I get the error `curl: (56) Illegal or missing hexadecimal sequence in chunked-encoding`. Data from the backend reaches cURL, but cURL still returns this odd error.
Looking into the `ascii-trace` output, I can see that the data chunk from the backend mysteriously arrives at cURL without the length line that should be at the beginning of the chunk.
```
<= Recv data, 20 bytes (0x14)
0000: Hello, world!
000f: 0
0012:
```
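For reference, a correctly framed chunked body for this 13-byte payload would begin with a hexadecimal chunk-size line; the sketch below is illustrative:

```python
# The same payload with correct chunked framing: "d" (13 bytes, hex) is the
# size line that is missing from the capture above.
expected = b"d\r\nHello, world!\r\n0\r\n\r\n"
```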
However, dtrace and Wireshark showed me that the chunk data was sent from `uvicorn` with its length line, as it should be. Another interesting fact is that I get no cURL error when serving the app with daphne (`daphne -p 8000 app:App`).
What am I missing? 🤔
### Environment
1. Mac OS X
2. uvicorn==0.3.21
3. Python 3.7.0 (with pipenv)
4. uvloop / asyncio gives same result
### Steps to reproduce
1. Create snippet:
```python
class App():
def __init__(self, scope):
self.scope = scope
async def __call__(self, receive, send):
while True:
message = await receive()
print('(%s) message from recv is %r' % (id(self),message))
if message["type"] == "http.disconnect":
return
else:
if not message.get("more_body"):
await send({
'type': 'http.response.start',
'status': 200,
'headers': [[b'content-type', b'text/plain']]
})
await send({
'type': 'http.response.body',
'body': b'Hello, world!',
'more_body': False
})
```
2. Run the HTTP server
```
python uvicorn app:App
```
3. Query with cURL
```
curl --trace-ascii curl.trace-uvicorn \
--form file=@foo \
--form file2=@foo \
http://localhost:8000/api/upload
curl: (56) Illegal or missing hexadecimal sequence in chunked-encoding
```
4. Check what's in trace
```
$ cat curl.trace-uvicorn
== Info: Trying ::1...
== Info: TCP_NODELAY set
== Info: Connection failed
== Info: connect to ::1 port 8000 failed: Connection refused
== Info: Trying fe80::1...
== Info: TCP_NODELAY set
== Info: Connection failed
== Info: connect to fe80::1 port 8000 failed: Connection refused
== Info: Trying 127.0.0.1...
== Info: TCP_NODELAY set
== Info: Connected to localhost (127.0.0.1) port 8000 (#0)
=> Send header, 218 bytes (0xda)
0000: POST /api/upload HTTP/1.1
001b: Host: localhost:8000
0031: User-Agent: curl/7.54.0
004a: Accept: */*
0057: Content-Length: 361
006c: Expect: 100-continue
0082: Content-Type: multipart/form-data; boundary=--------------------
00c2: ----68de9f32447ad9b5
00d8:
<= Recv header, 23 bytes (0x17)
0000: HTTP/1.1 100 Continue
== Info: Done waiting for 100-continue
=> Send data, 147 bytes (0x93)
0000: --------------------------68de9f32447ad9b5
002c: Content-Disposition: form-data; name="file"; filename="foo"
0069: Content-Type: application/octet-stream
0091:
=> Send data, 8 bytes (0x8)
0000: content.
=> Send data, 150 bytes (0x96)
0000:
0002: --------------------------68de9f32447ad9b5
002e: Content-Disposition: form-data; name="file2"; filename="foo"
006c: Content-Type: application/octet-stream
0094:
=> Send data, 8 bytes (0x8)
0000: content.
=> Send data, 48 bytes (0x30)
0000:
0002: --------------------------68de9f32447ad9b5--
<= Recv header, 17 bytes (0x11)
0000: HTTP/1.1 200 OK
<= Recv header, 17 bytes (0x11)
0000: server: uvicorn
<= Recv header, 37 bytes (0x25)
0000: date: Tue, 20 Nov 2018 04:21:02 GMT
<= Recv header, 26 bytes (0x1a)
0000: content-type: text/plain
<= Recv header, 28 bytes (0x1c)
0000: transfer-encoding: chunked
<= Recv data, 20 bytes (0x14)
0000: Hello, world!
000f: 0
0012:
== Info: Illegal or missing hexadecimal sequence in chunked-encoding
== Info: stopped the pause stream!
== Info: Closing connection 0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/http/httptools_impl.py`
Content:
```
1 import asyncio
2 from email.utils import formatdate
3 import http
4 import logging
5 import time
6 import urllib
7 from uvicorn.global_state import GlobalState
8 from uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl
9
10 import httptools
11
12
13 def _get_default_headers():
14 current_time = time.time()
15 current_date = formatdate(current_time, usegmt=True).encode()
16 return b"".join([b"server: uvicorn\r\ndate: ", current_date, b"\r\n"])
17
18
19 def _get_status_line(status_code):
20 try:
21 phrase = http.HTTPStatus(status_code).phrase.encode()
22 except ValueError as exc:
23 phrase = b""
24 return b"".join([b"HTTP/1.1 ", str(status_code).encode(), b" ", phrase, b"\r\n"])
25
26
27 STATUS_LINE = {
28 status_code: _get_status_line(status_code) for status_code in range(100, 600)
29 }
30
31 DEFAULT_HEADERS = _get_default_headers()
32
33 HIGH_WATER_LIMIT = 65536
34
35
36 class FlowControl:
37 def __init__(self, transport):
38 self._transport = transport
39 self.read_paused = False
40 self.write_paused = False
41 self._is_writable_event = asyncio.Event()
42 self._is_writable_event.set()
43
44 async def drain(self):
45 await self._is_writable_event.wait()
46
47 def pause_reading(self):
48 if not self.read_paused:
49 self.read_paused = True
50 self._transport.pause_reading()
51
52 def resume_reading(self):
53 if self.read_paused:
54 self.read_paused = False
55 self._transport.resume_reading()
56
57 def pause_writing(self):
58 if not self.write_paused:
59 self.write_paused = True
60 self._is_writable_event.clear()
61
62 def resume_writing(self):
63 if self.write_paused:
64 self.write_paused = False
65 self._is_writable_event.set()
66
67
68 class ServiceUnavailable:
69 def __init__(self, scope):
70 pass
71
72 async def __call__(self, receive, send):
73 await send(
74 {
75 "type": "http.response.start",
76 "status": 503,
77 "headers": [
78 (b"content-type", b"text/plain; charset=utf-8"),
79 (b"connection", b"close"),
80 ],
81 }
82 )
83 await send({"type": "http.response.body", "body": b"Service Unavailable"})
84
85
86 class HttpToolsProtocol(asyncio.Protocol):
87 def __init__(self, config, global_state=None):
88 self.config = config
89 self.app = config.app
90 self.loop = config.loop or asyncio.get_event_loop()
91 self.logger = config.logger or logging.getLogger("uvicorn")
92 self.access_log = config.access_log and (self.logger.level <= logging.INFO)
93 self.parser = httptools.HttpRequestParser(self)
94 self.ws_protocol_class = config.ws_protocol_class
95 self.root_path = config.root_path
96 self.limit_concurrency = config.limit_concurrency
97
98 # Timeouts
99 self.timeout_keep_alive_task = None
100 self.timeout_keep_alive = config.timeout_keep_alive
101
102 # Global state
103 if global_state is None:
104 global_state = GlobalState()
105 self.global_state = global_state
106 self.connections = global_state.connections
107 self.tasks = global_state.tasks
108
109 # Per-connection state
110 self.transport = None
111 self.flow = None
112 self.server = None
113 self.client = None
114 self.scheme = None
115 self.pipeline = []
116
117 # Per-request state
118 self.url = None
119 self.scope = None
120 self.headers = None
121 self.expect_100_continue = False
122 self.cycle = None
123 self.message_event = asyncio.Event()
124
125 @classmethod
126 def tick(cls):
127 global DEFAULT_HEADERS
128 DEFAULT_HEADERS = _get_default_headers()
129
130 # Protocol interface
131 def connection_made(self, transport):
132 self.connections.add(self)
133
134 self.transport = transport
135 self.flow = FlowControl(transport)
136 self.server = get_local_addr(transport)
137 self.client = get_remote_addr(transport)
138 self.scheme = "https" if is_ssl(transport) else "http"
139
140 if self.logger.level <= logging.DEBUG:
141 self.logger.debug("%s - Connected", self.client)
142
143 def connection_lost(self, exc):
144 self.connections.discard(self)
145
146 if self.logger.level <= logging.DEBUG:
147 self.logger.debug("%s - Disconnected", self.client)
148
149 if self.cycle and not self.cycle.response_complete:
150 self.cycle.disconnected = True
151 self.message_event.set()
152
153 def eof_received(self):
154 pass
155
156 def data_received(self, data):
157 if self.timeout_keep_alive_task is not None:
158 self.timeout_keep_alive_task.cancel()
159 self.timeout_keep_alive_task = None
160
161 try:
162 self.parser.feed_data(data)
163 except httptools.parser.errors.HttpParserError as exc:
164 msg = "Invalid HTTP request received."
165 self.logger.warning(msg)
166 self.transport.close()
167 except httptools.HttpParserUpgrade as exc:
168 self.handle_upgrade()
169
170 def handle_upgrade(self):
171 upgrade_value = None
172 for name, value in self.headers:
173 if name == b"upgrade":
174 upgrade_value = value.lower()
175
176 if upgrade_value != b'websocket' or self.ws_protocol_class is None:
177 msg = "Unsupported upgrade request."
178 self.logger.warning(msg)
179 content = [STATUS_LINE[400], DEFAULT_HEADERS]
180 content.extend([
181 b"content-type: text/plain; charset=utf-8\r\n",
182 b"content-length: " + str(len(msg)).encode('ascii') + b"\r\n",
183 b"connection: close\r\n",
184 b"\r\n",
185 msg.encode('ascii')
186 ])
187 self.transport.write(b"".join(content))
188 self.transport.close()
189 return
190
191 self.connections.discard(self)
192 method = self.scope['method'].encode()
193 output = [method, b' ', self.url, b' HTTP/1.1\r\n']
194 for name, value in self.scope['headers']:
195 output += [name, b": ", value, b"\r\n"]
196 output.append(b'\r\n')
197 protocol = self.ws_protocol_class(
198 config=self.config,
199 global_state=self.global_state,
200 )
201 protocol.connection_made(self.transport)
202 protocol.data_received(b''.join(output))
203 self.transport.set_protocol(protocol)
204
205 # Parser callbacks
206 def on_url(self, url):
207 method = self.parser.get_method()
208 parsed_url = httptools.parse_url(url)
209 path = parsed_url.path.decode("ascii")
210 if '%' in path:
211 path = urllib.parse.unquote(path)
212 self.url = url
213 self.expect_100_continue = False
214 self.headers = []
215 self.scope = {
216 "type": "http",
217 "http_version": "1.1",
218 "server": self.server,
219 "client": self.client,
220 "scheme": self.scheme,
221 "method": method.decode("ascii"),
222 "root_path": self.root_path,
223 "path": path,
224 "query_string": parsed_url.query if parsed_url.query else b"",
225 "headers": self.headers,
226 }
227
228 def on_header(self, name: bytes, value: bytes):
229 name = name.lower()
230 if name == b"expect" and value.lower() == b"100-continue":
231 self.expect_100_continue = True
232 self.headers.append((name, value))
233
234 def on_headers_complete(self):
235 http_version = self.parser.get_http_version()
236 if http_version != "1.1":
237 self.scope["http_version"] = http_version
238 if self.parser.should_upgrade():
239 return
240
241 # Handle 503 responses when 'limit_concurrency' is exceeded.
242 if self.limit_concurrency is not None and (
243 len(self.connections) >= self.limit_concurrency
244 or len(self.tasks) >= self.limit_concurrency
245 ):
246 app = ServiceUnavailable
247 message = "Exceeded concurrency limit."
248 self.logger.warning(message)
249 else:
250 app = self.app
251
252 existing_cycle = self.cycle
253 self.cycle = RequestResponseCycle(
254 scope=self.scope,
255 transport=self.transport,
256 flow=self.flow,
257 logger=self.logger,
258 access_log=self.access_log,
259 message_event=self.message_event,
260 expect_100_continue=self.expect_100_continue,
261 keep_alive=http_version != "1.0",
262 on_response=self.on_response_complete,
263 )
264 if existing_cycle is None or existing_cycle.response_complete:
265 # Standard case - start processing the request.
266 task = self.loop.create_task(self.cycle.run_asgi(app))
267 task.add_done_callback(self.tasks.discard)
268 self.tasks.add(task)
269 else:
270 # Pipelined HTTP requests need to be queued up.
271 self.flow.pause_reading()
272 self.pipeline.insert(0, (self.cycle, app))
273
274 def on_body(self, body: bytes):
275 if self.parser.should_upgrade() or self.cycle.response_complete:
276 return
277 self.cycle.body += body
278 if len(self.cycle.body) > HIGH_WATER_LIMIT:
279 self.flow.pause_reading()
280 self.message_event.set()
281
282 def on_message_complete(self):
283 if self.parser.should_upgrade() or self.cycle.response_complete:
284 return
285 self.cycle.more_body = False
286 self.message_event.set()
287
288 def on_response_complete(self):
289 # Callback for pipelined HTTP requests to be started.
290 self.global_state.total_requests += 1
291
292 if self.transport.is_closing():
293 return
294
295 # Set a short Keep-Alive timeout.
296 self.timeout_keep_alive_task = self.loop.call_later(
297 self.timeout_keep_alive, self.timeout_keep_alive_handler
298 )
299
300 # Unpause data reads if needed.
301 self.flow.resume_reading()
302
303 # Unblock any pipelined events.
304 if self.pipeline:
305 cycle, app = self.pipeline.pop()
306 task = self.loop.create_task(cycle.run_asgi(app))
307 task.add_done_callback(self.tasks.discard)
308 self.tasks.add(task)
309
310 def shutdown(self):
311 """
312 Called by the server to commence a graceful shutdown.
313 """
314 if self.cycle is None or self.cycle.response_complete:
315 self.transport.close()
316 else:
317 self.cycle.keep_alive = False
318
319 def pause_writing(self):
320 """
321 Called by the transport when the write buffer exceeds the high water mark.
322 """
323 self.flow.pause_writing()
324
325 def resume_writing(self):
326 """
327 Called by the transport when the write buffer drops below the low water mark.
328 """
329 self.flow.resume_writing()
330
331 def timeout_keep_alive_handler(self):
332 """
333 Called on a keep-alive connection if no new data is received after a short delay.
334 """
335 if not self.transport.is_closing():
336 self.transport.close()
337
338
339 class RequestResponseCycle:
340 def __init__(
341 self,
342 scope,
343 transport,
344 flow,
345 logger,
346 access_log,
347 message_event,
348 expect_100_continue,
349 keep_alive,
350 on_response,
351 ):
352 self.scope = scope
353 self.transport = transport
354 self.flow = flow
355 self.logger = logger
356 self.access_log = access_log
357 self.message_event = message_event
358 self.on_response = on_response
359
360 # Connection state
361 self.disconnected = False
362 self.keep_alive = keep_alive
363 self.waiting_for_100_continue = expect_100_continue
364
365 # Request state
366 self.body = b""
367 self.more_body = True
368
369 # Response state
370 self.response_started = False
371 self.response_complete = False
372 self.chunked_encoding = None
373 self.expected_content_length = 0
374
375 # ASGI exception wrapper
376 async def run_asgi(self, app):
377 try:
378 asgi = app(self.scope)
379 result = await asgi(self.receive, self.send)
380 except BaseException as exc:
381 msg = "Exception in ASGI application\n"
382 self.logger.error(msg, exc_info=exc)
383 if not self.response_started:
384 await self.send_500_response()
385 else:
386 self.transport.close()
387 else:
388 if result is not None:
389 msg = "ASGI callable should return None, but returned '%s'."
390 self.logger.error(msg, result)
391 self.transport.close()
392 elif not self.response_started and not self.disconnected:
393 msg = "ASGI callable returned without starting response."
394 self.logger.error(msg)
395 await self.send_500_response()
396 elif not self.response_complete and not self.disconnected:
397 msg = "ASGI callable returned without completing response."
398 self.logger.error(msg)
399 self.transport.close()
400 finally:
401 self.on_response = None
402
403 async def send_500_response(self):
404 await self.send(
405 {
406 "type": "http.response.start",
407 "status": 500,
408 "headers": [
409 (b"content-type", b"text/plain; charset=utf-8"),
410 (b"connection", b"close"),
411 ],
412 }
413 )
414 await self.send(
415 {"type": "http.response.body", "body": b"Internal Server Error"}
416 )
417
418 # ASGI interface
419 async def send(self, message):
420 message_type = message["type"]
421
422 if self.disconnected:
423 return
424
425 if self.flow.write_paused:
426 await self.flow.drain()
427
428 if not self.response_started:
429 # Sending response status line and headers
430 if message_type != "http.response.start":
431 msg = "Expected ASGI message 'http.response.start', but got '%s'."
432 raise RuntimeError(msg % message_type)
433
434 self.response_started = True
435 self.waiting_for_100_continue = False
436
437 status_code = message["status"]
438 headers = message.get("headers", [])
439
440 if self.access_log:
441 self.logger.info(
442 '%s - "%s %s HTTP/%s" %d',
443 self.scope["client"],
444 self.scope["method"],
445 self.scope["path"],
446 self.scope["http_version"],
447 status_code,
448 )
449
450 # Write response status line and headers
451 content = [STATUS_LINE[status_code], DEFAULT_HEADERS]
452
453 for name, value in headers:
454 name = name.lower()
455 if name == b"content-length" and self.chunked_encoding is None:
456 self.expected_content_length = int(value.decode())
457 self.chunked_encoding = False
458 elif name == b"transfer-encoding" and value.lower() == b"chunked":
459 self.expected_content_length = 0
460 self.chunked_encoding = True
461 elif name == b"connection" and value.lower() == b"close":
462 self.keep_alive = False
463 content.extend([name, b": ", value, b"\r\n"])
464
465 if self.chunked_encoding is None and self.scope["method"] != 'HEAD' and status_code not in (204, 304):
466 # Neither content-length nor transfer-encoding specified
467 self.chunked_encoding = True
468 content.append(b"transfer-encoding: chunked\r\n")
469
470 content.append(b"\r\n")
471 self.transport.write(b"".join(content))
472
473 elif not self.response_complete:
474 # Sending response body
475 if message_type != "http.response.body":
476 msg = "Expected ASGI message 'http.response.body', but got '%s'."
477 raise RuntimeError(msg % message_type)
478
479 body = message.get("body", b"")
480 more_body = message.get("more_body", False)
481
482 # Write response body
483 if self.scope["method"] == "HEAD":
484 self.expected_content_length = 0
485 elif self.chunked_encoding:
486 if body:
487 content = [b"%x\r\n" % len(body), body, b"\r\n"]
488 else:
489 content = []
490 if not more_body:
491 content.append(b"0\r\n\r\n")
492 self.transport.write(b"".join(content))
493 else:
494 num_bytes = len(body)
495 if num_bytes > self.expected_content_length:
496 raise RuntimeError("Response content longer than Content-Length")
497 else:
498 self.expected_content_length -= num_bytes
499 self.transport.write(body)
500
501 # Handle response completion
502 if not more_body:
503 if self.expected_content_length != 0:
504 raise RuntimeError("Response content shorter than Content-Length")
505 self.response_complete = True
506 if not self.keep_alive:
507 self.transport.close()
508 self.on_response()
509
510 else:
511 # Response already sent
512 msg = "Unexpected ASGI message '%s' sent, after response already completed."
513 raise RuntimeError(msg % message_type)
514
515 async def receive(self):
516 if self.waiting_for_100_continue and not self.transport.is_closing():
517 self.transport.write(b"HTTP/1.1 100 Continue\r\n")
518 self.waiting_for_100_continue = False
519
520 self.flow.resume_reading()
521 await self.message_event.wait()
522 self.message_event.clear()
523
524 if self.disconnected or self.response_complete:
525 message = {"type": "http.disconnect"}
526 else:
527 message = {
528 "type": "http.request",
529 "body": self.body,
530 "more_body": self.more_body,
531 }
532 self.body = b""
533
534 return message
535
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/protocols/http/httptools_impl.py b/uvicorn/protocols/http/httptools_impl.py
--- a/uvicorn/protocols/http/httptools_impl.py
+++ b/uvicorn/protocols/http/httptools_impl.py
@@ -514,7 +514,7 @@
async def receive(self):
if self.waiting_for_100_continue and not self.transport.is_closing():
- self.transport.write(b"HTTP/1.1 100 Continue\r\n")
+ self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
self.waiting_for_100_continue = False
self.flow.resume_reading()
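
The two extra bytes in the patch matter because an interim `100 Continue` reply is a complete HTTP message head, and a message head only ends at an empty line. A small sketch of the framing difference, using illustrative byte strings rather than uvicorn internals:

```python
# What the server was sending: a status line whose header section is never
# terminated, so the client cannot tell where the interim response ends.
truncated = b"HTTP/1.1 100 Continue\r\n"

# What correct framing requires: the (empty) header section is closed by a
# blank line, hence the double CRLF that the patch adds.
well_formed = b"HTTP/1.1 100 Continue\r\n\r\n"

print(len(well_formed) - len(truncated))  # the two missing bytes: b"\r\n"
```

With the truncated form, the client's idea of where the header section ends drifts from the server's, and the chunk-size line of the real response can be swallowed as header data, which would explain the 20-byte body (payload plus terminator, but no size line) seen in the trace from the issue.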
| {"golden_diff": "diff --git a/uvicorn/protocols/http/httptools_impl.py b/uvicorn/protocols/http/httptools_impl.py\n--- a/uvicorn/protocols/http/httptools_impl.py\n+++ b/uvicorn/protocols/http/httptools_impl.py\n@@ -514,7 +514,7 @@\n \n async def receive(self):\n if self.waiting_for_100_continue and not self.transport.is_closing():\n- self.transport.write(b\"HTTP/1.1 100 Continue\\r\\n\")\n+ self.transport.write(b\"HTTP/1.1 100 Continue\\r\\n\\r\\n\")\n self.waiting_for_100_continue = False\n \n self.flow.resume_reading()\n", "issue": "Multipart form handling weird behavior \n### Background \r\nHey everyone!\r\n\r\nI'm writing an app that processes multipart form with file upload. When I serve it with `uvicorn` and run cURL against the endpoint I get an error `curl: (56) Illegal or missing hexadecimal sequence in chunked-encoding`. Data from backend reaches cURL, but it returns an odd error. \r\n\r\nLooking into `ascii-trace` I can see that data chunk from backend mystically comes to cURL without length line which should be at the beginning of chunk.\r\n```\r\n<= Recv data, 20 bytes (0x14)\r\n0000: Hello, world!\r\n000f: 0\r\n0012:\r\n```\r\nHowever, dtrace and Wireshark showed me that chunk data was sent from `uvicorn` with length as it should. Other interesting fact is that I get no cURL error if serving app with daphne(`daphne -p 8000 app:App`).\r\n\r\nWhat am I missing? \ud83e\udd14 \r\n\r\n### Environment\r\n1. Mac OS X \r\n2. uvicorn==0.3.21\r\n3. Python 3.7.0 (with pipenv)\r\n4. uvloop / asyncio gives same result\r\n\r\n### Steps to reproduce\r\n1. Create snippet:\r\n\r\n```python\r\nclass App():\r\n def __init__(self, scope):\r\n self.scope = scope\r\n\r\n async def __call__(self, receive, send):\r\n while True:\r\n message = await receive()\r\n print('(%s) message from recv is %r' % (id(self),message))\r\n if message[\"type\"] == \"http.disconnect\":\r\n return\r\n else:\r\n if not message.get(\"more_body\"):\r\n await send({\r\n 'type': 'http.response.start',\r\n 'status': 200,\r\n 'headers': [[b'content-type', b'text/plain']]\r\n })\r\n await send({\r\n 'type': 'http.response.body',\r\n 'body': b'Hello, world!',\r\n 'more_body': False\r\n })\r\n```\r\n\r\n2. Run http server \r\n```\r\npython uvicorn app:App\r\n```\r\n\r\n3. Query with cURL\r\n```\r\ncurl --trace-ascii curl.trace-uvicorn \\\r\n --form file=@foo \\\r\n --form file2=@foo \\\r\n http://localhost:8000/api/upload\r\ncurl: (56) Illegal or missing hexadecimal sequence in chunked-encoding\r\n```\r\n\r\n4. 
Check what's in trace\r\n\r\n```\r\n$ cat curl.trace-uvicorn\r\n== Info: Trying ::1...\r\n== Info: TCP_NODELAY set\r\n== Info: Connection failed\r\n== Info: connect to ::1 port 8000 failed: Connection refused\r\n== Info: Trying fe80::1...\r\n== Info: TCP_NODELAY set\r\n== Info: Connection failed\r\n== Info: connect to fe80::1 port 8000 failed: Connection refused\r\n== Info: Trying 127.0.0.1...\r\n== Info: TCP_NODELAY set\r\n== Info: Connected to localhost (127.0.0.1) port 8000 (#0)\r\n=> Send header, 218 bytes (0xda)\r\n0000: POST /api/upload HTTP/1.1\r\n001b: Host: localhost:8000\r\n0031: User-Agent: curl/7.54.0\r\n004a: Accept: */*\r\n0057: Content-Length: 361\r\n006c: Expect: 100-continue\r\n0082: Content-Type: multipart/form-data; boundary=--------------------\r\n00c2: ----68de9f32447ad9b5\r\n00d8:\r\n<= Recv header, 23 bytes (0x17)\r\n0000: HTTP/1.1 100 Continue\r\n== Info: Done waiting for 100-continue\r\n=> Send data, 147 bytes (0x93)\r\n0000: --------------------------68de9f32447ad9b5\r\n002c: Content-Disposition: form-data; name=\"file\"; filename=\"foo\"\r\n0069: Content-Type: application/octet-stream\r\n0091:\r\n=> Send data, 8 bytes (0x8)\r\n0000: content.\r\n=> Send data, 150 bytes (0x96)\r\n0000:\r\n0002: --------------------------68de9f32447ad9b5\r\n002e: Content-Disposition: form-data; name=\"file2\"; filename=\"foo\"\r\n006c: Content-Type: application/octet-stream\r\n0094:\r\n=> Send data, 8 bytes (0x8)\r\n0000: content.\r\n=> Send data, 48 bytes (0x30)\r\n0000:\r\n0002: --------------------------68de9f32447ad9b5--\r\n<= Recv header, 17 bytes (0x11)\r\n0000: HTTP/1.1 200 OK\r\n<= Recv header, 17 bytes (0x11)\r\n0000: server: uvicorn\r\n<= Recv header, 37 bytes (0x25)\r\n0000: date: Tue, 20 Nov 2018 04:21:02 GMT\r\n<= Recv header, 26 bytes (0x1a)\r\n0000: content-type: text/plain\r\n<= Recv header, 28 bytes (0x1c)\r\n0000: transfer-encoding: chunked\r\n<= Recv data, 20 bytes (0x14)\r\n0000: Hello, world!\r\n000f: 0\r\n0012:\r\n== Info: Illegal or missing hexadecimal sequence in chunked-encoding\r\n== Info: stopped the pause stream!\r\n== Info: Closing connection 0\r\n```\n", "before_files": [{"content": "import asyncio\nfrom email.utils import formatdate\nimport http\nimport logging\nimport time\nimport urllib\nfrom uvicorn.global_state import GlobalState\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\nimport httptools\n\n\ndef _get_default_headers():\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n return b\"\".join([b\"server: uvicorn\\r\\ndate: \", current_date, b\"\\r\\n\"])\n\n\ndef _get_status_line(status_code):\n try:\n phrase = http.HTTPStatus(status_code).phrase.encode()\n except ValueError as exc:\n phrase = b\"\"\n return b\"\".join([b\"HTTP/1.1 \", str(status_code).encode(), b\" \", phrase, b\"\\r\\n\"])\n\n\nSTATUS_LINE = {\n status_code: _get_status_line(status_code) for status_code in range(100, 600)\n}\n\nDEFAULT_HEADERS = _get_default_headers()\n\nHIGH_WATER_LIMIT = 65536\n\n\nclass FlowControl:\n def __init__(self, transport):\n self._transport = transport\n self.read_paused = False\n self.write_paused = False\n self._is_writable_event = asyncio.Event()\n self._is_writable_event.set()\n\n async def drain(self):\n await self._is_writable_event.wait()\n\n def pause_reading(self):\n if not self.read_paused:\n self.read_paused = True\n self._transport.pause_reading()\n\n def resume_reading(self):\n if self.read_paused:\n self.read_paused = False\n self._transport.resume_reading()\n\n 
def pause_writing(self):\n if not self.write_paused:\n self.write_paused = True\n self._is_writable_event.clear()\n\n def resume_writing(self):\n if self.write_paused:\n self.write_paused = False\n self._is_writable_event.set()\n\n\nclass ServiceUnavailable:\n def __init__(self, scope):\n pass\n\n async def __call__(self, receive, send):\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 503,\n \"headers\": [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ],\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": b\"Service Unavailable\"})\n\n\nclass HttpToolsProtocol(asyncio.Protocol):\n def __init__(self, config, global_state=None):\n self.config = config\n self.app = config.app\n self.loop = config.loop or asyncio.get_event_loop()\n self.logger = config.logger or logging.getLogger(\"uvicorn\")\n self.access_log = config.access_log and (self.logger.level <= logging.INFO)\n self.parser = httptools.HttpRequestParser(self)\n self.ws_protocol_class = config.ws_protocol_class\n self.root_path = config.root_path\n self.limit_concurrency = config.limit_concurrency\n\n # Timeouts\n self.timeout_keep_alive_task = None\n self.timeout_keep_alive = config.timeout_keep_alive\n\n # Global state\n if global_state is None:\n global_state = GlobalState()\n self.global_state = global_state\n self.connections = global_state.connections\n self.tasks = global_state.tasks\n\n # Per-connection state\n self.transport = None\n self.flow = None\n self.server = None\n self.client = None\n self.scheme = None\n self.pipeline = []\n\n # Per-request state\n self.url = None\n self.scope = None\n self.headers = None\n self.expect_100_continue = False\n self.cycle = None\n self.message_event = asyncio.Event()\n\n @classmethod\n def tick(cls):\n global DEFAULT_HEADERS\n DEFAULT_HEADERS = _get_default_headers()\n\n # Protocol interface\n def connection_made(self, transport):\n self.connections.add(self)\n\n self.transport = transport\n self.flow = FlowControl(transport)\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"https\" if is_ssl(transport) else \"http\"\n\n if self.logger.level <= logging.DEBUG:\n self.logger.debug(\"%s - Connected\", self.client)\n\n def connection_lost(self, exc):\n self.connections.discard(self)\n\n if self.logger.level <= logging.DEBUG:\n self.logger.debug(\"%s - Disconnected\", self.client)\n\n if self.cycle and not self.cycle.response_complete:\n self.cycle.disconnected = True\n self.message_event.set()\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n if self.timeout_keep_alive_task is not None:\n self.timeout_keep_alive_task.cancel()\n self.timeout_keep_alive_task = None\n\n try:\n self.parser.feed_data(data)\n except httptools.parser.errors.HttpParserError as exc:\n msg = \"Invalid HTTP request received.\"\n self.logger.warning(msg)\n self.transport.close()\n except httptools.HttpParserUpgrade as exc:\n self.handle_upgrade()\n\n def handle_upgrade(self):\n upgrade_value = None\n for name, value in self.headers:\n if name == b\"upgrade\":\n upgrade_value = value.lower()\n\n if upgrade_value != b'websocket' or self.ws_protocol_class is None:\n msg = \"Unsupported upgrade request.\"\n self.logger.warning(msg)\n content = [STATUS_LINE[400], DEFAULT_HEADERS]\n content.extend([\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode('ascii') + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n 
msg.encode('ascii')\n ])\n self.transport.write(b\"\".join(content))\n self.transport.close()\n return\n\n self.connections.discard(self)\n method = self.scope['method'].encode()\n output = [method, b' ', self.url, b' HTTP/1.1\\r\\n']\n for name, value in self.scope['headers']:\n output += [name, b\": \", value, b\"\\r\\n\"]\n output.append(b'\\r\\n')\n protocol = self.ws_protocol_class(\n config=self.config,\n global_state=self.global_state,\n )\n protocol.connection_made(self.transport)\n protocol.data_received(b''.join(output))\n self.transport.set_protocol(protocol)\n\n # Parser callbacks\n def on_url(self, url):\n method = self.parser.get_method()\n parsed_url = httptools.parse_url(url)\n path = parsed_url.path.decode(\"ascii\")\n if '%' in path:\n path = urllib.parse.unquote(path)\n self.url = url\n self.expect_100_continue = False\n self.headers = []\n self.scope = {\n \"type\": \"http\",\n \"http_version\": \"1.1\",\n \"server\": self.server,\n \"client\": self.client,\n \"scheme\": self.scheme,\n \"method\": method.decode(\"ascii\"),\n \"root_path\": self.root_path,\n \"path\": path,\n \"query_string\": parsed_url.query if parsed_url.query else b\"\",\n \"headers\": self.headers,\n }\n\n def on_header(self, name: bytes, value: bytes):\n name = name.lower()\n if name == b\"expect\" and value.lower() == b\"100-continue\":\n self.expect_100_continue = True\n self.headers.append((name, value))\n\n def on_headers_complete(self):\n http_version = self.parser.get_http_version()\n if http_version != \"1.1\":\n self.scope[\"http_version\"] = http_version\n if self.parser.should_upgrade():\n return\n\n # Handle 503 responses when 'limit_concurrency' is exceeded.\n if self.limit_concurrency is not None and (\n len(self.connections) >= self.limit_concurrency\n or len(self.tasks) >= self.limit_concurrency\n ):\n app = ServiceUnavailable\n message = \"Exceeded concurrency limit.\"\n self.logger.warning(message)\n else:\n app = self.app\n\n existing_cycle = self.cycle\n self.cycle = RequestResponseCycle(\n scope=self.scope,\n transport=self.transport,\n flow=self.flow,\n logger=self.logger,\n access_log=self.access_log,\n message_event=self.message_event,\n expect_100_continue=self.expect_100_continue,\n keep_alive=http_version != \"1.0\",\n on_response=self.on_response_complete,\n )\n if existing_cycle is None or existing_cycle.response_complete:\n # Standard case - start processing the request.\n task = self.loop.create_task(self.cycle.run_asgi(app))\n task.add_done_callback(self.tasks.discard)\n self.tasks.add(task)\n else:\n # Pipelined HTTP requests need to be queued up.\n self.flow.pause_reading()\n self.pipeline.insert(0, (self.cycle, app))\n\n def on_body(self, body: bytes):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.body += body\n if len(self.cycle.body) > HIGH_WATER_LIMIT:\n self.flow.pause_reading()\n self.message_event.set()\n\n def on_message_complete(self):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.more_body = False\n self.message_event.set()\n\n def on_response_complete(self):\n # Callback for pipelined HTTP requests to be started.\n self.global_state.total_requests += 1\n\n if self.transport.is_closing():\n return\n\n # Set a short Keep-Alive timeout.\n self.timeout_keep_alive_task = self.loop.call_later(\n self.timeout_keep_alive, self.timeout_keep_alive_handler\n )\n\n # Unpause data reads if needed.\n self.flow.resume_reading()\n\n # Unblock any pipelined events.\n if self.pipeline:\n 
cycle, app = self.pipeline.pop()\n task = self.loop.create_task(cycle.run_asgi(app))\n task.add_done_callback(self.tasks.discard)\n self.tasks.add(task)\n\n def shutdown(self):\n \"\"\"\n Called by the server to commence a graceful shutdown.\n \"\"\"\n if self.cycle is None or self.cycle.response_complete:\n self.transport.close()\n else:\n self.cycle.keep_alive = False\n\n def pause_writing(self):\n \"\"\"\n Called by the transport when the write buffer exceeds the high water mark.\n \"\"\"\n self.flow.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Called by the transport when the write buffer drops below the low water mark.\n \"\"\"\n self.flow.resume_writing()\n\n def timeout_keep_alive_handler(self):\n \"\"\"\n Called on a keep-alive connection if no new data is received after a short delay.\n \"\"\"\n if not self.transport.is_closing():\n self.transport.close()\n\n\nclass RequestResponseCycle:\n def __init__(\n self,\n scope,\n transport,\n flow,\n logger,\n access_log,\n message_event,\n expect_100_continue,\n keep_alive,\n on_response,\n ):\n self.scope = scope\n self.transport = transport\n self.flow = flow\n self.logger = logger\n self.access_log = access_log\n self.message_event = message_event\n self.on_response = on_response\n\n # Connection state\n self.disconnected = False\n self.keep_alive = keep_alive\n self.waiting_for_100_continue = expect_100_continue\n\n # Request state\n self.body = b\"\"\n self.more_body = True\n\n # Response state\n self.response_started = False\n self.response_complete = False\n self.chunked_encoding = None\n self.expected_content_length = 0\n\n # ASGI exception wrapper\n async def run_asgi(self, app):\n try:\n asgi = app(self.scope)\n result = await asgi(self.receive, self.send)\n except BaseException as exc:\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.response_started:\n await self.send_500_response()\n else:\n self.transport.close()\n else:\n if result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n self.transport.close()\n elif not self.response_started and not self.disconnected:\n msg = \"ASGI callable returned without starting response.\"\n self.logger.error(msg)\n await self.send_500_response()\n elif not self.response_complete and not self.disconnected:\n msg = \"ASGI callable returned without completing response.\"\n self.logger.error(msg)\n self.transport.close()\n finally:\n self.on_response = None\n\n async def send_500_response(self):\n await self.send(\n {\n \"type\": \"http.response.start\",\n \"status\": 500,\n \"headers\": [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ],\n }\n )\n await self.send(\n {\"type\": \"http.response.body\", \"body\": b\"Internal Server Error\"}\n )\n\n # ASGI interface\n async def send(self, message):\n message_type = message[\"type\"]\n\n if self.disconnected:\n return\n\n if self.flow.write_paused:\n await self.flow.drain()\n\n if not self.response_started:\n # Sending response status line and headers\n if message_type != \"http.response.start\":\n msg = \"Expected ASGI message 'http.response.start', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n self.response_started = True\n self.waiting_for_100_continue = False\n\n status_code = message[\"status\"]\n headers = message.get(\"headers\", [])\n\n if self.access_log:\n self.logger.info(\n '%s - \"%s %s HTTP/%s\" %d',\n self.scope[\"client\"],\n self.scope[\"method\"],\n 
self.scope[\"path\"],\n self.scope[\"http_version\"],\n status_code,\n )\n\n # Write response status line and headers\n content = [STATUS_LINE[status_code], DEFAULT_HEADERS]\n\n for name, value in headers:\n name = name.lower()\n if name == b\"content-length\" and self.chunked_encoding is None:\n self.expected_content_length = int(value.decode())\n self.chunked_encoding = False\n elif name == b\"transfer-encoding\" and value.lower() == b\"chunked\":\n self.expected_content_length = 0\n self.chunked_encoding = True\n elif name == b\"connection\" and value.lower() == b\"close\":\n self.keep_alive = False\n content.extend([name, b\": \", value, b\"\\r\\n\"])\n\n if self.chunked_encoding is None and self.scope[\"method\"] != 'HEAD' and status_code not in (204, 304):\n # Neither content-length nor transfer-encoding specified\n self.chunked_encoding = True\n content.append(b\"transfer-encoding: chunked\\r\\n\")\n\n content.append(b\"\\r\\n\")\n self.transport.write(b\"\".join(content))\n\n elif not self.response_complete:\n # Sending response body\n if message_type != \"http.response.body\":\n msg = \"Expected ASGI message 'http.response.body', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n # Write response body\n if self.scope[\"method\"] == \"HEAD\":\n self.expected_content_length = 0\n elif self.chunked_encoding:\n if body:\n content = [b\"%x\\r\\n\" % len(body), body, b\"\\r\\n\"]\n else:\n content = []\n if not more_body:\n content.append(b\"0\\r\\n\\r\\n\")\n self.transport.write(b\"\".join(content))\n else:\n num_bytes = len(body)\n if num_bytes > self.expected_content_length:\n raise RuntimeError(\"Response content longer than Content-Length\")\n else:\n self.expected_content_length -= num_bytes\n self.transport.write(body)\n\n # Handle response completion\n if not more_body:\n if self.expected_content_length != 0:\n raise RuntimeError(\"Response content shorter than Content-Length\")\n self.response_complete = True\n if not self.keep_alive:\n self.transport.close()\n self.on_response()\n\n else:\n # Response already sent\n msg = \"Unexpected ASGI message '%s' sent, after response already completed.\"\n raise RuntimeError(msg % message_type)\n\n async def receive(self):\n if self.waiting_for_100_continue and not self.transport.is_closing():\n self.transport.write(b\"HTTP/1.1 100 Continue\\r\\n\")\n self.waiting_for_100_continue = False\n\n self.flow.resume_reading()\n await self.message_event.wait()\n self.message_event.clear()\n\n if self.disconnected or self.response_complete:\n message = {\"type\": \"http.disconnect\"}\n else:\n message = {\n \"type\": \"http.request\",\n \"body\": self.body,\n \"more_body\": self.more_body,\n }\n self.body = b\"\"\n\n return message\n", "path": "uvicorn/protocols/http/httptools_impl.py"}], "after_files": [{"content": "import asyncio\nfrom email.utils import formatdate\nimport http\nimport logging\nimport time\nimport urllib\nfrom uvicorn.global_state import GlobalState\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\nimport httptools\n\n\ndef _get_default_headers():\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n return b\"\".join([b\"server: uvicorn\\r\\ndate: \", current_date, b\"\\r\\n\"])\n\n\ndef _get_status_line(status_code):\n try:\n phrase = http.HTTPStatus(status_code).phrase.encode()\n except ValueError as exc:\n phrase = b\"\"\n return b\"\".join([b\"HTTP/1.1 \", 
str(status_code).encode(), b\" \", phrase, b\"\\r\\n\"])\n\n\nSTATUS_LINE = {\n status_code: _get_status_line(status_code) for status_code in range(100, 600)\n}\n\nDEFAULT_HEADERS = _get_default_headers()\n\nHIGH_WATER_LIMIT = 65536\n\n\nclass FlowControl:\n def __init__(self, transport):\n self._transport = transport\n self.read_paused = False\n self.write_paused = False\n self._is_writable_event = asyncio.Event()\n self._is_writable_event.set()\n\n async def drain(self):\n await self._is_writable_event.wait()\n\n def pause_reading(self):\n if not self.read_paused:\n self.read_paused = True\n self._transport.pause_reading()\n\n def resume_reading(self):\n if self.read_paused:\n self.read_paused = False\n self._transport.resume_reading()\n\n def pause_writing(self):\n if not self.write_paused:\n self.write_paused = True\n self._is_writable_event.clear()\n\n def resume_writing(self):\n if self.write_paused:\n self.write_paused = False\n self._is_writable_event.set()\n\n\nclass ServiceUnavailable:\n def __init__(self, scope):\n pass\n\n async def __call__(self, receive, send):\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 503,\n \"headers\": [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ],\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": b\"Service Unavailable\"})\n\n\nclass HttpToolsProtocol(asyncio.Protocol):\n def __init__(self, config, global_state=None):\n self.config = config\n self.app = config.app\n self.loop = config.loop or asyncio.get_event_loop()\n self.logger = config.logger or logging.getLogger(\"uvicorn\")\n self.access_log = config.access_log and (self.logger.level <= logging.INFO)\n self.parser = httptools.HttpRequestParser(self)\n self.ws_protocol_class = config.ws_protocol_class\n self.root_path = config.root_path\n self.limit_concurrency = config.limit_concurrency\n\n # Timeouts\n self.timeout_keep_alive_task = None\n self.timeout_keep_alive = config.timeout_keep_alive\n\n # Global state\n if global_state is None:\n global_state = GlobalState()\n self.global_state = global_state\n self.connections = global_state.connections\n self.tasks = global_state.tasks\n\n # Per-connection state\n self.transport = None\n self.flow = None\n self.server = None\n self.client = None\n self.scheme = None\n self.pipeline = []\n\n # Per-request state\n self.url = None\n self.scope = None\n self.headers = None\n self.expect_100_continue = False\n self.cycle = None\n self.message_event = asyncio.Event()\n\n @classmethod\n def tick(cls):\n global DEFAULT_HEADERS\n DEFAULT_HEADERS = _get_default_headers()\n\n # Protocol interface\n def connection_made(self, transport):\n self.connections.add(self)\n\n self.transport = transport\n self.flow = FlowControl(transport)\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"https\" if is_ssl(transport) else \"http\"\n\n if self.logger.level <= logging.DEBUG:\n self.logger.debug(\"%s - Connected\", self.client)\n\n def connection_lost(self, exc):\n self.connections.discard(self)\n\n if self.logger.level <= logging.DEBUG:\n self.logger.debug(\"%s - Disconnected\", self.client)\n\n if self.cycle and not self.cycle.response_complete:\n self.cycle.disconnected = True\n self.message_event.set()\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n if self.timeout_keep_alive_task is not None:\n self.timeout_keep_alive_task.cancel()\n self.timeout_keep_alive_task = None\n\n try:\n 
self.parser.feed_data(data)\n except httptools.parser.errors.HttpParserError as exc:\n msg = \"Invalid HTTP request received.\"\n self.logger.warning(msg)\n self.transport.close()\n except httptools.HttpParserUpgrade as exc:\n self.handle_upgrade()\n\n def handle_upgrade(self):\n upgrade_value = None\n for name, value in self.headers:\n if name == b\"upgrade\":\n upgrade_value = value.lower()\n\n if upgrade_value != b'websocket' or self.ws_protocol_class is None:\n msg = \"Unsupported upgrade request.\"\n self.logger.warning(msg)\n content = [STATUS_LINE[400], DEFAULT_HEADERS]\n content.extend([\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode('ascii') + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg.encode('ascii')\n ])\n self.transport.write(b\"\".join(content))\n self.transport.close()\n return\n\n self.connections.discard(self)\n method = self.scope['method'].encode()\n output = [method, b' ', self.url, b' HTTP/1.1\\r\\n']\n for name, value in self.scope['headers']:\n output += [name, b\": \", value, b\"\\r\\n\"]\n output.append(b'\\r\\n')\n protocol = self.ws_protocol_class(\n config=self.config,\n global_state=self.global_state,\n )\n protocol.connection_made(self.transport)\n protocol.data_received(b''.join(output))\n self.transport.set_protocol(protocol)\n\n # Parser callbacks\n def on_url(self, url):\n method = self.parser.get_method()\n parsed_url = httptools.parse_url(url)\n path = parsed_url.path.decode(\"ascii\")\n if '%' in path:\n path = urllib.parse.unquote(path)\n self.url = url\n self.expect_100_continue = False\n self.headers = []\n self.scope = {\n \"type\": \"http\",\n \"http_version\": \"1.1\",\n \"server\": self.server,\n \"client\": self.client,\n \"scheme\": self.scheme,\n \"method\": method.decode(\"ascii\"),\n \"root_path\": self.root_path,\n \"path\": path,\n \"query_string\": parsed_url.query if parsed_url.query else b\"\",\n \"headers\": self.headers,\n }\n\n def on_header(self, name: bytes, value: bytes):\n name = name.lower()\n if name == b\"expect\" and value.lower() == b\"100-continue\":\n self.expect_100_continue = True\n self.headers.append((name, value))\n\n def on_headers_complete(self):\n http_version = self.parser.get_http_version()\n if http_version != \"1.1\":\n self.scope[\"http_version\"] = http_version\n if self.parser.should_upgrade():\n return\n\n # Handle 503 responses when 'limit_concurrency' is exceeded.\n if self.limit_concurrency is not None and (\n len(self.connections) >= self.limit_concurrency\n or len(self.tasks) >= self.limit_concurrency\n ):\n app = ServiceUnavailable\n message = \"Exceeded concurrency limit.\"\n self.logger.warning(message)\n else:\n app = self.app\n\n existing_cycle = self.cycle\n self.cycle = RequestResponseCycle(\n scope=self.scope,\n transport=self.transport,\n flow=self.flow,\n logger=self.logger,\n access_log=self.access_log,\n message_event=self.message_event,\n expect_100_continue=self.expect_100_continue,\n keep_alive=http_version != \"1.0\",\n on_response=self.on_response_complete,\n )\n if existing_cycle is None or existing_cycle.response_complete:\n # Standard case - start processing the request.\n task = self.loop.create_task(self.cycle.run_asgi(app))\n task.add_done_callback(self.tasks.discard)\n self.tasks.add(task)\n else:\n # Pipelined HTTP requests need to be queued up.\n self.flow.pause_reading()\n self.pipeline.insert(0, (self.cycle, app))\n\n def on_body(self, body: bytes):\n if self.parser.should_upgrade() or 
self.cycle.response_complete:\n return\n self.cycle.body += body\n if len(self.cycle.body) > HIGH_WATER_LIMIT:\n self.flow.pause_reading()\n self.message_event.set()\n\n def on_message_complete(self):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.more_body = False\n self.message_event.set()\n\n def on_response_complete(self):\n # Callback for pipelined HTTP requests to be started.\n self.global_state.total_requests += 1\n\n if self.transport.is_closing():\n return\n\n # Set a short Keep-Alive timeout.\n self.timeout_keep_alive_task = self.loop.call_later(\n self.timeout_keep_alive, self.timeout_keep_alive_handler\n )\n\n # Unpause data reads if needed.\n self.flow.resume_reading()\n\n # Unblock any pipelined events.\n if self.pipeline:\n cycle, app = self.pipeline.pop()\n task = self.loop.create_task(cycle.run_asgi(app))\n task.add_done_callback(self.tasks.discard)\n self.tasks.add(task)\n\n def shutdown(self):\n \"\"\"\n Called by the server to commence a graceful shutdown.\n \"\"\"\n if self.cycle is None or self.cycle.response_complete:\n self.transport.close()\n else:\n self.cycle.keep_alive = False\n\n def pause_writing(self):\n \"\"\"\n Called by the transport when the write buffer exceeds the high water mark.\n \"\"\"\n self.flow.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Called by the transport when the write buffer drops below the low water mark.\n \"\"\"\n self.flow.resume_writing()\n\n def timeout_keep_alive_handler(self):\n \"\"\"\n Called on a keep-alive connection if no new data is received after a short delay.\n \"\"\"\n if not self.transport.is_closing():\n self.transport.close()\n\n\nclass RequestResponseCycle:\n def __init__(\n self,\n scope,\n transport,\n flow,\n logger,\n access_log,\n message_event,\n expect_100_continue,\n keep_alive,\n on_response,\n ):\n self.scope = scope\n self.transport = transport\n self.flow = flow\n self.logger = logger\n self.access_log = access_log\n self.message_event = message_event\n self.on_response = on_response\n\n # Connection state\n self.disconnected = False\n self.keep_alive = keep_alive\n self.waiting_for_100_continue = expect_100_continue\n\n # Request state\n self.body = b\"\"\n self.more_body = True\n\n # Response state\n self.response_started = False\n self.response_complete = False\n self.chunked_encoding = None\n self.expected_content_length = 0\n\n # ASGI exception wrapper\n async def run_asgi(self, app):\n try:\n asgi = app(self.scope)\n result = await asgi(self.receive, self.send)\n except BaseException as exc:\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.response_started:\n await self.send_500_response()\n else:\n self.transport.close()\n else:\n if result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n self.transport.close()\n elif not self.response_started and not self.disconnected:\n msg = \"ASGI callable returned without starting response.\"\n self.logger.error(msg)\n await self.send_500_response()\n elif not self.response_complete and not self.disconnected:\n msg = \"ASGI callable returned without completing response.\"\n self.logger.error(msg)\n self.transport.close()\n finally:\n self.on_response = None\n\n async def send_500_response(self):\n await self.send(\n {\n \"type\": \"http.response.start\",\n \"status\": 500,\n \"headers\": [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ],\n }\n )\n 
await self.send(\n {\"type\": \"http.response.body\", \"body\": b\"Internal Server Error\"}\n )\n\n # ASGI interface\n async def send(self, message):\n message_type = message[\"type\"]\n\n if self.disconnected:\n return\n\n if self.flow.write_paused:\n await self.flow.drain()\n\n if not self.response_started:\n # Sending response status line and headers\n if message_type != \"http.response.start\":\n msg = \"Expected ASGI message 'http.response.start', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n self.response_started = True\n self.waiting_for_100_continue = False\n\n status_code = message[\"status\"]\n headers = message.get(\"headers\", [])\n\n if self.access_log:\n self.logger.info(\n '%s - \"%s %s HTTP/%s\" %d',\n self.scope[\"client\"],\n self.scope[\"method\"],\n self.scope[\"path\"],\n self.scope[\"http_version\"],\n status_code,\n )\n\n # Write response status line and headers\n content = [STATUS_LINE[status_code], DEFAULT_HEADERS]\n\n for name, value in headers:\n name = name.lower()\n if name == b\"content-length\" and self.chunked_encoding is None:\n self.expected_content_length = int(value.decode())\n self.chunked_encoding = False\n elif name == b\"transfer-encoding\" and value.lower() == b\"chunked\":\n self.expected_content_length = 0\n self.chunked_encoding = True\n elif name == b\"connection\" and value.lower() == b\"close\":\n self.keep_alive = False\n content.extend([name, b\": \", value, b\"\\r\\n\"])\n\n if self.chunked_encoding is None and self.scope[\"method\"] != 'HEAD' and status_code not in (204, 304):\n # Neither content-length nor transfer-encoding specified\n self.chunked_encoding = True\n content.append(b\"transfer-encoding: chunked\\r\\n\")\n\n content.append(b\"\\r\\n\")\n self.transport.write(b\"\".join(content))\n\n elif not self.response_complete:\n # Sending response body\n if message_type != \"http.response.body\":\n msg = \"Expected ASGI message 'http.response.body', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n # Write response body\n if self.scope[\"method\"] == \"HEAD\":\n self.expected_content_length = 0\n elif self.chunked_encoding:\n if body:\n content = [b\"%x\\r\\n\" % len(body), body, b\"\\r\\n\"]\n else:\n content = []\n if not more_body:\n content.append(b\"0\\r\\n\\r\\n\")\n self.transport.write(b\"\".join(content))\n else:\n num_bytes = len(body)\n if num_bytes > self.expected_content_length:\n raise RuntimeError(\"Response content longer than Content-Length\")\n else:\n self.expected_content_length -= num_bytes\n self.transport.write(body)\n\n # Handle response completion\n if not more_body:\n if self.expected_content_length != 0:\n raise RuntimeError(\"Response content shorter than Content-Length\")\n self.response_complete = True\n if not self.keep_alive:\n self.transport.close()\n self.on_response()\n\n else:\n # Response already sent\n msg = \"Unexpected ASGI message '%s' sent, after response already completed.\"\n raise RuntimeError(msg % message_type)\n\n async def receive(self):\n if self.waiting_for_100_continue and not self.transport.is_closing():\n self.transport.write(b\"HTTP/1.1 100 Continue\\r\\n\\r\\n\")\n self.waiting_for_100_continue = False\n\n self.flow.resume_reading()\n await self.message_event.wait()\n self.message_event.clear()\n\n if self.disconnected or self.response_complete:\n message = {\"type\": \"http.disconnect\"}\n else:\n message = {\n \"type\": \"http.request\",\n \"body\": self.body,\n 
\"more_body\": self.more_body,\n }\n self.body = b\"\"\n\n return message\n", "path": "uvicorn/protocols/http/httptools_impl.py"}]} |
gh_patches_debug_1371 | rasdani/github-patches | git_diff | getnikola__nikola-2770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get rid of "stories" everywhere
The "stories" term is a remnant of my own config before I even started writing Nikola. In fact, the term comes from PyDS, a blogging software so old it doesn't even have a website anymore.
So, the right term is "pages", let's use it.
- [x] Remove "stories" from the docs
- [x] Remove "stories" from the config (for new sites at least)
- [x] Remove "stories" from the code (if it's still there) (done except for public APIs)
- [x] Move `story.tmpl` code to `page.tmpl`, leaving `story.tmpl` as a stub
Once we are using pages consistently everywhere, this issue can be closed.
--- END ISSUE ---
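For orientation before the code below: the practical change the checklist asks for in this file is to stop pointing freshly generated sites at `story.tmpl`. A minimal sketch of what the generated `PAGES` setting should look like afterwards is given here; it simply restates the intent of the issue (and matches the golden diff further down), so treat it as an illustration rather than the authoritative patch.
```python
# Sketch: PAGES tuples a freshly generated conf.py would contain once the
# "stories" naming is gone -- each entry maps (source glob, destination, template).
PAGES = (
    ("pages/*.rst", "pages", "page.tmpl"),   # previously "story.tmpl"
    ("pages/*.txt", "pages", "page.tmpl"),   # previously "story.tmpl"
    ("pages/*.html", "pages", "page.tmpl"),  # previously "story.tmpl"
)
```
The template side of the checklist (leaving `story.tmpl` as a stub that defers to `page.tmpl`) lives in the theme files, which are not shown in this record; any concrete stub content is therefore an assumption rather than something derivable from the file below.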
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/command/init.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2017 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Create a new site."""
28
29 from __future__ import print_function, unicode_literals
30 import os
31 import shutil
32 import io
33 import json
34 import textwrap
35 import datetime
36 import unidecode
37 import dateutil.tz
38 import dateutil.zoneinfo
39 from mako.template import Template
40 from pkg_resources import resource_filename
41 import tarfile
42
43 import nikola
44 from nikola.nikola import DEFAULT_TRANSLATIONS_PATTERN, DEFAULT_INDEX_READ_MORE_LINK, DEFAULT_FEED_READ_MORE_LINK, LEGAL_VALUES, urlsplit, urlunsplit
45 from nikola.plugin_categories import Command
46 from nikola.utils import ask, ask_yesno, get_logger, makedirs, STDERR_HANDLER, load_messages
47 from nikola.packages.tzlocal import get_localzone
48
49
50 LOGGER = get_logger('init', STDERR_HANDLER)
51
52 SAMPLE_CONF = {
53 'BLOG_AUTHOR': "Your Name",
54 'BLOG_TITLE': "Demo Site",
55 'SITE_URL': "https://example.com/",
56 'BLOG_EMAIL': "[email protected]",
57 'BLOG_DESCRIPTION': "This is a demo site for Nikola.",
58 'PRETTY_URLS': False,
59 'STRIP_INDEXES': False,
60 'DEFAULT_LANG': "en",
61 'TRANSLATIONS': """{
62 DEFAULT_LANG: "",
63 # Example for another language:
64 # "es": "./es",
65 }""",
66 'THEME': 'bootstrap3',
67 'TIMEZONE': 'UTC',
68 'COMMENT_SYSTEM': 'disqus',
69 'COMMENT_SYSTEM_ID': 'nikolademo',
70 'CATEGORY_ALLOW_HIERARCHIES': False,
71 'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,
72 'TRANSLATIONS_PATTERN': DEFAULT_TRANSLATIONS_PATTERN,
73 'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,
74 'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,
75 'POSTS': """(
76 ("posts/*.rst", "posts", "post.tmpl"),
77 ("posts/*.txt", "posts", "post.tmpl"),
78 ("posts/*.html", "posts", "post.tmpl"),
79 )""",
80 'PAGES': """(
81 ("pages/*.rst", "pages", "story.tmpl"),
82 ("pages/*.txt", "pages", "story.tmpl"),
83 ("pages/*.html", "pages", "story.tmpl"),
84 )""",
85 'COMPILERS': """{
86 "rest": ('.rst', '.txt'),
87 "markdown": ('.md', '.mdown', '.markdown'),
88 "textile": ('.textile',),
89 "txt2tags": ('.t2t',),
90 "bbcode": ('.bb',),
91 "wiki": ('.wiki',),
92 "ipynb": ('.ipynb',),
93 "html": ('.html', '.htm'),
94 # PHP files are rendered the usual way (i.e. with the full templates).
95 # The resulting files have .php extensions, making it possible to run
96 # them without reconfiguring your server to recognize them.
97 "php": ('.php',),
98 # Pandoc detects the input from the source filename
99 # but is disabled by default as it would conflict
100 # with many of the others.
101 # "pandoc": ('.rst', '.md', '.txt'),
102 }""",
103 'NAVIGATION_LINKS': """{
104 DEFAULT_LANG: (
105 ("/archive.html", "Archives"),
106 ("/categories/index.html", "Tags"),
107 ("/rss.xml", "RSS feed"),
108 ),
109 }""",
110 'REDIRECTIONS': [],
111 }
112
113
114 # Generate a list of supported languages here.
115 # Ugly code follows.
116 _suplang = {}
117 _sllength = 0
118
119 for k, v in LEGAL_VALUES['TRANSLATIONS'].items():
120 if not isinstance(k, tuple):
121 main = k
122 _suplang[main] = v
123 else:
124 main = k[0]
125 k = k[1:]
126 bad = []
127 good = []
128 for i in k:
129 if i.startswith('!'):
130 bad.append(i[1:])
131 else:
132 good.append(i)
133 different = ''
134 if good or bad:
135 different += ' ['
136 if good:
137 different += 'ALTERNATIVELY ' + ', '.join(good)
138 if bad:
139 if good:
140 different += '; '
141 different += 'NOT ' + ', '.join(bad)
142 if good or bad:
143 different += ']'
144 _suplang[main] = v + different
145
146 if len(main) > _sllength:
147 _sllength = len(main)
148
149 _sllength = str(_sllength)
150 suplang = (u'# {0:<' + _sllength + u'} {1}\n').format('en', 'English')
151 del _suplang['en']
152 for k, v in sorted(_suplang.items()):
153 suplang += (u'# {0:<' + _sllength + u'} {1}\n').format(k, v)
154
155 SAMPLE_CONF['_SUPPORTED_LANGUAGES'] = suplang.strip()
156
157 # Generate a list of supported comment systems here.
158
159 SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'] = '\n'.join(textwrap.wrap(
160 u', '.join(LEGAL_VALUES['COMMENT_SYSTEM']),
161 initial_indent=u'# ', subsequent_indent=u'# ', width=79))
162
163
164 def format_default_translations_config(additional_languages):
165 """Adapt TRANSLATIONS setting for all additional languages."""
166 if not additional_languages:
167 return SAMPLE_CONF["TRANSLATIONS"]
168 lang_paths = [' DEFAULT_LANG: "",']
169 for lang in sorted(additional_languages):
170 lang_paths.append(' "{0}": "./{0}",'.format(lang))
171 return "{{\n{0}\n}}".format("\n".join(lang_paths))
172
173
174 def format_navigation_links(additional_languages, default_lang, messages, strip_indexes=False):
175 """Return the string to configure NAVIGATION_LINKS."""
176 f = u"""\
177 {0}: (
178 ("{1}/archive.html", "{2[Archive]}"),
179 ("{1}/categories/{3}", "{2[Tags]}"),
180 ("{1}/rss.xml", "{2[RSS feed]}"),
181 ),"""
182
183 pairs = []
184
185 def get_msg(lang):
186 """Generate a smaller messages dict with fallback."""
187 fmsg = {}
188 for i in (u'Archive', u'Tags', u'RSS feed'):
189 if messages[lang][i]:
190 fmsg[i] = messages[lang][i]
191 else:
192 fmsg[i] = i
193 return fmsg
194
195 if strip_indexes:
196 index_html = ''
197 else:
198 index_html = 'index.html'
199
200 # handle the default language
201 pairs.append(f.format('DEFAULT_LANG', '', get_msg(default_lang), index_html))
202
203 for l in additional_languages:
204 pairs.append(f.format(json.dumps(l, ensure_ascii=False), '/' + l, get_msg(l), index_html))
205
206 return u'{{\n{0}\n}}'.format('\n\n'.join(pairs))
207
208
209 # In order to ensure proper escaping, all variables but the pre-formatted ones
210 # are handled by json.dumps().
211 def prepare_config(config):
212 """Parse sample config with JSON."""
213 p = config.copy()
214 p.update({k: json.dumps(v, ensure_ascii=False) for k, v in p.items()
215 if k not in ('POSTS', 'PAGES', 'COMPILERS', 'TRANSLATIONS', 'NAVIGATION_LINKS', '_SUPPORTED_LANGUAGES', '_SUPPORTED_COMMENT_SYSTEMS', 'INDEX_READ_MORE_LINK', 'FEED_READ_MORE_LINK')})
216 # READ_MORE_LINKs require some special treatment.
217 p['INDEX_READ_MORE_LINK'] = "'" + p['INDEX_READ_MORE_LINK'].replace("'", "\\'") + "'"
218 p['FEED_READ_MORE_LINK'] = "'" + p['FEED_READ_MORE_LINK'].replace("'", "\\'") + "'"
219 # fix booleans and None
220 p.update({k: str(v) for k, v in config.items() if isinstance(v, bool) or v is None})
221 return p
222
223
224 def test_destination(destination, demo=False):
225 """Check if the destination already exists, which can break demo site creation."""
226 # Issue #2214
227 if demo and os.path.exists(destination):
228 LOGGER.warning("The directory {0} already exists, and a new demo site cannot be initialized in an existing directory.".format(destination))
229 LOGGER.warning("Please remove the directory and try again, or use another directory.")
230 LOGGER.info("Hint: If you want to initialize a git repository in this directory, run `git init` in the directory after creating a Nikola site.")
231 return False
232 else:
233 return True
234
235
236 class CommandInit(Command):
237 """Create a new site."""
238
239 name = "init"
240
241 doc_usage = "[--demo] [--quiet] folder"
242 needs_config = False
243 doc_purpose = "create a Nikola site in the specified folder"
244 cmd_options = [
245 {
246 'name': 'quiet',
247 'long': 'quiet',
248 'short': 'q',
249 'default': False,
250 'type': bool,
251 'help': "Do not ask questions about config.",
252 },
253 {
254 'name': 'demo',
255 'long': 'demo',
256 'short': 'd',
257 'default': False,
258 'type': bool,
259 'help': "Create a site filled with example data.",
260 }
261 ]
262
263 @classmethod
264 def copy_sample_site(cls, target):
265 """Copy sample site data to target directory."""
266 src = resource_filename('nikola', os.path.join('data', 'samplesite'))
267 shutil.copytree(src, target)
268
269 @staticmethod
270 def create_configuration(target):
271 """Create configuration file."""
272 template_path = resource_filename('nikola', 'conf.py.in')
273 conf_template = Template(filename=template_path)
274 conf_path = os.path.join(target, 'conf.py')
275 with io.open(conf_path, 'w+', encoding='utf8') as fd:
276 fd.write(conf_template.render(**prepare_config(SAMPLE_CONF)))
277
278 @staticmethod
279 def create_configuration_to_string():
280 """Return configuration file as a string."""
281 template_path = resource_filename('nikola', 'conf.py.in')
282 conf_template = Template(filename=template_path)
283 return conf_template.render(**prepare_config(SAMPLE_CONF))
284
285 @classmethod
286 def create_empty_site(cls, target):
287 """Create an empty site with directories only."""
288 for folder in ('files', 'galleries', 'listings', 'posts', 'pages'):
289 makedirs(os.path.join(target, folder))
290
291 @staticmethod
292 def ask_questions(target, demo=False):
293 """Ask some questions about Nikola."""
294 def urlhandler(default, toconf):
295 answer = ask('Site URL', 'https://example.com/')
296 try:
297 answer = answer.decode('utf-8')
298 except (AttributeError, UnicodeDecodeError):
299 pass
300 if not answer.startswith(u'http'):
301 print(" ERROR: You must specify a protocol (http or https).")
302 urlhandler(default, toconf)
303 return
304 if not answer.endswith('/'):
305 print(" The URL does not end in '/' -- adding it.")
306 answer += '/'
307
308 dst_url = urlsplit(answer)
309 try:
310 dst_url.netloc.encode('ascii')
311 except (UnicodeEncodeError, UnicodeDecodeError):
312 # The IDN contains characters beyond ASCII. We must convert it
313 # to Punycode. (Issue #1644)
314 nl = dst_url.netloc.encode('idna')
315 answer = urlunsplit((dst_url.scheme,
316 nl,
317 dst_url.path,
318 dst_url.query,
319 dst_url.fragment))
320 print(" Converting to Punycode:", answer)
321
322 SAMPLE_CONF['SITE_URL'] = answer
323
324 def prettyhandler(default, toconf):
325 SAMPLE_CONF['PRETTY_URLS'] = ask_yesno('Enable pretty URLs (/page/ instead of /page.html) that don\'t need web server configuration?', default=True)
326 SAMPLE_CONF['STRIP_INDEXES'] = SAMPLE_CONF['PRETTY_URLS']
327
328 def lhandler(default, toconf, show_header=True):
329 if show_header:
330 print("We will now ask you to provide the list of languages you want to use.")
331 print("Please list all the desired languages, comma-separated, using ISO 639-1 codes. The first language will be used as the default.")
332 print("Type '?' (a question mark, sans quotes) to list available languages.")
333 answer = ask('Language(s) to use', 'en')
334 while answer.strip() == '?':
335 print('\n# Available languages:')
336 try:
337 print(SAMPLE_CONF['_SUPPORTED_LANGUAGES'] + '\n')
338 except UnicodeEncodeError:
339 # avoid Unicode characters in supported language names
340 print(unidecode.unidecode(SAMPLE_CONF['_SUPPORTED_LANGUAGES']) + '\n')
341 answer = ask('Language(s) to use', 'en')
342
343 langs = [i.strip().lower().replace('-', '_') for i in answer.split(',')]
344 for partial, full in LEGAL_VALUES['_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS'].items():
345 if partial in langs:
346 langs[langs.index(partial)] = full
347 print("NOTICE: Assuming '{0}' instead of '{1}'.".format(full, partial))
348
349 default = langs.pop(0)
350 SAMPLE_CONF['DEFAULT_LANG'] = default
351 # format_default_translations_config() is intelligent enough to
352 # return the current value if there are no additional languages.
353 SAMPLE_CONF['TRANSLATIONS'] = format_default_translations_config(langs)
354
355 # Get messages for navigation_links. In order to do this, we need
356 # to generate a throwaway TRANSLATIONS dict.
357 tr = {default: ''}
358 for l in langs:
359 tr[l] = './' + l
360 # Assuming that base contains all the locales, and that base does
361 # not inherit from anywhere.
362 try:
363 messages = load_messages(['base'], tr, default, themes_dirs=['themes'])
364 SAMPLE_CONF['NAVIGATION_LINKS'] = format_navigation_links(langs, default, messages, SAMPLE_CONF['STRIP_INDEXES'])
365 except nikola.utils.LanguageNotFoundError as e:
366 print(" ERROR: the language '{0}' is not supported.".format(e.lang))
367 print(" Are you sure you spelled the name correctly? Names are case-sensitive and need to be reproduced as-is (complete with the country specifier, if any).")
368 print("\nType '?' (a question mark, sans quotes) to list available languages.")
369 lhandler(default, toconf, show_header=False)
370
371 def tzhandler(default, toconf):
372 print("\nPlease choose the correct time zone for your blog. Nikola uses the tz database.")
373 print("You can find your time zone here:")
374 print("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones")
375 print("")
376 answered = False
377 while not answered:
378 try:
379 lz = get_localzone()
380 except:
381 lz = None
382 answer = ask('Time zone', lz if lz else "UTC")
383 tz = dateutil.tz.gettz(answer)
384
385 if tz is None:
386 print(" WARNING: Time zone not found. Searching list of time zones for a match.")
387 zonesfile = tarfile.open(fileobj=dateutil.zoneinfo.getzoneinfofile_stream())
388 zonenames = [zone for zone in zonesfile.getnames() if answer.lower() in zone.lower()]
389 if len(zonenames) == 1:
390 tz = dateutil.tz.gettz(zonenames[0])
391 answer = zonenames[0]
392 print(" Picking '{0}'.".format(answer))
393 elif len(zonenames) > 1:
394 print(" The following time zones match your query:")
395 print(' ' + '\n '.join(zonenames))
396 continue
397
398 if tz is not None:
399 time = datetime.datetime.now(tz).strftime('%H:%M:%S')
400 print(" Current time in {0}: {1}".format(answer, time))
401 answered = ask_yesno("Use this time zone?", True)
402 else:
403 print(" ERROR: No matches found. Please try again.")
404
405 SAMPLE_CONF['TIMEZONE'] = answer
406
407 def chandler(default, toconf):
408 print("You can configure comments now. Type '?' (a question mark, sans quotes) to list available comment systems. If you do not want any comments, just leave the field blank.")
409 answer = ask('Comment system', '')
410 while answer.strip() == '?':
411 print('\n# Available comment systems:')
412 print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])
413 print('')
414 answer = ask('Comment system', '')
415
416 while answer and answer not in LEGAL_VALUES['COMMENT_SYSTEM']:
417 if answer != '?':
418 print(' ERROR: Nikola does not know this comment system.')
419 print('\n# Available comment systems:')
420 print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])
421 print('')
422 answer = ask('Comment system', '')
423
424 SAMPLE_CONF['COMMENT_SYSTEM'] = answer
425 SAMPLE_CONF['COMMENT_SYSTEM_ID'] = ''
426
427 if answer:
428 print("You need to provide the site identifier for your comment system. Consult the Nikola manual for details on what the value should be. (you can leave it empty and come back later)")
429 answer = ask('Comment system site identifier', '')
430 SAMPLE_CONF['COMMENT_SYSTEM_ID'] = answer
431
432 STORAGE = {'target': target}
433
434 questions = [
435 ('Questions about the site', None, None, None),
436 # query, default, toconf, destination
437 ('Destination', None, False, '!target'),
438 ('Site title', 'My Nikola Site', True, 'BLOG_TITLE'),
439 ('Site author', 'Nikola Tesla', True, 'BLOG_AUTHOR'),
440 ('Site author\'s e-mail', '[email protected]', True, 'BLOG_EMAIL'),
441 ('Site description', 'This is a demo site for Nikola.', True, 'BLOG_DESCRIPTION'),
442 (urlhandler, None, True, True),
443 (prettyhandler, None, True, True),
444 ('Questions about languages and locales', None, None, None),
445 (lhandler, None, True, True),
446 (tzhandler, None, True, True),
447 ('Questions about comments', None, None, None),
448 (chandler, None, True, True),
449 ]
450
451 print("Creating Nikola Site")
452 print("====================\n")
453 print("This is Nikola v{0}. We will now ask you a few easy questions about your new site.".format(nikola.__version__))
454 print("If you do not want to answer and want to go with the defaults instead, simply restart with the `-q` parameter.")
455
456 for query, default, toconf, destination in questions:
457 if target and destination == '!target' and test_destination(target, demo):
458 # Skip the destination question if we know it already
459 pass
460 else:
461 if default is toconf is destination is None:
462 print('--- {0} ---'.format(query))
463 elif destination is True:
464 query(default, toconf)
465 else:
466 answer = ask(query, default)
467 try:
468 answer = answer.decode('utf-8')
469 except (AttributeError, UnicodeDecodeError):
470 pass
471 if toconf:
472 SAMPLE_CONF[destination] = answer
473 if destination == '!target':
474 while not answer or not test_destination(answer, demo):
475 if not answer:
476 print(' ERROR: you need to specify a target directory.\n')
477 answer = ask(query, default)
478 STORAGE['target'] = answer
479
480 print("\nThat's it, Nikola is now configured. Make sure to edit conf.py to your liking.")
481 print("If you are looking for themes and addons, check out https://themes.getnikola.com/ and https://plugins.getnikola.com/.")
482 print("Have fun!")
483 return STORAGE
484
485 def _execute(self, options={}, args=None):
486 """Create a new site."""
487 try:
488 target = args[0]
489 except IndexError:
490 target = None
491 if not options.get('quiet'):
492 st = self.ask_questions(target=target, demo=options.get('demo'))
493 try:
494 if not target:
495 target = st['target']
496 except KeyError:
497 pass
498
499 if not target:
500 print("Usage: nikola init [--demo] [--quiet] folder")
501 print("""
502 Options:
503 -q, --quiet Do not ask questions about config.
504 -d, --demo Create a site filled with example data.""")
505 return 1
506 if not options.get('demo'):
507 self.create_empty_site(target)
508 LOGGER.info('Created empty site at {0}.'.format(target))
509 else:
510 if not test_destination(target, True):
511 return 2
512 self.copy_sample_site(target)
513 LOGGER.info("A new site with example data has been created at "
514 "{0}.".format(target))
515 LOGGER.info("See README.txt in that folder for more information.")
516
517 self.create_configuration(target)
518
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/command/init.py b/nikola/plugins/command/init.py
--- a/nikola/plugins/command/init.py
+++ b/nikola/plugins/command/init.py
@@ -78,9 +78,9 @@
("posts/*.html", "posts", "post.tmpl"),
)""",
'PAGES': """(
- ("pages/*.rst", "pages", "story.tmpl"),
- ("pages/*.txt", "pages", "story.tmpl"),
- ("pages/*.html", "pages", "story.tmpl"),
+ ("pages/*.rst", "pages", "page.tmpl"),
+ ("pages/*.txt", "pages", "page.tmpl"),
+ ("pages/*.html", "pages", "page.tmpl"),
)""",
'COMPILERS': """{
"rest": ('.rst', '.txt'),
| {"golden_diff": "diff --git a/nikola/plugins/command/init.py b/nikola/plugins/command/init.py\n--- a/nikola/plugins/command/init.py\n+++ b/nikola/plugins/command/init.py\n@@ -78,9 +78,9 @@\n (\"posts/*.html\", \"posts\", \"post.tmpl\"),\n )\"\"\",\n 'PAGES': \"\"\"(\n- (\"pages/*.rst\", \"pages\", \"story.tmpl\"),\n- (\"pages/*.txt\", \"pages\", \"story.tmpl\"),\n- (\"pages/*.html\", \"pages\", \"story.tmpl\"),\n+ (\"pages/*.rst\", \"pages\", \"page.tmpl\"),\n+ (\"pages/*.txt\", \"pages\", \"page.tmpl\"),\n+ (\"pages/*.html\", \"pages\", \"page.tmpl\"),\n )\"\"\",\n 'COMPILERS': \"\"\"{\n \"rest\": ('.rst', '.txt'),\n", "issue": "Get rid of \"stories\" everywhere\nThe \"stories\" term is a remnant of my own config before I even started writing Nikola. In fact, the term comes from PyDS, a blogging software so old it doesn't even have a website anymore.\r\n\r\nSo, the right term is \"pages\", let's use it.\r\n- [x] Remove \"stories\" from the docs\r\n- [x] Remove \"stories\" from the config (for new sites at least)\r\n- [x] Remove \"stories\" from the code (if it's still there) (done except for public APIs)\r\n- [x] Move `story.tmpl` code to `page.tmpl`, leaving `story.tmpl` as a stub\r\n\r\nOnce we are using pages consistently everywhere, this closes.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2017 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Create a new site.\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nimport os\nimport shutil\nimport io\nimport json\nimport textwrap\nimport datetime\nimport unidecode\nimport dateutil.tz\nimport dateutil.zoneinfo\nfrom mako.template import Template\nfrom pkg_resources import resource_filename\nimport tarfile\n\nimport nikola\nfrom nikola.nikola import DEFAULT_TRANSLATIONS_PATTERN, DEFAULT_INDEX_READ_MORE_LINK, DEFAULT_FEED_READ_MORE_LINK, LEGAL_VALUES, urlsplit, urlunsplit\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import ask, ask_yesno, get_logger, makedirs, STDERR_HANDLER, load_messages\nfrom nikola.packages.tzlocal import get_localzone\n\n\nLOGGER = get_logger('init', STDERR_HANDLER)\n\nSAMPLE_CONF = {\n 'BLOG_AUTHOR': \"Your Name\",\n 'BLOG_TITLE': \"Demo Site\",\n 'SITE_URL': \"https://example.com/\",\n 'BLOG_EMAIL': \"[email protected]\",\n 'BLOG_DESCRIPTION': \"This is a demo site for Nikola.\",\n 'PRETTY_URLS': False,\n 'STRIP_INDEXES': False,\n 'DEFAULT_LANG': \"en\",\n 'TRANSLATIONS': \"\"\"{\n DEFAULT_LANG: \"\",\n # Example for another language:\n # \"es\": \"./es\",\n}\"\"\",\n 'THEME': 'bootstrap3',\n 'TIMEZONE': 'UTC',\n 'COMMENT_SYSTEM': 'disqus',\n 'COMMENT_SYSTEM_ID': 'nikolademo',\n 'CATEGORY_ALLOW_HIERARCHIES': False,\n 'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,\n 'TRANSLATIONS_PATTERN': DEFAULT_TRANSLATIONS_PATTERN,\n 'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,\n 'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,\n 'POSTS': \"\"\"(\n (\"posts/*.rst\", \"posts\", \"post.tmpl\"),\n (\"posts/*.txt\", \"posts\", \"post.tmpl\"),\n (\"posts/*.html\", \"posts\", \"post.tmpl\"),\n)\"\"\",\n 'PAGES': \"\"\"(\n (\"pages/*.rst\", \"pages\", \"story.tmpl\"),\n (\"pages/*.txt\", \"pages\", \"story.tmpl\"),\n (\"pages/*.html\", \"pages\", \"story.tmpl\"),\n)\"\"\",\n 'COMPILERS': \"\"\"{\n \"rest\": ('.rst', '.txt'),\n \"markdown\": ('.md', '.mdown', '.markdown'),\n \"textile\": ('.textile',),\n \"txt2tags\": ('.t2t',),\n \"bbcode\": ('.bb',),\n \"wiki\": ('.wiki',),\n \"ipynb\": ('.ipynb',),\n \"html\": ('.html', '.htm'),\n # PHP files are rendered the usual way (i.e. 
with the full templates).\n # The resulting files have .php extensions, making it possible to run\n # them without reconfiguring your server to recognize them.\n \"php\": ('.php',),\n # Pandoc detects the input from the source filename\n # but is disabled by default as it would conflict\n # with many of the others.\n # \"pandoc\": ('.rst', '.md', '.txt'),\n}\"\"\",\n 'NAVIGATION_LINKS': \"\"\"{\n DEFAULT_LANG: (\n (\"/archive.html\", \"Archives\"),\n (\"/categories/index.html\", \"Tags\"),\n (\"/rss.xml\", \"RSS feed\"),\n ),\n}\"\"\",\n 'REDIRECTIONS': [],\n}\n\n\n# Generate a list of supported languages here.\n# Ugly code follows.\n_suplang = {}\n_sllength = 0\n\nfor k, v in LEGAL_VALUES['TRANSLATIONS'].items():\n if not isinstance(k, tuple):\n main = k\n _suplang[main] = v\n else:\n main = k[0]\n k = k[1:]\n bad = []\n good = []\n for i in k:\n if i.startswith('!'):\n bad.append(i[1:])\n else:\n good.append(i)\n different = ''\n if good or bad:\n different += ' ['\n if good:\n different += 'ALTERNATIVELY ' + ', '.join(good)\n if bad:\n if good:\n different += '; '\n different += 'NOT ' + ', '.join(bad)\n if good or bad:\n different += ']'\n _suplang[main] = v + different\n\n if len(main) > _sllength:\n _sllength = len(main)\n\n_sllength = str(_sllength)\nsuplang = (u'# {0:<' + _sllength + u'} {1}\\n').format('en', 'English')\ndel _suplang['en']\nfor k, v in sorted(_suplang.items()):\n suplang += (u'# {0:<' + _sllength + u'} {1}\\n').format(k, v)\n\nSAMPLE_CONF['_SUPPORTED_LANGUAGES'] = suplang.strip()\n\n# Generate a list of supported comment systems here.\n\nSAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'] = '\\n'.join(textwrap.wrap(\n u', '.join(LEGAL_VALUES['COMMENT_SYSTEM']),\n initial_indent=u'# ', subsequent_indent=u'# ', width=79))\n\n\ndef format_default_translations_config(additional_languages):\n \"\"\"Adapt TRANSLATIONS setting for all additional languages.\"\"\"\n if not additional_languages:\n return SAMPLE_CONF[\"TRANSLATIONS\"]\n lang_paths = [' DEFAULT_LANG: \"\",']\n for lang in sorted(additional_languages):\n lang_paths.append(' \"{0}\": \"./{0}\",'.format(lang))\n return \"{{\\n{0}\\n}}\".format(\"\\n\".join(lang_paths))\n\n\ndef format_navigation_links(additional_languages, default_lang, messages, strip_indexes=False):\n \"\"\"Return the string to configure NAVIGATION_LINKS.\"\"\"\n f = u\"\"\"\\\n {0}: (\n (\"{1}/archive.html\", \"{2[Archive]}\"),\n (\"{1}/categories/{3}\", \"{2[Tags]}\"),\n (\"{1}/rss.xml\", \"{2[RSS feed]}\"),\n ),\"\"\"\n\n pairs = []\n\n def get_msg(lang):\n \"\"\"Generate a smaller messages dict with fallback.\"\"\"\n fmsg = {}\n for i in (u'Archive', u'Tags', u'RSS feed'):\n if messages[lang][i]:\n fmsg[i] = messages[lang][i]\n else:\n fmsg[i] = i\n return fmsg\n\n if strip_indexes:\n index_html = ''\n else:\n index_html = 'index.html'\n\n # handle the default language\n pairs.append(f.format('DEFAULT_LANG', '', get_msg(default_lang), index_html))\n\n for l in additional_languages:\n pairs.append(f.format(json.dumps(l, ensure_ascii=False), '/' + l, get_msg(l), index_html))\n\n return u'{{\\n{0}\\n}}'.format('\\n\\n'.join(pairs))\n\n\n# In order to ensure proper escaping, all variables but the pre-formatted ones\n# are handled by json.dumps().\ndef prepare_config(config):\n \"\"\"Parse sample config with JSON.\"\"\"\n p = config.copy()\n p.update({k: json.dumps(v, ensure_ascii=False) for k, v in p.items()\n if k not in ('POSTS', 'PAGES', 'COMPILERS', 'TRANSLATIONS', 'NAVIGATION_LINKS', '_SUPPORTED_LANGUAGES', '_SUPPORTED_COMMENT_SYSTEMS', 
'INDEX_READ_MORE_LINK', 'FEED_READ_MORE_LINK')})\n # READ_MORE_LINKs require some special treatment.\n p['INDEX_READ_MORE_LINK'] = \"'\" + p['INDEX_READ_MORE_LINK'].replace(\"'\", \"\\\\'\") + \"'\"\n p['FEED_READ_MORE_LINK'] = \"'\" + p['FEED_READ_MORE_LINK'].replace(\"'\", \"\\\\'\") + \"'\"\n # fix booleans and None\n p.update({k: str(v) for k, v in config.items() if isinstance(v, bool) or v is None})\n return p\n\n\ndef test_destination(destination, demo=False):\n \"\"\"Check if the destination already exists, which can break demo site creation.\"\"\"\n # Issue #2214\n if demo and os.path.exists(destination):\n LOGGER.warning(\"The directory {0} already exists, and a new demo site cannot be initialized in an existing directory.\".format(destination))\n LOGGER.warning(\"Please remove the directory and try again, or use another directory.\")\n LOGGER.info(\"Hint: If you want to initialize a git repository in this directory, run `git init` in the directory after creating a Nikola site.\")\n return False\n else:\n return True\n\n\nclass CommandInit(Command):\n \"\"\"Create a new site.\"\"\"\n\n name = \"init\"\n\n doc_usage = \"[--demo] [--quiet] folder\"\n needs_config = False\n doc_purpose = \"create a Nikola site in the specified folder\"\n cmd_options = [\n {\n 'name': 'quiet',\n 'long': 'quiet',\n 'short': 'q',\n 'default': False,\n 'type': bool,\n 'help': \"Do not ask questions about config.\",\n },\n {\n 'name': 'demo',\n 'long': 'demo',\n 'short': 'd',\n 'default': False,\n 'type': bool,\n 'help': \"Create a site filled with example data.\",\n }\n ]\n\n @classmethod\n def copy_sample_site(cls, target):\n \"\"\"Copy sample site data to target directory.\"\"\"\n src = resource_filename('nikola', os.path.join('data', 'samplesite'))\n shutil.copytree(src, target)\n\n @staticmethod\n def create_configuration(target):\n \"\"\"Create configuration file.\"\"\"\n template_path = resource_filename('nikola', 'conf.py.in')\n conf_template = Template(filename=template_path)\n conf_path = os.path.join(target, 'conf.py')\n with io.open(conf_path, 'w+', encoding='utf8') as fd:\n fd.write(conf_template.render(**prepare_config(SAMPLE_CONF)))\n\n @staticmethod\n def create_configuration_to_string():\n \"\"\"Return configuration file as a string.\"\"\"\n template_path = resource_filename('nikola', 'conf.py.in')\n conf_template = Template(filename=template_path)\n return conf_template.render(**prepare_config(SAMPLE_CONF))\n\n @classmethod\n def create_empty_site(cls, target):\n \"\"\"Create an empty site with directories only.\"\"\"\n for folder in ('files', 'galleries', 'listings', 'posts', 'pages'):\n makedirs(os.path.join(target, folder))\n\n @staticmethod\n def ask_questions(target, demo=False):\n \"\"\"Ask some questions about Nikola.\"\"\"\n def urlhandler(default, toconf):\n answer = ask('Site URL', 'https://example.com/')\n try:\n answer = answer.decode('utf-8')\n except (AttributeError, UnicodeDecodeError):\n pass\n if not answer.startswith(u'http'):\n print(\" ERROR: You must specify a protocol (http or https).\")\n urlhandler(default, toconf)\n return\n if not answer.endswith('/'):\n print(\" The URL does not end in '/' -- adding it.\")\n answer += '/'\n\n dst_url = urlsplit(answer)\n try:\n dst_url.netloc.encode('ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n # The IDN contains characters beyond ASCII. We must convert it\n # to Punycode. 
(Issue #1644)\n nl = dst_url.netloc.encode('idna')\n answer = urlunsplit((dst_url.scheme,\n nl,\n dst_url.path,\n dst_url.query,\n dst_url.fragment))\n print(\" Converting to Punycode:\", answer)\n\n SAMPLE_CONF['SITE_URL'] = answer\n\n def prettyhandler(default, toconf):\n SAMPLE_CONF['PRETTY_URLS'] = ask_yesno('Enable pretty URLs (/page/ instead of /page.html) that don\\'t need web server configuration?', default=True)\n SAMPLE_CONF['STRIP_INDEXES'] = SAMPLE_CONF['PRETTY_URLS']\n\n def lhandler(default, toconf, show_header=True):\n if show_header:\n print(\"We will now ask you to provide the list of languages you want to use.\")\n print(\"Please list all the desired languages, comma-separated, using ISO 639-1 codes. The first language will be used as the default.\")\n print(\"Type '?' (a question mark, sans quotes) to list available languages.\")\n answer = ask('Language(s) to use', 'en')\n while answer.strip() == '?':\n print('\\n# Available languages:')\n try:\n print(SAMPLE_CONF['_SUPPORTED_LANGUAGES'] + '\\n')\n except UnicodeEncodeError:\n # avoid Unicode characters in supported language names\n print(unidecode.unidecode(SAMPLE_CONF['_SUPPORTED_LANGUAGES']) + '\\n')\n answer = ask('Language(s) to use', 'en')\n\n langs = [i.strip().lower().replace('-', '_') for i in answer.split(',')]\n for partial, full in LEGAL_VALUES['_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS'].items():\n if partial in langs:\n langs[langs.index(partial)] = full\n print(\"NOTICE: Assuming '{0}' instead of '{1}'.\".format(full, partial))\n\n default = langs.pop(0)\n SAMPLE_CONF['DEFAULT_LANG'] = default\n # format_default_translations_config() is intelligent enough to\n # return the current value if there are no additional languages.\n SAMPLE_CONF['TRANSLATIONS'] = format_default_translations_config(langs)\n\n # Get messages for navigation_links. In order to do this, we need\n # to generate a throwaway TRANSLATIONS dict.\n tr = {default: ''}\n for l in langs:\n tr[l] = './' + l\n # Assuming that base contains all the locales, and that base does\n # not inherit from anywhere.\n try:\n messages = load_messages(['base'], tr, default, themes_dirs=['themes'])\n SAMPLE_CONF['NAVIGATION_LINKS'] = format_navigation_links(langs, default, messages, SAMPLE_CONF['STRIP_INDEXES'])\n except nikola.utils.LanguageNotFoundError as e:\n print(\" ERROR: the language '{0}' is not supported.\".format(e.lang))\n print(\" Are you sure you spelled the name correctly? Names are case-sensitive and need to be reproduced as-is (complete with the country specifier, if any).\")\n print(\"\\nType '?' (a question mark, sans quotes) to list available languages.\")\n lhandler(default, toconf, show_header=False)\n\n def tzhandler(default, toconf):\n print(\"\\nPlease choose the correct time zone for your blog. Nikola uses the tz database.\")\n print(\"You can find your time zone here:\")\n print(\"https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\")\n print(\"\")\n answered = False\n while not answered:\n try:\n lz = get_localzone()\n except:\n lz = None\n answer = ask('Time zone', lz if lz else \"UTC\")\n tz = dateutil.tz.gettz(answer)\n\n if tz is None:\n print(\" WARNING: Time zone not found. 
Searching list of time zones for a match.\")\n zonesfile = tarfile.open(fileobj=dateutil.zoneinfo.getzoneinfofile_stream())\n zonenames = [zone for zone in zonesfile.getnames() if answer.lower() in zone.lower()]\n if len(zonenames) == 1:\n tz = dateutil.tz.gettz(zonenames[0])\n answer = zonenames[0]\n print(\" Picking '{0}'.\".format(answer))\n elif len(zonenames) > 1:\n print(\" The following time zones match your query:\")\n print(' ' + '\\n '.join(zonenames))\n continue\n\n if tz is not None:\n time = datetime.datetime.now(tz).strftime('%H:%M:%S')\n print(\" Current time in {0}: {1}\".format(answer, time))\n answered = ask_yesno(\"Use this time zone?\", True)\n else:\n print(\" ERROR: No matches found. Please try again.\")\n\n SAMPLE_CONF['TIMEZONE'] = answer\n\n def chandler(default, toconf):\n print(\"You can configure comments now. Type '?' (a question mark, sans quotes) to list available comment systems. If you do not want any comments, just leave the field blank.\")\n answer = ask('Comment system', '')\n while answer.strip() == '?':\n print('\\n# Available comment systems:')\n print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])\n print('')\n answer = ask('Comment system', '')\n\n while answer and answer not in LEGAL_VALUES['COMMENT_SYSTEM']:\n if answer != '?':\n print(' ERROR: Nikola does not know this comment system.')\n print('\\n# Available comment systems:')\n print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])\n print('')\n answer = ask('Comment system', '')\n\n SAMPLE_CONF['COMMENT_SYSTEM'] = answer\n SAMPLE_CONF['COMMENT_SYSTEM_ID'] = ''\n\n if answer:\n print(\"You need to provide the site identifier for your comment system. Consult the Nikola manual for details on what the value should be. (you can leave it empty and come back later)\")\n answer = ask('Comment system site identifier', '')\n SAMPLE_CONF['COMMENT_SYSTEM_ID'] = answer\n\n STORAGE = {'target': target}\n\n questions = [\n ('Questions about the site', None, None, None),\n # query, default, toconf, destination\n ('Destination', None, False, '!target'),\n ('Site title', 'My Nikola Site', True, 'BLOG_TITLE'),\n ('Site author', 'Nikola Tesla', True, 'BLOG_AUTHOR'),\n ('Site author\\'s e-mail', '[email protected]', True, 'BLOG_EMAIL'),\n ('Site description', 'This is a demo site for Nikola.', True, 'BLOG_DESCRIPTION'),\n (urlhandler, None, True, True),\n (prettyhandler, None, True, True),\n ('Questions about languages and locales', None, None, None),\n (lhandler, None, True, True),\n (tzhandler, None, True, True),\n ('Questions about comments', None, None, None),\n (chandler, None, True, True),\n ]\n\n print(\"Creating Nikola Site\")\n print(\"====================\\n\")\n print(\"This is Nikola v{0}. 
We will now ask you a few easy questions about your new site.\".format(nikola.__version__))\n print(\"If you do not want to answer and want to go with the defaults instead, simply restart with the `-q` parameter.\")\n\n for query, default, toconf, destination in questions:\n if target and destination == '!target' and test_destination(target, demo):\n # Skip the destination question if we know it already\n pass\n else:\n if default is toconf is destination is None:\n print('--- {0} ---'.format(query))\n elif destination is True:\n query(default, toconf)\n else:\n answer = ask(query, default)\n try:\n answer = answer.decode('utf-8')\n except (AttributeError, UnicodeDecodeError):\n pass\n if toconf:\n SAMPLE_CONF[destination] = answer\n if destination == '!target':\n while not answer or not test_destination(answer, demo):\n if not answer:\n print(' ERROR: you need to specify a target directory.\\n')\n answer = ask(query, default)\n STORAGE['target'] = answer\n\n print(\"\\nThat's it, Nikola is now configured. Make sure to edit conf.py to your liking.\")\n print(\"If you are looking for themes and addons, check out https://themes.getnikola.com/ and https://plugins.getnikola.com/.\")\n print(\"Have fun!\")\n return STORAGE\n\n def _execute(self, options={}, args=None):\n \"\"\"Create a new site.\"\"\"\n try:\n target = args[0]\n except IndexError:\n target = None\n if not options.get('quiet'):\n st = self.ask_questions(target=target, demo=options.get('demo'))\n try:\n if not target:\n target = st['target']\n except KeyError:\n pass\n\n if not target:\n print(\"Usage: nikola init [--demo] [--quiet] folder\")\n print(\"\"\"\nOptions:\n -q, --quiet Do not ask questions about config.\n -d, --demo Create a site filled with example data.\"\"\")\n return 1\n if not options.get('demo'):\n self.create_empty_site(target)\n LOGGER.info('Created empty site at {0}.'.format(target))\n else:\n if not test_destination(target, True):\n return 2\n self.copy_sample_site(target)\n LOGGER.info(\"A new site with example data has been created at \"\n \"{0}.\".format(target))\n LOGGER.info(\"See README.txt in that folder for more information.\")\n\n self.create_configuration(target)\n", "path": "nikola/plugins/command/init.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2017 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Create a new site.\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nimport os\nimport shutil\nimport io\nimport json\nimport textwrap\nimport datetime\nimport unidecode\nimport dateutil.tz\nimport dateutil.zoneinfo\nfrom mako.template import Template\nfrom pkg_resources import resource_filename\nimport tarfile\n\nimport nikola\nfrom nikola.nikola import DEFAULT_TRANSLATIONS_PATTERN, DEFAULT_INDEX_READ_MORE_LINK, DEFAULT_FEED_READ_MORE_LINK, LEGAL_VALUES, urlsplit, urlunsplit\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import ask, ask_yesno, get_logger, makedirs, STDERR_HANDLER, load_messages\nfrom nikola.packages.tzlocal import get_localzone\n\n\nLOGGER = get_logger('init', STDERR_HANDLER)\n\nSAMPLE_CONF = {\n 'BLOG_AUTHOR': \"Your Name\",\n 'BLOG_TITLE': \"Demo Site\",\n 'SITE_URL': \"https://example.com/\",\n 'BLOG_EMAIL': \"[email protected]\",\n 'BLOG_DESCRIPTION': \"This is a demo site for Nikola.\",\n 'PRETTY_URLS': False,\n 'STRIP_INDEXES': False,\n 'DEFAULT_LANG': \"en\",\n 'TRANSLATIONS': \"\"\"{\n DEFAULT_LANG: \"\",\n # Example for another language:\n # \"es\": \"./es\",\n}\"\"\",\n 'THEME': 'bootstrap3',\n 'TIMEZONE': 'UTC',\n 'COMMENT_SYSTEM': 'disqus',\n 'COMMENT_SYSTEM_ID': 'nikolademo',\n 'CATEGORY_ALLOW_HIERARCHIES': False,\n 'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,\n 'TRANSLATIONS_PATTERN': DEFAULT_TRANSLATIONS_PATTERN,\n 'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,\n 'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,\n 'POSTS': \"\"\"(\n (\"posts/*.rst\", \"posts\", \"post.tmpl\"),\n (\"posts/*.txt\", \"posts\", \"post.tmpl\"),\n (\"posts/*.html\", \"posts\", \"post.tmpl\"),\n)\"\"\",\n 'PAGES': \"\"\"(\n (\"pages/*.rst\", \"pages\", \"page.tmpl\"),\n (\"pages/*.txt\", \"pages\", \"page.tmpl\"),\n (\"pages/*.html\", \"pages\", \"page.tmpl\"),\n)\"\"\",\n 'COMPILERS': \"\"\"{\n \"rest\": ('.rst', '.txt'),\n \"markdown\": ('.md', '.mdown', '.markdown'),\n \"textile\": ('.textile',),\n \"txt2tags\": ('.t2t',),\n \"bbcode\": ('.bb',),\n \"wiki\": ('.wiki',),\n \"ipynb\": ('.ipynb',),\n \"html\": ('.html', '.htm'),\n # PHP files are rendered the usual way (i.e. 
with the full templates).\n # The resulting files have .php extensions, making it possible to run\n # them without reconfiguring your server to recognize them.\n \"php\": ('.php',),\n # Pandoc detects the input from the source filename\n # but is disabled by default as it would conflict\n # with many of the others.\n # \"pandoc\": ('.rst', '.md', '.txt'),\n}\"\"\",\n 'NAVIGATION_LINKS': \"\"\"{\n DEFAULT_LANG: (\n (\"/archive.html\", \"Archives\"),\n (\"/categories/index.html\", \"Tags\"),\n (\"/rss.xml\", \"RSS feed\"),\n ),\n}\"\"\",\n 'REDIRECTIONS': [],\n}\n\n\n# Generate a list of supported languages here.\n# Ugly code follows.\n_suplang = {}\n_sllength = 0\n\nfor k, v in LEGAL_VALUES['TRANSLATIONS'].items():\n if not isinstance(k, tuple):\n main = k\n _suplang[main] = v\n else:\n main = k[0]\n k = k[1:]\n bad = []\n good = []\n for i in k:\n if i.startswith('!'):\n bad.append(i[1:])\n else:\n good.append(i)\n different = ''\n if good or bad:\n different += ' ['\n if good:\n different += 'ALTERNATIVELY ' + ', '.join(good)\n if bad:\n if good:\n different += '; '\n different += 'NOT ' + ', '.join(bad)\n if good or bad:\n different += ']'\n _suplang[main] = v + different\n\n if len(main) > _sllength:\n _sllength = len(main)\n\n_sllength = str(_sllength)\nsuplang = (u'# {0:<' + _sllength + u'} {1}\\n').format('en', 'English')\ndel _suplang['en']\nfor k, v in sorted(_suplang.items()):\n suplang += (u'# {0:<' + _sllength + u'} {1}\\n').format(k, v)\n\nSAMPLE_CONF['_SUPPORTED_LANGUAGES'] = suplang.strip()\n\n# Generate a list of supported comment systems here.\n\nSAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'] = '\\n'.join(textwrap.wrap(\n u', '.join(LEGAL_VALUES['COMMENT_SYSTEM']),\n initial_indent=u'# ', subsequent_indent=u'# ', width=79))\n\n\ndef format_default_translations_config(additional_languages):\n \"\"\"Adapt TRANSLATIONS setting for all additional languages.\"\"\"\n if not additional_languages:\n return SAMPLE_CONF[\"TRANSLATIONS\"]\n lang_paths = [' DEFAULT_LANG: \"\",']\n for lang in sorted(additional_languages):\n lang_paths.append(' \"{0}\": \"./{0}\",'.format(lang))\n return \"{{\\n{0}\\n}}\".format(\"\\n\".join(lang_paths))\n\n\ndef format_navigation_links(additional_languages, default_lang, messages, strip_indexes=False):\n \"\"\"Return the string to configure NAVIGATION_LINKS.\"\"\"\n f = u\"\"\"\\\n {0}: (\n (\"{1}/archive.html\", \"{2[Archive]}\"),\n (\"{1}/categories/{3}\", \"{2[Tags]}\"),\n (\"{1}/rss.xml\", \"{2[RSS feed]}\"),\n ),\"\"\"\n\n pairs = []\n\n def get_msg(lang):\n \"\"\"Generate a smaller messages dict with fallback.\"\"\"\n fmsg = {}\n for i in (u'Archive', u'Tags', u'RSS feed'):\n if messages[lang][i]:\n fmsg[i] = messages[lang][i]\n else:\n fmsg[i] = i\n return fmsg\n\n if strip_indexes:\n index_html = ''\n else:\n index_html = 'index.html'\n\n # handle the default language\n pairs.append(f.format('DEFAULT_LANG', '', get_msg(default_lang), index_html))\n\n for l in additional_languages:\n pairs.append(f.format(json.dumps(l, ensure_ascii=False), '/' + l, get_msg(l), index_html))\n\n return u'{{\\n{0}\\n}}'.format('\\n\\n'.join(pairs))\n\n\n# In order to ensure proper escaping, all variables but the pre-formatted ones\n# are handled by json.dumps().\ndef prepare_config(config):\n \"\"\"Parse sample config with JSON.\"\"\"\n p = config.copy()\n p.update({k: json.dumps(v, ensure_ascii=False) for k, v in p.items()\n if k not in ('POSTS', 'PAGES', 'COMPILERS', 'TRANSLATIONS', 'NAVIGATION_LINKS', '_SUPPORTED_LANGUAGES', '_SUPPORTED_COMMENT_SYSTEMS', 
'INDEX_READ_MORE_LINK', 'FEED_READ_MORE_LINK')})\n # READ_MORE_LINKs require some special treatment.\n p['INDEX_READ_MORE_LINK'] = \"'\" + p['INDEX_READ_MORE_LINK'].replace(\"'\", \"\\\\'\") + \"'\"\n p['FEED_READ_MORE_LINK'] = \"'\" + p['FEED_READ_MORE_LINK'].replace(\"'\", \"\\\\'\") + \"'\"\n # fix booleans and None\n p.update({k: str(v) for k, v in config.items() if isinstance(v, bool) or v is None})\n return p\n\n\ndef test_destination(destination, demo=False):\n \"\"\"Check if the destination already exists, which can break demo site creation.\"\"\"\n # Issue #2214\n if demo and os.path.exists(destination):\n LOGGER.warning(\"The directory {0} already exists, and a new demo site cannot be initialized in an existing directory.\".format(destination))\n LOGGER.warning(\"Please remove the directory and try again, or use another directory.\")\n LOGGER.info(\"Hint: If you want to initialize a git repository in this directory, run `git init` in the directory after creating a Nikola site.\")\n return False\n else:\n return True\n\n\nclass CommandInit(Command):\n \"\"\"Create a new site.\"\"\"\n\n name = \"init\"\n\n doc_usage = \"[--demo] [--quiet] folder\"\n needs_config = False\n doc_purpose = \"create a Nikola site in the specified folder\"\n cmd_options = [\n {\n 'name': 'quiet',\n 'long': 'quiet',\n 'short': 'q',\n 'default': False,\n 'type': bool,\n 'help': \"Do not ask questions about config.\",\n },\n {\n 'name': 'demo',\n 'long': 'demo',\n 'short': 'd',\n 'default': False,\n 'type': bool,\n 'help': \"Create a site filled with example data.\",\n }\n ]\n\n @classmethod\n def copy_sample_site(cls, target):\n \"\"\"Copy sample site data to target directory.\"\"\"\n src = resource_filename('nikola', os.path.join('data', 'samplesite'))\n shutil.copytree(src, target)\n\n @staticmethod\n def create_configuration(target):\n \"\"\"Create configuration file.\"\"\"\n template_path = resource_filename('nikola', 'conf.py.in')\n conf_template = Template(filename=template_path)\n conf_path = os.path.join(target, 'conf.py')\n with io.open(conf_path, 'w+', encoding='utf8') as fd:\n fd.write(conf_template.render(**prepare_config(SAMPLE_CONF)))\n\n @staticmethod\n def create_configuration_to_string():\n \"\"\"Return configuration file as a string.\"\"\"\n template_path = resource_filename('nikola', 'conf.py.in')\n conf_template = Template(filename=template_path)\n return conf_template.render(**prepare_config(SAMPLE_CONF))\n\n @classmethod\n def create_empty_site(cls, target):\n \"\"\"Create an empty site with directories only.\"\"\"\n for folder in ('files', 'galleries', 'listings', 'posts', 'pages'):\n makedirs(os.path.join(target, folder))\n\n @staticmethod\n def ask_questions(target, demo=False):\n \"\"\"Ask some questions about Nikola.\"\"\"\n def urlhandler(default, toconf):\n answer = ask('Site URL', 'https://example.com/')\n try:\n answer = answer.decode('utf-8')\n except (AttributeError, UnicodeDecodeError):\n pass\n if not answer.startswith(u'http'):\n print(\" ERROR: You must specify a protocol (http or https).\")\n urlhandler(default, toconf)\n return\n if not answer.endswith('/'):\n print(\" The URL does not end in '/' -- adding it.\")\n answer += '/'\n\n dst_url = urlsplit(answer)\n try:\n dst_url.netloc.encode('ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n # The IDN contains characters beyond ASCII. We must convert it\n # to Punycode. 
(Issue #1644)\n nl = dst_url.netloc.encode('idna')\n answer = urlunsplit((dst_url.scheme,\n nl,\n dst_url.path,\n dst_url.query,\n dst_url.fragment))\n print(\" Converting to Punycode:\", answer)\n\n SAMPLE_CONF['SITE_URL'] = answer\n\n def prettyhandler(default, toconf):\n SAMPLE_CONF['PRETTY_URLS'] = ask_yesno('Enable pretty URLs (/page/ instead of /page.html) that don\\'t need web server configuration?', default=True)\n SAMPLE_CONF['STRIP_INDEXES'] = SAMPLE_CONF['PRETTY_URLS']\n\n def lhandler(default, toconf, show_header=True):\n if show_header:\n print(\"We will now ask you to provide the list of languages you want to use.\")\n print(\"Please list all the desired languages, comma-separated, using ISO 639-1 codes. The first language will be used as the default.\")\n print(\"Type '?' (a question mark, sans quotes) to list available languages.\")\n answer = ask('Language(s) to use', 'en')\n while answer.strip() == '?':\n print('\\n# Available languages:')\n try:\n print(SAMPLE_CONF['_SUPPORTED_LANGUAGES'] + '\\n')\n except UnicodeEncodeError:\n # avoid Unicode characters in supported language names\n print(unidecode.unidecode(SAMPLE_CONF['_SUPPORTED_LANGUAGES']) + '\\n')\n answer = ask('Language(s) to use', 'en')\n\n langs = [i.strip().lower().replace('-', '_') for i in answer.split(',')]\n for partial, full in LEGAL_VALUES['_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS'].items():\n if partial in langs:\n langs[langs.index(partial)] = full\n print(\"NOTICE: Assuming '{0}' instead of '{1}'.\".format(full, partial))\n\n default = langs.pop(0)\n SAMPLE_CONF['DEFAULT_LANG'] = default\n # format_default_translations_config() is intelligent enough to\n # return the current value if there are no additional languages.\n SAMPLE_CONF['TRANSLATIONS'] = format_default_translations_config(langs)\n\n # Get messages for navigation_links. In order to do this, we need\n # to generate a throwaway TRANSLATIONS dict.\n tr = {default: ''}\n for l in langs:\n tr[l] = './' + l\n # Assuming that base contains all the locales, and that base does\n # not inherit from anywhere.\n try:\n messages = load_messages(['base'], tr, default, themes_dirs=['themes'])\n SAMPLE_CONF['NAVIGATION_LINKS'] = format_navigation_links(langs, default, messages, SAMPLE_CONF['STRIP_INDEXES'])\n except nikola.utils.LanguageNotFoundError as e:\n print(\" ERROR: the language '{0}' is not supported.\".format(e.lang))\n print(\" Are you sure you spelled the name correctly? Names are case-sensitive and need to be reproduced as-is (complete with the country specifier, if any).\")\n print(\"\\nType '?' (a question mark, sans quotes) to list available languages.\")\n lhandler(default, toconf, show_header=False)\n\n def tzhandler(default, toconf):\n print(\"\\nPlease choose the correct time zone for your blog. Nikola uses the tz database.\")\n print(\"You can find your time zone here:\")\n print(\"https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\")\n print(\"\")\n answered = False\n while not answered:\n try:\n lz = get_localzone()\n except:\n lz = None\n answer = ask('Time zone', lz if lz else \"UTC\")\n tz = dateutil.tz.gettz(answer)\n\n if tz is None:\n print(\" WARNING: Time zone not found. 
Searching list of time zones for a match.\")\n zonesfile = tarfile.open(fileobj=dateutil.zoneinfo.getzoneinfofile_stream())\n zonenames = [zone for zone in zonesfile.getnames() if answer.lower() in zone.lower()]\n if len(zonenames) == 1:\n tz = dateutil.tz.gettz(zonenames[0])\n answer = zonenames[0]\n print(\" Picking '{0}'.\".format(answer))\n elif len(zonenames) > 1:\n print(\" The following time zones match your query:\")\n print(' ' + '\\n '.join(zonenames))\n continue\n\n if tz is not None:\n time = datetime.datetime.now(tz).strftime('%H:%M:%S')\n print(\" Current time in {0}: {1}\".format(answer, time))\n answered = ask_yesno(\"Use this time zone?\", True)\n else:\n print(\" ERROR: No matches found. Please try again.\")\n\n SAMPLE_CONF['TIMEZONE'] = answer\n\n def chandler(default, toconf):\n print(\"You can configure comments now. Type '?' (a question mark, sans quotes) to list available comment systems. If you do not want any comments, just leave the field blank.\")\n answer = ask('Comment system', '')\n while answer.strip() == '?':\n print('\\n# Available comment systems:')\n print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])\n print('')\n answer = ask('Comment system', '')\n\n while answer and answer not in LEGAL_VALUES['COMMENT_SYSTEM']:\n if answer != '?':\n print(' ERROR: Nikola does not know this comment system.')\n print('\\n# Available comment systems:')\n print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])\n print('')\n answer = ask('Comment system', '')\n\n SAMPLE_CONF['COMMENT_SYSTEM'] = answer\n SAMPLE_CONF['COMMENT_SYSTEM_ID'] = ''\n\n if answer:\n print(\"You need to provide the site identifier for your comment system. Consult the Nikola manual for details on what the value should be. (you can leave it empty and come back later)\")\n answer = ask('Comment system site identifier', '')\n SAMPLE_CONF['COMMENT_SYSTEM_ID'] = answer\n\n STORAGE = {'target': target}\n\n questions = [\n ('Questions about the site', None, None, None),\n # query, default, toconf, destination\n ('Destination', None, False, '!target'),\n ('Site title', 'My Nikola Site', True, 'BLOG_TITLE'),\n ('Site author', 'Nikola Tesla', True, 'BLOG_AUTHOR'),\n ('Site author\\'s e-mail', '[email protected]', True, 'BLOG_EMAIL'),\n ('Site description', 'This is a demo site for Nikola.', True, 'BLOG_DESCRIPTION'),\n (urlhandler, None, True, True),\n (prettyhandler, None, True, True),\n ('Questions about languages and locales', None, None, None),\n (lhandler, None, True, True),\n (tzhandler, None, True, True),\n ('Questions about comments', None, None, None),\n (chandler, None, True, True),\n ]\n\n print(\"Creating Nikola Site\")\n print(\"====================\\n\")\n print(\"This is Nikola v{0}. 
We will now ask you a few easy questions about your new site.\".format(nikola.__version__))\n print(\"If you do not want to answer and want to go with the defaults instead, simply restart with the `-q` parameter.\")\n\n for query, default, toconf, destination in questions:\n if target and destination == '!target' and test_destination(target, demo):\n # Skip the destination question if we know it already\n pass\n else:\n if default is toconf is destination is None:\n print('--- {0} ---'.format(query))\n elif destination is True:\n query(default, toconf)\n else:\n answer = ask(query, default)\n try:\n answer = answer.decode('utf-8')\n except (AttributeError, UnicodeDecodeError):\n pass\n if toconf:\n SAMPLE_CONF[destination] = answer\n if destination == '!target':\n while not answer or not test_destination(answer, demo):\n if not answer:\n print(' ERROR: you need to specify a target directory.\\n')\n answer = ask(query, default)\n STORAGE['target'] = answer\n\n print(\"\\nThat's it, Nikola is now configured. Make sure to edit conf.py to your liking.\")\n print(\"If you are looking for themes and addons, check out https://themes.getnikola.com/ and https://plugins.getnikola.com/.\")\n print(\"Have fun!\")\n return STORAGE\n\n def _execute(self, options={}, args=None):\n \"\"\"Create a new site.\"\"\"\n try:\n target = args[0]\n except IndexError:\n target = None\n if not options.get('quiet'):\n st = self.ask_questions(target=target, demo=options.get('demo'))\n try:\n if not target:\n target = st['target']\n except KeyError:\n pass\n\n if not target:\n print(\"Usage: nikola init [--demo] [--quiet] folder\")\n print(\"\"\"\nOptions:\n -q, --quiet Do not ask questions about config.\n -d, --demo Create a site filled with example data.\"\"\")\n return 1\n if not options.get('demo'):\n self.create_empty_site(target)\n LOGGER.info('Created empty site at {0}.'.format(target))\n else:\n if not test_destination(target, True):\n return 2\n self.copy_sample_site(target)\n LOGGER.info(\"A new site with example data has been created at \"\n \"{0}.\".format(target))\n LOGGER.info(\"See README.txt in that folder for more information.\")\n\n self.create_configuration(target)\n", "path": "nikola/plugins/command/init.py"}]} |
gh_patches_debug_1372 | rasdani/github-patches | git_diff | sanic-org__sanic-1232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possible memory leak in websocket_handler function
Hey! It seems that I found a possible memory leak in `websocket_handler` function inside `Sanic.websocket` https://github.com/channelcat/sanic/blob/master/sanic/app.py#L301
If arbitrary exception occurred in websocket handler, it won't be catched down there and `fut` object will stay in `self.websocket_tasks` list. Little by little this list will become bigger and will consume more memory.
Probably it makes sense to catch all exceptions in `try: except:` block, not only `(CancelledError, ConnectionClosed)`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/app.py`
Content:
```
1 import os
2 import logging
3 import logging.config
4 import re
5 import warnings
6 from asyncio import get_event_loop, ensure_future, CancelledError
7 from collections import deque, defaultdict
8 from functools import partial
9 from inspect import getmodulename, isawaitable, signature, stack
10 from traceback import format_exc
11 from urllib.parse import urlencode, urlunparse
12 from ssl import create_default_context, Purpose
13
14 from sanic.config import Config
15 from sanic.constants import HTTP_METHODS
16 from sanic.exceptions import ServerError, URLBuildError, SanicException
17 from sanic.handlers import ErrorHandler
18 from sanic.log import logger, error_logger, LOGGING_CONFIG_DEFAULTS
19 from sanic.response import HTTPResponse, StreamingHTTPResponse
20 from sanic.router import Router
21 from sanic.server import serve, serve_multiple, HttpProtocol, Signal
22 from sanic.static import register as static_register
23 from sanic.testing import SanicTestClient
24 from sanic.views import CompositionView
25 from sanic.websocket import WebSocketProtocol, ConnectionClosed
26 import sanic.reloader_helpers as reloader_helpers
27
28
29 class Sanic:
30 def __init__(self, name=None, router=None, error_handler=None,
31 load_env=True, request_class=None,
32 strict_slashes=False, log_config=None,
33 configure_logging=True):
34
35 # Get name from previous stack frame
36 if name is None:
37 frame_records = stack()[1]
38 name = getmodulename(frame_records[1])
39
40 # logging
41 if configure_logging:
42 logging.config.dictConfig(log_config or LOGGING_CONFIG_DEFAULTS)
43
44 self.name = name
45 self.router = router or Router()
46 self.request_class = request_class
47 self.error_handler = error_handler or ErrorHandler()
48 self.config = Config(load_env=load_env)
49 self.request_middleware = deque()
50 self.response_middleware = deque()
51 self.blueprints = {}
52 self._blueprint_order = []
53 self.configure_logging = configure_logging
54 self.debug = None
55 self.sock = None
56 self.strict_slashes = strict_slashes
57 self.listeners = defaultdict(list)
58 self.is_running = False
59 self.is_request_stream = False
60 self.websocket_enabled = False
61 self.websocket_tasks = set()
62
63 # Register alternative method names
64 self.go_fast = self.run
65
66 @property
67 def loop(self):
68 """Synonymous with asyncio.get_event_loop().
69
70 Only supported when using the `app.run` method.
71 """
72 if not self.is_running:
73 raise SanicException(
74 'Loop can only be retrieved after the app has started '
75 'running. Not supported with `create_server` function')
76 return get_event_loop()
77
78 # -------------------------------------------------------------------- #
79 # Registration
80 # -------------------------------------------------------------------- #
81
82 def add_task(self, task):
83 """Schedule a task to run later, after the loop has started.
84 Different from asyncio.ensure_future in that it does not
85 also return a future, and the actual ensure_future call
86 is delayed until before server start.
87
88 :param task: future, couroutine or awaitable
89 """
90 try:
91 if callable(task):
92 try:
93 self.loop.create_task(task(self))
94 except TypeError:
95 self.loop.create_task(task())
96 else:
97 self.loop.create_task(task)
98 except SanicException:
99 @self.listener('before_server_start')
100 def run(app, loop):
101 if callable(task):
102 try:
103 loop.create_task(task(self))
104 except TypeError:
105 loop.create_task(task())
106 else:
107 loop.create_task(task)
108
109 # Decorator
110 def listener(self, event):
111 """Create a listener from a decorated function.
112
113 :param event: event to listen to
114 """
115
116 def decorator(listener):
117 self.listeners[event].append(listener)
118 return listener
119
120 return decorator
121
122 def register_listener(self, listener, event):
123 """
124 Register the listener for a given event.
125
126 Args:
127 listener: callable i.e. setup_db(app, loop)
128 event: when to register listener i.e. 'before_server_start'
129
130 Returns: listener
131 """
132
133 return self.listener(event)(listener)
134
135 # Decorator
136 def route(self, uri, methods=frozenset({'GET'}), host=None,
137 strict_slashes=None, stream=False, version=None, name=None):
138 """Decorate a function to be registered as a route
139
140 :param uri: path of the URL
141 :param methods: list or tuple of methods allowed
142 :param host:
143 :param strict_slashes:
144 :param stream:
145 :param version:
146 :param name: user defined route name for url_for
147 :return: decorated function
148 """
149
150 # Fix case where the user did not prefix the URL with a /
151 # and will probably get confused as to why it's not working
152 if not uri.startswith('/'):
153 uri = '/' + uri
154
155 if stream:
156 self.is_request_stream = True
157
158 if strict_slashes is None:
159 strict_slashes = self.strict_slashes
160
161 def response(handler):
162 args = [key for key in signature(handler).parameters.keys()]
163 if args:
164 if stream:
165 handler.is_stream = stream
166
167 self.router.add(uri=uri, methods=methods, handler=handler,
168 host=host, strict_slashes=strict_slashes,
169 version=version, name=name)
170 return handler
171 else:
172 raise ValueError(
173 'Required parameter `request` missing'
174 'in the {0}() route?'.format(
175 handler.__name__))
176
177 return response
178
179 # Shorthand method decorators
180 def get(self, uri, host=None, strict_slashes=None, version=None,
181 name=None):
182 return self.route(uri, methods=frozenset({"GET"}), host=host,
183 strict_slashes=strict_slashes, version=version,
184 name=name)
185
186 def post(self, uri, host=None, strict_slashes=None, stream=False,
187 version=None, name=None):
188 return self.route(uri, methods=frozenset({"POST"}), host=host,
189 strict_slashes=strict_slashes, stream=stream,
190 version=version, name=name)
191
192 def put(self, uri, host=None, strict_slashes=None, stream=False,
193 version=None, name=None):
194 return self.route(uri, methods=frozenset({"PUT"}), host=host,
195 strict_slashes=strict_slashes, stream=stream,
196 version=version, name=name)
197
198 def head(self, uri, host=None, strict_slashes=None, version=None,
199 name=None):
200 return self.route(uri, methods=frozenset({"HEAD"}), host=host,
201 strict_slashes=strict_slashes, version=version,
202 name=name)
203
204 def options(self, uri, host=None, strict_slashes=None, version=None,
205 name=None):
206 return self.route(uri, methods=frozenset({"OPTIONS"}), host=host,
207 strict_slashes=strict_slashes, version=version,
208 name=name)
209
210 def patch(self, uri, host=None, strict_slashes=None, stream=False,
211 version=None, name=None):
212 return self.route(uri, methods=frozenset({"PATCH"}), host=host,
213 strict_slashes=strict_slashes, stream=stream,
214 version=version, name=name)
215
216 def delete(self, uri, host=None, strict_slashes=None, version=None,
217 name=None):
218 return self.route(uri, methods=frozenset({"DELETE"}), host=host,
219 strict_slashes=strict_slashes, version=version,
220 name=name)
221
222 def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None,
223 strict_slashes=None, version=None, name=None, stream=False):
224 """A helper method to register class instance or
225 functions as a handler to the application url
226 routes.
227
228 :param handler: function or class instance
229 :param uri: path of the URL
230 :param methods: list or tuple of methods allowed, these are overridden
231 if using a HTTPMethodView
232 :param host:
233 :param strict_slashes:
234 :param version:
235 :param name: user defined route name for url_for
236 :param stream: boolean specifying if the handler is a stream handler
237 :return: function or class instance
238 """
239 # Handle HTTPMethodView differently
240 if hasattr(handler, 'view_class'):
241 methods = set()
242
243 for method in HTTP_METHODS:
244 _handler = getattr(handler.view_class, method.lower(), None)
245 if _handler:
246 methods.add(method)
247 if hasattr(_handler, 'is_stream'):
248 stream = True
249
250 # handle composition view differently
251 if isinstance(handler, CompositionView):
252 methods = handler.handlers.keys()
253 for _handler in handler.handlers.values():
254 if hasattr(_handler, 'is_stream'):
255 stream = True
256 break
257
258 if strict_slashes is None:
259 strict_slashes = self.strict_slashes
260
261 self.route(uri=uri, methods=methods, host=host,
262 strict_slashes=strict_slashes, stream=stream,
263 version=version, name=name)(handler)
264 return handler
265
266 # Decorator
267 def websocket(self, uri, host=None, strict_slashes=None,
268 subprotocols=None, name=None):
269 """Decorate a function to be registered as a websocket route
270 :param uri: path of the URL
271 :param subprotocols: optional list of strings with the supported
272 subprotocols
273 :param host:
274 :return: decorated function
275 """
276 self.enable_websocket()
277
278 # Fix case where the user did not prefix the URL with a /
279 # and will probably get confused as to why it's not working
280 if not uri.startswith('/'):
281 uri = '/' + uri
282
283 if strict_slashes is None:
284 strict_slashes = self.strict_slashes
285
286 def response(handler):
287 async def websocket_handler(request, *args, **kwargs):
288 request.app = self
289 try:
290 protocol = request.transport.get_protocol()
291 except AttributeError:
292 # On Python3.5 the Transport classes in asyncio do not
293 # have a get_protocol() method as in uvloop
294 protocol = request.transport._protocol
295 ws = await protocol.websocket_handshake(request, subprotocols)
296
297 # schedule the application handler
298 # its future is kept in self.websocket_tasks in case it
299 # needs to be cancelled due to the server being stopped
300 fut = ensure_future(handler(request, ws, *args, **kwargs))
301 self.websocket_tasks.add(fut)
302 try:
303 await fut
304 except (CancelledError, ConnectionClosed):
305 pass
306 self.websocket_tasks.remove(fut)
307 await ws.close()
308
309 self.router.add(uri=uri, handler=websocket_handler,
310 methods=frozenset({'GET'}), host=host,
311 strict_slashes=strict_slashes, name=name)
312 return handler
313
314 return response
315
316 def add_websocket_route(self, handler, uri, host=None,
317 strict_slashes=None, name=None):
318 """A helper method to register a function as a websocket route."""
319 if strict_slashes is None:
320 strict_slashes = self.strict_slashes
321
322 return self.websocket(uri, host=host, strict_slashes=strict_slashes,
323 name=name)(handler)
324
325 def enable_websocket(self, enable=True):
326 """Enable or disable the support for websocket.
327
328 Websocket is enabled automatically if websocket routes are
329 added to the application.
330 """
331 if not self.websocket_enabled:
332 # if the server is stopped, we want to cancel any ongoing
333 # websocket tasks, to allow the server to exit promptly
334 @self.listener('before_server_stop')
335 def cancel_websocket_tasks(app, loop):
336 for task in self.websocket_tasks:
337 task.cancel()
338
339 self.websocket_enabled = enable
340
341 def remove_route(self, uri, clean_cache=True, host=None):
342 self.router.remove(uri, clean_cache, host)
343
344 # Decorator
345 def exception(self, *exceptions):
346 """Decorate a function to be registered as a handler for exceptions
347
348 :param exceptions: exceptions
349 :return: decorated function
350 """
351
352 def response(handler):
353 for exception in exceptions:
354 if isinstance(exception, (tuple, list)):
355 for e in exception:
356 self.error_handler.add(e, handler)
357 else:
358 self.error_handler.add(exception, handler)
359 return handler
360
361 return response
362
363 def register_middleware(self, middleware, attach_to='request'):
364 if attach_to == 'request':
365 self.request_middleware.append(middleware)
366 if attach_to == 'response':
367 self.response_middleware.appendleft(middleware)
368 return middleware
369
370 # Decorator
371 def middleware(self, middleware_or_request):
372 """Decorate and register middleware to be called before a request.
373 Can either be called as @app.middleware or @app.middleware('request')
374 """
375
376 # Detect which way this was called, @middleware or @middleware('AT')
377 if callable(middleware_or_request):
378 return self.register_middleware(middleware_or_request)
379
380 else:
381 return partial(self.register_middleware,
382 attach_to=middleware_or_request)
383
384 # Static Files
385 def static(self, uri, file_or_directory, pattern=r'/?.+',
386 use_modified_since=True, use_content_range=False,
387 stream_large_files=False, name='static', host=None,
388 strict_slashes=None):
389 """Register a root to serve files from. The input can either be a
390 file or a directory. See
391 """
392 static_register(self, uri, file_or_directory, pattern,
393 use_modified_since, use_content_range,
394 stream_large_files, name, host, strict_slashes)
395
396 def blueprint(self, blueprint, **options):
397 """Register a blueprint on the application.
398
399 :param blueprint: Blueprint object or (list, tuple) thereof
400 :param options: option dictionary with blueprint defaults
401 :return: Nothing
402 """
403 if isinstance(blueprint, (list, tuple)):
404 for item in blueprint:
405 self.blueprint(item, **options)
406 return
407 if blueprint.name in self.blueprints:
408 assert self.blueprints[blueprint.name] is blueprint, \
409 'A blueprint with the name "%s" is already registered. ' \
410 'Blueprint names must be unique.' % \
411 (blueprint.name,)
412 else:
413 self.blueprints[blueprint.name] = blueprint
414 self._blueprint_order.append(blueprint)
415 blueprint.register(self, options)
416
417 def register_blueprint(self, *args, **kwargs):
418 # TODO: deprecate 1.0
419 if self.debug:
420 warnings.simplefilter('default')
421 warnings.warn("Use of register_blueprint will be deprecated in "
422 "version 1.0. Please use the blueprint method"
423 " instead",
424 DeprecationWarning)
425 return self.blueprint(*args, **kwargs)
426
427 def url_for(self, view_name: str, **kwargs):
428 """Build a URL based on a view name and the values provided.
429
430 In order to build a URL, all request parameters must be supplied as
431 keyword arguments, and each parameter must pass the test for the
432 specified parameter type. If these conditions are not met, a
433 `URLBuildError` will be thrown.
434
435 Keyword arguments that are not request parameters will be included in
436 the output URL's query string.
437
438 :param view_name: string referencing the view name
439 :param \*\*kwargs: keys and values that are used to build request
440 parameters and query string arguments.
441
442 :return: the built URL
443
444 Raises:
445 URLBuildError
446 """
447 # find the route by the supplied view name
448 kw = {}
449 # special static files url_for
450 if view_name == 'static':
451 kw.update(name=kwargs.pop('name', 'static'))
452 elif view_name.endswith('.static'): # blueprint.static
453 kwargs.pop('name', None)
454 kw.update(name=view_name)
455
456 uri, route = self.router.find_route_by_view_name(view_name, **kw)
457 if not (uri and route):
458 raise URLBuildError('Endpoint with name `{}` was not found'.format(
459 view_name))
460
461 if view_name == 'static' or view_name.endswith('.static'):
462 filename = kwargs.pop('filename', None)
463 # it's static folder
464 if '<file_uri:' in uri:
465 folder_ = uri.split('<file_uri:', 1)[0]
466 if folder_.endswith('/'):
467 folder_ = folder_[:-1]
468
469 if filename.startswith('/'):
470 filename = filename[1:]
471
472 uri = '{}/{}'.format(folder_, filename)
473
474 if uri != '/' and uri.endswith('/'):
475 uri = uri[:-1]
476
477 out = uri
478
479 # find all the parameters we will need to build in the URL
480 matched_params = re.findall(
481 self.router.parameter_pattern, uri)
482
483 # _method is only a placeholder now, don't know how to support it
484 kwargs.pop('_method', None)
485 anchor = kwargs.pop('_anchor', '')
486 # _external need SERVER_NAME in config or pass _server arg
487 external = kwargs.pop('_external', False)
488 scheme = kwargs.pop('_scheme', '')
489 if scheme and not external:
490 raise ValueError('When specifying _scheme, _external must be True')
491
492 netloc = kwargs.pop('_server', None)
493 if netloc is None and external:
494 netloc = self.config.get('SERVER_NAME', '')
495
496 if external:
497 if not scheme:
498 if ':' in netloc[:8]:
499 scheme = netloc[:8].split(':', 1)[0]
500 else:
501 scheme = 'http'
502
503 if '://' in netloc[:8]:
504 netloc = netloc.split('://', 1)[-1]
505
506 for match in matched_params:
507 name, _type, pattern = self.router.parse_parameter_string(
508 match)
509 # we only want to match against each individual parameter
510 specific_pattern = '^{}$'.format(pattern)
511 supplied_param = None
512
513 if name in kwargs:
514 supplied_param = kwargs.get(name)
515 del kwargs[name]
516 else:
517 raise URLBuildError(
518 'Required parameter `{}` was not passed to url_for'.format(
519 name))
520
521 supplied_param = str(supplied_param)
522 # determine if the parameter supplied by the caller passes the test
523 # in the URL
524 passes_pattern = re.match(specific_pattern, supplied_param)
525
526 if not passes_pattern:
527 if _type != str:
528 msg = (
529 'Value "{}" for parameter `{}` does not '
530 'match pattern for type `{}`: {}'.format(
531 supplied_param, name, _type.__name__, pattern))
532 else:
533 msg = (
534 'Value "{}" for parameter `{}` '
535 'does not satisfy pattern {}'.format(
536 supplied_param, name, pattern))
537 raise URLBuildError(msg)
538
539 # replace the parameter in the URL with the supplied value
540 replacement_regex = '(<{}.*?>)'.format(name)
541
542 out = re.sub(
543 replacement_regex, supplied_param, out)
544
545 # parse the remainder of the keyword arguments into a querystring
546 query_string = urlencode(kwargs, doseq=True) if kwargs else ''
547 # scheme://netloc/path;parameters?query#fragment
548 out = urlunparse((scheme, netloc, out, '', query_string, anchor))
549
550 return out
551
552 # -------------------------------------------------------------------- #
553 # Request Handling
554 # -------------------------------------------------------------------- #
555
556 def converted_response_type(self, response):
557 pass
558
559 async def handle_request(self, request, write_callback, stream_callback):
560 """Take a request from the HTTP Server and return a response object
561 to be sent back The HTTP Server only expects a response object, so
562 exception handling must be done here
563
564 :param request: HTTP Request object
565 :param write_callback: Synchronous response function to be
566 called with the response as the only argument
567 :param stream_callback: Coroutine that handles streaming a
568 StreamingHTTPResponse if produced by the handler.
569
570 :return: Nothing
571 """
572 try:
573 # -------------------------------------------- #
574 # Request Middleware
575 # -------------------------------------------- #
576
577 request.app = self
578 response = await self._run_request_middleware(request)
579 # No middleware results
580 if not response:
581 # -------------------------------------------- #
582 # Execute Handler
583 # -------------------------------------------- #
584
585 # Fetch handler from router
586 handler, args, kwargs, uri = self.router.get(request)
587
588 request.uri_template = uri
589 if handler is None:
590 raise ServerError(
591 ("'None' was returned while requesting a "
592 "handler from the router"))
593
594 # Run response handler
595 response = handler(request, *args, **kwargs)
596 if isawaitable(response):
597 response = await response
598 except Exception as e:
599 # -------------------------------------------- #
600 # Response Generation Failed
601 # -------------------------------------------- #
602
603 try:
604 response = self.error_handler.response(request, e)
605 if isawaitable(response):
606 response = await response
607 except Exception as e:
608 if isinstance(e, SanicException):
609 response = self.error_handler.default(request=request,
610 exception=e)
611 elif self.debug:
612 response = HTTPResponse(
613 "Error while handling error: {}\nStack: {}".format(
614 e, format_exc()), status=500)
615 else:
616 response = HTTPResponse(
617 "An error occurred while handling an error",
618 status=500)
619 finally:
620 # -------------------------------------------- #
621 # Response Middleware
622 # -------------------------------------------- #
623 try:
624 response = await self._run_response_middleware(request,
625 response)
626 except BaseException:
627 error_logger.exception(
628 'Exception occurred in one of response middleware handlers'
629 )
630
631 # pass the response to the correct callback
632 if isinstance(response, StreamingHTTPResponse):
633 await stream_callback(response)
634 else:
635 write_callback(response)
636
637 # -------------------------------------------------------------------- #
638 # Testing
639 # -------------------------------------------------------------------- #
640
641 @property
642 def test_client(self):
643 return SanicTestClient(self)
644
645 # -------------------------------------------------------------------- #
646 # Execution
647 # -------------------------------------------------------------------- #
648
649 def run(self, host=None, port=None, debug=False, ssl=None,
650 sock=None, workers=1, protocol=None,
651 backlog=100, stop_event=None, register_sys_signals=True,
652 access_log=True, **kwargs):
653 """Run the HTTP Server and listen until keyboard interrupt or term
654 signal. On termination, drain connections before closing.
655
656 :param host: Address to host on
657 :param port: Port to host on
658 :param debug: Enables debug output (slows server)
659 :param ssl: SSLContext, or location of certificate and key
660 for SSL encryption of worker(s)
661 :param sock: Socket for the server to accept connections from
662 :param workers: Number of processes
663 received before it is respected
664 :param backlog:
665 :param stop_event:
666 :param register_sys_signals:
667 :param protocol: Subclass of asyncio protocol class
668 :return: Nothing
669 """
670 # Default auto_reload to false
671 auto_reload = False
672 # If debug is set, default it to true
673 if debug:
674 auto_reload = True
675 # Allow for overriding either of the defaults
676 auto_reload = kwargs.get("auto_reload", auto_reload)
677
678 if sock is None:
679 host, port = host or "127.0.0.1", port or 8000
680
681 if protocol is None:
682 protocol = (WebSocketProtocol if self.websocket_enabled
683 else HttpProtocol)
684 if stop_event is not None:
685 if debug:
686 warnings.simplefilter('default')
687 warnings.warn("stop_event will be removed from future versions.",
688 DeprecationWarning)
689 server_settings = self._helper(
690 host=host, port=port, debug=debug, ssl=ssl, sock=sock,
691 workers=workers, protocol=protocol, backlog=backlog,
692 register_sys_signals=register_sys_signals,
693 access_log=access_log, auto_reload=auto_reload)
694
695 try:
696 self.is_running = True
697 if workers == 1:
698 if auto_reload and os.name != 'posix':
699 # This condition must be removed after implementing
700 # auto reloader for other operating systems.
701 raise NotImplementedError
702
703 if auto_reload and \
704 os.environ.get('SANIC_SERVER_RUNNING') != 'true':
705 reloader_helpers.watchdog(2)
706 else:
707 serve(**server_settings)
708 else:
709 serve_multiple(server_settings, workers)
710 except BaseException:
711 error_logger.exception(
712 'Experienced exception while trying to serve')
713 raise
714 finally:
715 self.is_running = False
716 logger.info("Server Stopped")
717
718 def stop(self):
719 """This kills the Sanic"""
720 get_event_loop().stop()
721
722 def __call__(self):
723 """gunicorn compatibility"""
724 return self
725
726 async def create_server(self, host=None, port=None, debug=False,
727 ssl=None, sock=None, protocol=None,
728 backlog=100, stop_event=None,
729 access_log=True):
730 """Asynchronous version of `run`.
731
732 NOTE: This does not support multiprocessing and is not the preferred
733 way to run a Sanic application.
734 """
735
736 if sock is None:
737 host, port = host or "127.0.0.1", port or 8000
738
739 if protocol is None:
740 protocol = (WebSocketProtocol if self.websocket_enabled
741 else HttpProtocol)
742 if stop_event is not None:
743 if debug:
744 warnings.simplefilter('default')
745 warnings.warn("stop_event will be removed from future versions.",
746 DeprecationWarning)
747
748 server_settings = self._helper(
749 host=host, port=port, debug=debug, ssl=ssl, sock=sock,
750 loop=get_event_loop(), protocol=protocol,
751 backlog=backlog, run_async=True,
752 access_log=access_log)
753
754 # Trigger before_start events
755 await self.trigger_events(
756 server_settings.get('before_start', []),
757 server_settings.get('loop')
758 )
759
760 return await serve(**server_settings)
761
762 async def trigger_events(self, events, loop):
763 """Trigger events (functions or async)
764 :param events: one or more sync or async functions to execute
765 :param loop: event loop
766 """
767 for event in events:
768 result = event(loop)
769 if isawaitable(result):
770 await result
771
772 async def _run_request_middleware(self, request):
773 # The if improves speed. I don't know why
774 if self.request_middleware:
775 for middleware in self.request_middleware:
776 response = middleware(request)
777 if isawaitable(response):
778 response = await response
779 if response:
780 return response
781 return None
782
783 async def _run_response_middleware(self, request, response):
784 if self.response_middleware:
785 for middleware in self.response_middleware:
786 _response = middleware(request, response)
787 if isawaitable(_response):
788 _response = await _response
789 if _response:
790 response = _response
791 break
792 return response
793
794 def _helper(self, host=None, port=None, debug=False,
795 ssl=None, sock=None, workers=1, loop=None,
796 protocol=HttpProtocol, backlog=100, stop_event=None,
797 register_sys_signals=True, run_async=False, access_log=True,
798 auto_reload=False):
799 """Helper function used by `run` and `create_server`."""
800 if isinstance(ssl, dict):
801 # try common aliaseses
802 cert = ssl.get('cert') or ssl.get('certificate')
803 key = ssl.get('key') or ssl.get('keyfile')
804 if cert is None or key is None:
805 raise ValueError("SSLContext or certificate and key required.")
806 context = create_default_context(purpose=Purpose.CLIENT_AUTH)
807 context.load_cert_chain(cert, keyfile=key)
808 ssl = context
809 if stop_event is not None:
810 if debug:
811 warnings.simplefilter('default')
812 warnings.warn("stop_event will be removed from future versions.",
813 DeprecationWarning)
814
815 self.error_handler.debug = debug
816 self.debug = debug
817
818 server_settings = {
819 'protocol': protocol,
820 'request_class': self.request_class,
821 'is_request_stream': self.is_request_stream,
822 'router': self.router,
823 'host': host,
824 'port': port,
825 'sock': sock,
826 'ssl': ssl,
827 'signal': Signal(),
828 'debug': debug,
829 'request_handler': self.handle_request,
830 'error_handler': self.error_handler,
831 'request_timeout': self.config.REQUEST_TIMEOUT,
832 'response_timeout': self.config.RESPONSE_TIMEOUT,
833 'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,
834 'request_max_size': self.config.REQUEST_MAX_SIZE,
835 'keep_alive': self.config.KEEP_ALIVE,
836 'loop': loop,
837 'register_sys_signals': register_sys_signals,
838 'backlog': backlog,
839 'access_log': access_log,
840 'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,
841 'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,
842 'websocket_read_limit': self.config.WEBSOCKET_READ_LIMIT,
843 'websocket_write_limit': self.config.WEBSOCKET_WRITE_LIMIT,
844 'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT
845 }
846
847 # -------------------------------------------- #
848 # Register start/stop events
849 # -------------------------------------------- #
850
851 for event_name, settings_name, reverse in (
852 ("before_server_start", "before_start", False),
853 ("after_server_start", "after_start", False),
854 ("before_server_stop", "before_stop", True),
855 ("after_server_stop", "after_stop", True),
856 ):
857 listeners = self.listeners[event_name].copy()
858 if reverse:
859 listeners.reverse()
860 # Prepend sanic to the arguments when listeners are triggered
861 listeners = [partial(listener, self) for listener in listeners]
862 server_settings[settings_name] = listeners
863
864 if self.configure_logging and debug:
865 logger.setLevel(logging.DEBUG)
866
867 if self.config.LOGO is not None and \
868 os.environ.get('SANIC_SERVER_RUNNING') != 'true':
869 logger.debug(self.config.LOGO)
870
871 if run_async:
872 server_settings['run_async'] = True
873
874 # Serve
875 if host and port and os.environ.get('SANIC_SERVER_RUNNING') != 'true':
876 proto = "http"
877 if ssl is not None:
878 proto = "https"
879 logger.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))
880
881 return server_settings
882
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -303,7 +303,8 @@
await fut
except (CancelledError, ConnectionClosed):
pass
- self.websocket_tasks.remove(fut)
+ finally:
+ self.websocket_tasks.remove(fut)
await ws.close()
self.router.add(uri=uri, handler=websocket_handler,
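For readers who want to see the effect of this hunk in isolation, below is a small, self-contained asyncio sketch of the same cleanup pattern. It is not Sanic code: `ConnectionError` merely stands in for websockets' `ConnectionClosed`, and the module-level set plays the role of `self.websocket_tasks`. The point it illustrates is that moving the `remove()` call into `finally` guarantees the future leaves the tracking set even when the handler raises an arbitrary exception.
```python
# Hypothetical stand-alone illustration (Python 3.7+), not part of sanic/app.py.
import asyncio

websocket_tasks = set()  # stands in for Sanic's self.websocket_tasks

async def run_tracked(handler_coro):
    fut = asyncio.ensure_future(handler_coro)
    websocket_tasks.add(fut)
    try:
        await fut
    except (asyncio.CancelledError, ConnectionError):
        pass  # the "expected" shutdown / closed-connection cases
    finally:
        # Mirrors the patched code: cleanup now runs for *any* outcome,
        # so a failing handler can no longer leak its future into the set.
        websocket_tasks.remove(fut)

async def failing_handler():
    raise RuntimeError("boom")  # an arbitrary, unexpected handler error

async def main():
    try:
        await run_tracked(failing_handler())
    except RuntimeError:
        pass
    assert not websocket_tasks  # the set is empty despite the exception

asyncio.run(main())
```
Note that in the unexpected-exception case the error still propagates after the cleanup runs — the patch only guarantees the bookkeeping; it does not swallow handler errors.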
| {"golden_diff": "diff --git a/sanic/app.py b/sanic/app.py\n--- a/sanic/app.py\n+++ b/sanic/app.py\n@@ -303,7 +303,8 @@\n await fut\n except (CancelledError, ConnectionClosed):\n pass\n- self.websocket_tasks.remove(fut)\n+ finally:\n+ self.websocket_tasks.remove(fut)\n await ws.close()\n \n self.router.add(uri=uri, handler=websocket_handler,\n", "issue": "Possible memory leak in websocket_handler function\nHey! It seems that I found a possible memory leak in `websocket_handler` function inside `Sanic.websocket` https://github.com/channelcat/sanic/blob/master/sanic/app.py#L301\r\n\r\nIf arbitrary exception occurred in websocket handler, it won't be catched down there and `fut` object will stay in `self.websocket_tasks` list. Little by little this list will become bigger and will consume more memory.\r\n\r\nProbably it makes sense to catch all exceptions in `try: except:` block, not only `(CancelledError, ConnectionClosed)`?\n", "before_files": [{"content": "import os\nimport logging\nimport logging.config\nimport re\nimport warnings\nfrom asyncio import get_event_loop, ensure_future, CancelledError\nfrom collections import deque, defaultdict\nfrom functools import partial\nfrom inspect import getmodulename, isawaitable, signature, stack\nfrom traceback import format_exc\nfrom urllib.parse import urlencode, urlunparse\nfrom ssl import create_default_context, Purpose\n\nfrom sanic.config import Config\nfrom sanic.constants import HTTP_METHODS\nfrom sanic.exceptions import ServerError, URLBuildError, SanicException\nfrom sanic.handlers import ErrorHandler\nfrom sanic.log import logger, error_logger, LOGGING_CONFIG_DEFAULTS\nfrom sanic.response import HTTPResponse, StreamingHTTPResponse\nfrom sanic.router import Router\nfrom sanic.server import serve, serve_multiple, HttpProtocol, Signal\nfrom sanic.static import register as static_register\nfrom sanic.testing import SanicTestClient\nfrom sanic.views import CompositionView\nfrom sanic.websocket import WebSocketProtocol, ConnectionClosed\nimport sanic.reloader_helpers as reloader_helpers\n\n\nclass Sanic:\n def __init__(self, name=None, router=None, error_handler=None,\n load_env=True, request_class=None,\n strict_slashes=False, log_config=None,\n configure_logging=True):\n\n # Get name from previous stack frame\n if name is None:\n frame_records = stack()[1]\n name = getmodulename(frame_records[1])\n\n # logging\n if configure_logging:\n logging.config.dictConfig(log_config or LOGGING_CONFIG_DEFAULTS)\n\n self.name = name\n self.router = router or Router()\n self.request_class = request_class\n self.error_handler = error_handler or ErrorHandler()\n self.config = Config(load_env=load_env)\n self.request_middleware = deque()\n self.response_middleware = deque()\n self.blueprints = {}\n self._blueprint_order = []\n self.configure_logging = configure_logging\n self.debug = None\n self.sock = None\n self.strict_slashes = strict_slashes\n self.listeners = defaultdict(list)\n self.is_running = False\n self.is_request_stream = False\n self.websocket_enabled = False\n self.websocket_tasks = set()\n\n # Register alternative method names\n self.go_fast = self.run\n\n @property\n def loop(self):\n \"\"\"Synonymous with asyncio.get_event_loop().\n\n Only supported when using the `app.run` method.\n \"\"\"\n if not self.is_running:\n raise SanicException(\n 'Loop can only be retrieved after the app has started '\n 'running. 
Not supported with `create_server` function')\n return get_event_loop()\n\n # -------------------------------------------------------------------- #\n # Registration\n # -------------------------------------------------------------------- #\n\n def add_task(self, task):\n \"\"\"Schedule a task to run later, after the loop has started.\n Different from asyncio.ensure_future in that it does not\n also return a future, and the actual ensure_future call\n is delayed until before server start.\n\n :param task: future, couroutine or awaitable\n \"\"\"\n try:\n if callable(task):\n try:\n self.loop.create_task(task(self))\n except TypeError:\n self.loop.create_task(task())\n else:\n self.loop.create_task(task)\n except SanicException:\n @self.listener('before_server_start')\n def run(app, loop):\n if callable(task):\n try:\n loop.create_task(task(self))\n except TypeError:\n loop.create_task(task())\n else:\n loop.create_task(task)\n\n # Decorator\n def listener(self, event):\n \"\"\"Create a listener from a decorated function.\n\n :param event: event to listen to\n \"\"\"\n\n def decorator(listener):\n self.listeners[event].append(listener)\n return listener\n\n return decorator\n\n def register_listener(self, listener, event):\n \"\"\"\n Register the listener for a given event.\n\n Args:\n listener: callable i.e. setup_db(app, loop)\n event: when to register listener i.e. 'before_server_start'\n\n Returns: listener\n \"\"\"\n\n return self.listener(event)(listener)\n\n # Decorator\n def route(self, uri, methods=frozenset({'GET'}), host=None,\n strict_slashes=None, stream=False, version=None, name=None):\n \"\"\"Decorate a function to be registered as a route\n\n :param uri: path of the URL\n :param methods: list or tuple of methods allowed\n :param host:\n :param strict_slashes:\n :param stream:\n :param version:\n :param name: user defined route name for url_for\n :return: decorated function\n \"\"\"\n\n # Fix case where the user did not prefix the URL with a /\n # and will probably get confused as to why it's not working\n if not uri.startswith('/'):\n uri = '/' + uri\n\n if stream:\n self.is_request_stream = True\n\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n def response(handler):\n args = [key for key in signature(handler).parameters.keys()]\n if args:\n if stream:\n handler.is_stream = stream\n\n self.router.add(uri=uri, methods=methods, handler=handler,\n host=host, strict_slashes=strict_slashes,\n version=version, name=name)\n return handler\n else:\n raise ValueError(\n 'Required parameter `request` missing'\n 'in the {0}() route?'.format(\n handler.__name__))\n\n return response\n\n # Shorthand method decorators\n def get(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"GET\"}), host=host,\n strict_slashes=strict_slashes, version=version,\n name=name)\n\n def post(self, uri, host=None, strict_slashes=None, stream=False,\n version=None, name=None):\n return self.route(uri, methods=frozenset({\"POST\"}), host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)\n\n def put(self, uri, host=None, strict_slashes=None, stream=False,\n version=None, name=None):\n return self.route(uri, methods=frozenset({\"PUT\"}), host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)\n\n def head(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"HEAD\"}), host=host,\n 
strict_slashes=strict_slashes, version=version,\n name=name)\n\n def options(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"OPTIONS\"}), host=host,\n strict_slashes=strict_slashes, version=version,\n name=name)\n\n def patch(self, uri, host=None, strict_slashes=None, stream=False,\n version=None, name=None):\n return self.route(uri, methods=frozenset({\"PATCH\"}), host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)\n\n def delete(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"DELETE\"}), host=host,\n strict_slashes=strict_slashes, version=version,\n name=name)\n\n def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None,\n strict_slashes=None, version=None, name=None, stream=False):\n \"\"\"A helper method to register class instance or\n functions as a handler to the application url\n routes.\n\n :param handler: function or class instance\n :param uri: path of the URL\n :param methods: list or tuple of methods allowed, these are overridden\n if using a HTTPMethodView\n :param host:\n :param strict_slashes:\n :param version:\n :param name: user defined route name for url_for\n :param stream: boolean specifying if the handler is a stream handler\n :return: function or class instance\n \"\"\"\n # Handle HTTPMethodView differently\n if hasattr(handler, 'view_class'):\n methods = set()\n\n for method in HTTP_METHODS:\n _handler = getattr(handler.view_class, method.lower(), None)\n if _handler:\n methods.add(method)\n if hasattr(_handler, 'is_stream'):\n stream = True\n\n # handle composition view differently\n if isinstance(handler, CompositionView):\n methods = handler.handlers.keys()\n for _handler in handler.handlers.values():\n if hasattr(_handler, 'is_stream'):\n stream = True\n break\n\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n self.route(uri=uri, methods=methods, host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)(handler)\n return handler\n\n # Decorator\n def websocket(self, uri, host=None, strict_slashes=None,\n subprotocols=None, name=None):\n \"\"\"Decorate a function to be registered as a websocket route\n :param uri: path of the URL\n :param subprotocols: optional list of strings with the supported\n subprotocols\n :param host:\n :return: decorated function\n \"\"\"\n self.enable_websocket()\n\n # Fix case where the user did not prefix the URL with a /\n # and will probably get confused as to why it's not working\n if not uri.startswith('/'):\n uri = '/' + uri\n\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n def response(handler):\n async def websocket_handler(request, *args, **kwargs):\n request.app = self\n try:\n protocol = request.transport.get_protocol()\n except AttributeError:\n # On Python3.5 the Transport classes in asyncio do not\n # have a get_protocol() method as in uvloop\n protocol = request.transport._protocol\n ws = await protocol.websocket_handshake(request, subprotocols)\n\n # schedule the application handler\n # its future is kept in self.websocket_tasks in case it\n # needs to be cancelled due to the server being stopped\n fut = ensure_future(handler(request, ws, *args, **kwargs))\n self.websocket_tasks.add(fut)\n try:\n await fut\n except (CancelledError, ConnectionClosed):\n pass\n self.websocket_tasks.remove(fut)\n await ws.close()\n\n self.router.add(uri=uri, 
handler=websocket_handler,\n methods=frozenset({'GET'}), host=host,\n strict_slashes=strict_slashes, name=name)\n return handler\n\n return response\n\n def add_websocket_route(self, handler, uri, host=None,\n strict_slashes=None, name=None):\n \"\"\"A helper method to register a function as a websocket route.\"\"\"\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n return self.websocket(uri, host=host, strict_slashes=strict_slashes,\n name=name)(handler)\n\n def enable_websocket(self, enable=True):\n \"\"\"Enable or disable the support for websocket.\n\n Websocket is enabled automatically if websocket routes are\n added to the application.\n \"\"\"\n if not self.websocket_enabled:\n # if the server is stopped, we want to cancel any ongoing\n # websocket tasks, to allow the server to exit promptly\n @self.listener('before_server_stop')\n def cancel_websocket_tasks(app, loop):\n for task in self.websocket_tasks:\n task.cancel()\n\n self.websocket_enabled = enable\n\n def remove_route(self, uri, clean_cache=True, host=None):\n self.router.remove(uri, clean_cache, host)\n\n # Decorator\n def exception(self, *exceptions):\n \"\"\"Decorate a function to be registered as a handler for exceptions\n\n :param exceptions: exceptions\n :return: decorated function\n \"\"\"\n\n def response(handler):\n for exception in exceptions:\n if isinstance(exception, (tuple, list)):\n for e in exception:\n self.error_handler.add(e, handler)\n else:\n self.error_handler.add(exception, handler)\n return handler\n\n return response\n\n def register_middleware(self, middleware, attach_to='request'):\n if attach_to == 'request':\n self.request_middleware.append(middleware)\n if attach_to == 'response':\n self.response_middleware.appendleft(middleware)\n return middleware\n\n # Decorator\n def middleware(self, middleware_or_request):\n \"\"\"Decorate and register middleware to be called before a request.\n Can either be called as @app.middleware or @app.middleware('request')\n \"\"\"\n\n # Detect which way this was called, @middleware or @middleware('AT')\n if callable(middleware_or_request):\n return self.register_middleware(middleware_or_request)\n\n else:\n return partial(self.register_middleware,\n attach_to=middleware_or_request)\n\n # Static Files\n def static(self, uri, file_or_directory, pattern=r'/?.+',\n use_modified_since=True, use_content_range=False,\n stream_large_files=False, name='static', host=None,\n strict_slashes=None):\n \"\"\"Register a root to serve files from. The input can either be a\n file or a directory. See\n \"\"\"\n static_register(self, uri, file_or_directory, pattern,\n use_modified_since, use_content_range,\n stream_large_files, name, host, strict_slashes)\n\n def blueprint(self, blueprint, **options):\n \"\"\"Register a blueprint on the application.\n\n :param blueprint: Blueprint object or (list, tuple) thereof\n :param options: option dictionary with blueprint defaults\n :return: Nothing\n \"\"\"\n if isinstance(blueprint, (list, tuple)):\n for item in blueprint:\n self.blueprint(item, **options)\n return\n if blueprint.name in self.blueprints:\n assert self.blueprints[blueprint.name] is blueprint, \\\n 'A blueprint with the name \"%s\" is already registered. ' \\\n 'Blueprint names must be unique.' 
% \\\n (blueprint.name,)\n else:\n self.blueprints[blueprint.name] = blueprint\n self._blueprint_order.append(blueprint)\n blueprint.register(self, options)\n\n def register_blueprint(self, *args, **kwargs):\n # TODO: deprecate 1.0\n if self.debug:\n warnings.simplefilter('default')\n warnings.warn(\"Use of register_blueprint will be deprecated in \"\n \"version 1.0. Please use the blueprint method\"\n \" instead\",\n DeprecationWarning)\n return self.blueprint(*args, **kwargs)\n\n def url_for(self, view_name: str, **kwargs):\n \"\"\"Build a URL based on a view name and the values provided.\n\n In order to build a URL, all request parameters must be supplied as\n keyword arguments, and each parameter must pass the test for the\n specified parameter type. If these conditions are not met, a\n `URLBuildError` will be thrown.\n\n Keyword arguments that are not request parameters will be included in\n the output URL's query string.\n\n :param view_name: string referencing the view name\n :param \\*\\*kwargs: keys and values that are used to build request\n parameters and query string arguments.\n\n :return: the built URL\n\n Raises:\n URLBuildError\n \"\"\"\n # find the route by the supplied view name\n kw = {}\n # special static files url_for\n if view_name == 'static':\n kw.update(name=kwargs.pop('name', 'static'))\n elif view_name.endswith('.static'): # blueprint.static\n kwargs.pop('name', None)\n kw.update(name=view_name)\n\n uri, route = self.router.find_route_by_view_name(view_name, **kw)\n if not (uri and route):\n raise URLBuildError('Endpoint with name `{}` was not found'.format(\n view_name))\n\n if view_name == 'static' or view_name.endswith('.static'):\n filename = kwargs.pop('filename', None)\n # it's static folder\n if '<file_uri:' in uri:\n folder_ = uri.split('<file_uri:', 1)[0]\n if folder_.endswith('/'):\n folder_ = folder_[:-1]\n\n if filename.startswith('/'):\n filename = filename[1:]\n\n uri = '{}/{}'.format(folder_, filename)\n\n if uri != '/' and uri.endswith('/'):\n uri = uri[:-1]\n\n out = uri\n\n # find all the parameters we will need to build in the URL\n matched_params = re.findall(\n self.router.parameter_pattern, uri)\n\n # _method is only a placeholder now, don't know how to support it\n kwargs.pop('_method', None)\n anchor = kwargs.pop('_anchor', '')\n # _external need SERVER_NAME in config or pass _server arg\n external = kwargs.pop('_external', False)\n scheme = kwargs.pop('_scheme', '')\n if scheme and not external:\n raise ValueError('When specifying _scheme, _external must be True')\n\n netloc = kwargs.pop('_server', None)\n if netloc is None and external:\n netloc = self.config.get('SERVER_NAME', '')\n\n if external:\n if not scheme:\n if ':' in netloc[:8]:\n scheme = netloc[:8].split(':', 1)[0]\n else:\n scheme = 'http'\n\n if '://' in netloc[:8]:\n netloc = netloc.split('://', 1)[-1]\n\n for match in matched_params:\n name, _type, pattern = self.router.parse_parameter_string(\n match)\n # we only want to match against each individual parameter\n specific_pattern = '^{}$'.format(pattern)\n supplied_param = None\n\n if name in kwargs:\n supplied_param = kwargs.get(name)\n del kwargs[name]\n else:\n raise URLBuildError(\n 'Required parameter `{}` was not passed to url_for'.format(\n name))\n\n supplied_param = str(supplied_param)\n # determine if the parameter supplied by the caller passes the test\n # in the URL\n passes_pattern = re.match(specific_pattern, supplied_param)\n\n if not passes_pattern:\n if _type != str:\n msg = (\n 'Value \"{}\" for 
parameter `{}` does not '\n 'match pattern for type `{}`: {}'.format(\n supplied_param, name, _type.__name__, pattern))\n else:\n msg = (\n 'Value \"{}\" for parameter `{}` '\n 'does not satisfy pattern {}'.format(\n supplied_param, name, pattern))\n raise URLBuildError(msg)\n\n # replace the parameter in the URL with the supplied value\n replacement_regex = '(<{}.*?>)'.format(name)\n\n out = re.sub(\n replacement_regex, supplied_param, out)\n\n # parse the remainder of the keyword arguments into a querystring\n query_string = urlencode(kwargs, doseq=True) if kwargs else ''\n # scheme://netloc/path;parameters?query#fragment\n out = urlunparse((scheme, netloc, out, '', query_string, anchor))\n\n return out\n\n # -------------------------------------------------------------------- #\n # Request Handling\n # -------------------------------------------------------------------- #\n\n def converted_response_type(self, response):\n pass\n\n async def handle_request(self, request, write_callback, stream_callback):\n \"\"\"Take a request from the HTTP Server and return a response object\n to be sent back The HTTP Server only expects a response object, so\n exception handling must be done here\n\n :param request: HTTP Request object\n :param write_callback: Synchronous response function to be\n called with the response as the only argument\n :param stream_callback: Coroutine that handles streaming a\n StreamingHTTPResponse if produced by the handler.\n\n :return: Nothing\n \"\"\"\n try:\n # -------------------------------------------- #\n # Request Middleware\n # -------------------------------------------- #\n\n request.app = self\n response = await self._run_request_middleware(request)\n # No middleware results\n if not response:\n # -------------------------------------------- #\n # Execute Handler\n # -------------------------------------------- #\n\n # Fetch handler from router\n handler, args, kwargs, uri = self.router.get(request)\n\n request.uri_template = uri\n if handler is None:\n raise ServerError(\n (\"'None' was returned while requesting a \"\n \"handler from the router\"))\n\n # Run response handler\n response = handler(request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n except Exception as e:\n # -------------------------------------------- #\n # Response Generation Failed\n # -------------------------------------------- #\n\n try:\n response = self.error_handler.response(request, e)\n if isawaitable(response):\n response = await response\n except Exception as e:\n if isinstance(e, SanicException):\n response = self.error_handler.default(request=request,\n exception=e)\n elif self.debug:\n response = HTTPResponse(\n \"Error while handling error: {}\\nStack: {}\".format(\n e, format_exc()), status=500)\n else:\n response = HTTPResponse(\n \"An error occurred while handling an error\",\n status=500)\n finally:\n # -------------------------------------------- #\n # Response Middleware\n # -------------------------------------------- #\n try:\n response = await self._run_response_middleware(request,\n response)\n except BaseException:\n error_logger.exception(\n 'Exception occurred in one of response middleware handlers'\n )\n\n # pass the response to the correct callback\n if isinstance(response, StreamingHTTPResponse):\n await stream_callback(response)\n else:\n write_callback(response)\n\n # -------------------------------------------------------------------- #\n # Testing\n # -------------------------------------------------------------------- #\n\n 
@property\n def test_client(self):\n return SanicTestClient(self)\n\n # -------------------------------------------------------------------- #\n # Execution\n # -------------------------------------------------------------------- #\n\n def run(self, host=None, port=None, debug=False, ssl=None,\n sock=None, workers=1, protocol=None,\n backlog=100, stop_event=None, register_sys_signals=True,\n access_log=True, **kwargs):\n \"\"\"Run the HTTP Server and listen until keyboard interrupt or term\n signal. On termination, drain connections before closing.\n\n :param host: Address to host on\n :param port: Port to host on\n :param debug: Enables debug output (slows server)\n :param ssl: SSLContext, or location of certificate and key\n for SSL encryption of worker(s)\n :param sock: Socket for the server to accept connections from\n :param workers: Number of processes\n received before it is respected\n :param backlog:\n :param stop_event:\n :param register_sys_signals:\n :param protocol: Subclass of asyncio protocol class\n :return: Nothing\n \"\"\"\n # Default auto_reload to false\n auto_reload = False\n # If debug is set, default it to true\n if debug:\n auto_reload = True\n # Allow for overriding either of the defaults\n auto_reload = kwargs.get(\"auto_reload\", auto_reload)\n\n if sock is None:\n host, port = host or \"127.0.0.1\", port or 8000\n\n if protocol is None:\n protocol = (WebSocketProtocol if self.websocket_enabled\n else HttpProtocol)\n if stop_event is not None:\n if debug:\n warnings.simplefilter('default')\n warnings.warn(\"stop_event will be removed from future versions.\",\n DeprecationWarning)\n server_settings = self._helper(\n host=host, port=port, debug=debug, ssl=ssl, sock=sock,\n workers=workers, protocol=protocol, backlog=backlog,\n register_sys_signals=register_sys_signals,\n access_log=access_log, auto_reload=auto_reload)\n\n try:\n self.is_running = True\n if workers == 1:\n if auto_reload and os.name != 'posix':\n # This condition must be removed after implementing\n # auto reloader for other operating systems.\n raise NotImplementedError\n\n if auto_reload and \\\n os.environ.get('SANIC_SERVER_RUNNING') != 'true':\n reloader_helpers.watchdog(2)\n else:\n serve(**server_settings)\n else:\n serve_multiple(server_settings, workers)\n except BaseException:\n error_logger.exception(\n 'Experienced exception while trying to serve')\n raise\n finally:\n self.is_running = False\n logger.info(\"Server Stopped\")\n\n def stop(self):\n \"\"\"This kills the Sanic\"\"\"\n get_event_loop().stop()\n\n def __call__(self):\n \"\"\"gunicorn compatibility\"\"\"\n return self\n\n async def create_server(self, host=None, port=None, debug=False,\n ssl=None, sock=None, protocol=None,\n backlog=100, stop_event=None,\n access_log=True):\n \"\"\"Asynchronous version of `run`.\n\n NOTE: This does not support multiprocessing and is not the preferred\n way to run a Sanic application.\n \"\"\"\n\n if sock is None:\n host, port = host or \"127.0.0.1\", port or 8000\n\n if protocol is None:\n protocol = (WebSocketProtocol if self.websocket_enabled\n else HttpProtocol)\n if stop_event is not None:\n if debug:\n warnings.simplefilter('default')\n warnings.warn(\"stop_event will be removed from future versions.\",\n DeprecationWarning)\n\n server_settings = self._helper(\n host=host, port=port, debug=debug, ssl=ssl, sock=sock,\n loop=get_event_loop(), protocol=protocol,\n backlog=backlog, run_async=True,\n access_log=access_log)\n\n # Trigger before_start events\n await self.trigger_events(\n 
server_settings.get('before_start', []),\n server_settings.get('loop')\n )\n\n return await serve(**server_settings)\n\n async def trigger_events(self, events, loop):\n \"\"\"Trigger events (functions or async)\n :param events: one or more sync or async functions to execute\n :param loop: event loop\n \"\"\"\n for event in events:\n result = event(loop)\n if isawaitable(result):\n await result\n\n async def _run_request_middleware(self, request):\n # The if improves speed. I don't know why\n if self.request_middleware:\n for middleware in self.request_middleware:\n response = middleware(request)\n if isawaitable(response):\n response = await response\n if response:\n return response\n return None\n\n async def _run_response_middleware(self, request, response):\n if self.response_middleware:\n for middleware in self.response_middleware:\n _response = middleware(request, response)\n if isawaitable(_response):\n _response = await _response\n if _response:\n response = _response\n break\n return response\n\n def _helper(self, host=None, port=None, debug=False,\n ssl=None, sock=None, workers=1, loop=None,\n protocol=HttpProtocol, backlog=100, stop_event=None,\n register_sys_signals=True, run_async=False, access_log=True,\n auto_reload=False):\n \"\"\"Helper function used by `run` and `create_server`.\"\"\"\n if isinstance(ssl, dict):\n # try common aliaseses\n cert = ssl.get('cert') or ssl.get('certificate')\n key = ssl.get('key') or ssl.get('keyfile')\n if cert is None or key is None:\n raise ValueError(\"SSLContext or certificate and key required.\")\n context = create_default_context(purpose=Purpose.CLIENT_AUTH)\n context.load_cert_chain(cert, keyfile=key)\n ssl = context\n if stop_event is not None:\n if debug:\n warnings.simplefilter('default')\n warnings.warn(\"stop_event will be removed from future versions.\",\n DeprecationWarning)\n\n self.error_handler.debug = debug\n self.debug = debug\n\n server_settings = {\n 'protocol': protocol,\n 'request_class': self.request_class,\n 'is_request_stream': self.is_request_stream,\n 'router': self.router,\n 'host': host,\n 'port': port,\n 'sock': sock,\n 'ssl': ssl,\n 'signal': Signal(),\n 'debug': debug,\n 'request_handler': self.handle_request,\n 'error_handler': self.error_handler,\n 'request_timeout': self.config.REQUEST_TIMEOUT,\n 'response_timeout': self.config.RESPONSE_TIMEOUT,\n 'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,\n 'request_max_size': self.config.REQUEST_MAX_SIZE,\n 'keep_alive': self.config.KEEP_ALIVE,\n 'loop': loop,\n 'register_sys_signals': register_sys_signals,\n 'backlog': backlog,\n 'access_log': access_log,\n 'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,\n 'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,\n 'websocket_read_limit': self.config.WEBSOCKET_READ_LIMIT,\n 'websocket_write_limit': self.config.WEBSOCKET_WRITE_LIMIT,\n 'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT\n }\n\n # -------------------------------------------- #\n # Register start/stop events\n # -------------------------------------------- #\n\n for event_name, settings_name, reverse in (\n (\"before_server_start\", \"before_start\", False),\n (\"after_server_start\", \"after_start\", False),\n (\"before_server_stop\", \"before_stop\", True),\n (\"after_server_stop\", \"after_stop\", True),\n ):\n listeners = self.listeners[event_name].copy()\n if reverse:\n listeners.reverse()\n # Prepend sanic to the arguments when listeners are triggered\n listeners = [partial(listener, self) for listener in listeners]\n 
server_settings[settings_name] = listeners\n\n if self.configure_logging and debug:\n logger.setLevel(logging.DEBUG)\n\n if self.config.LOGO is not None and \\\n os.environ.get('SANIC_SERVER_RUNNING') != 'true':\n logger.debug(self.config.LOGO)\n\n if run_async:\n server_settings['run_async'] = True\n\n # Serve\n if host and port and os.environ.get('SANIC_SERVER_RUNNING') != 'true':\n proto = \"http\"\n if ssl is not None:\n proto = \"https\"\n logger.info('Goin\\' Fast @ {}://{}:{}'.format(proto, host, port))\n\n return server_settings\n", "path": "sanic/app.py"}], "after_files": [{"content": "import os\nimport logging\nimport logging.config\nimport re\nimport warnings\nfrom asyncio import get_event_loop, ensure_future, CancelledError\nfrom collections import deque, defaultdict\nfrom functools import partial\nfrom inspect import getmodulename, isawaitable, signature, stack\nfrom traceback import format_exc\nfrom urllib.parse import urlencode, urlunparse\nfrom ssl import create_default_context, Purpose\n\nfrom sanic.config import Config\nfrom sanic.constants import HTTP_METHODS\nfrom sanic.exceptions import ServerError, URLBuildError, SanicException\nfrom sanic.handlers import ErrorHandler\nfrom sanic.log import logger, error_logger, LOGGING_CONFIG_DEFAULTS\nfrom sanic.response import HTTPResponse, StreamingHTTPResponse\nfrom sanic.router import Router\nfrom sanic.server import serve, serve_multiple, HttpProtocol, Signal\nfrom sanic.static import register as static_register\nfrom sanic.testing import SanicTestClient\nfrom sanic.views import CompositionView\nfrom sanic.websocket import WebSocketProtocol, ConnectionClosed\nimport sanic.reloader_helpers as reloader_helpers\n\n\nclass Sanic:\n def __init__(self, name=None, router=None, error_handler=None,\n load_env=True, request_class=None,\n strict_slashes=False, log_config=None,\n configure_logging=True):\n\n # Get name from previous stack frame\n if name is None:\n frame_records = stack()[1]\n name = getmodulename(frame_records[1])\n\n # logging\n if configure_logging:\n logging.config.dictConfig(log_config or LOGGING_CONFIG_DEFAULTS)\n\n self.name = name\n self.router = router or Router()\n self.request_class = request_class\n self.error_handler = error_handler or ErrorHandler()\n self.config = Config(load_env=load_env)\n self.request_middleware = deque()\n self.response_middleware = deque()\n self.blueprints = {}\n self._blueprint_order = []\n self.configure_logging = configure_logging\n self.debug = None\n self.sock = None\n self.strict_slashes = strict_slashes\n self.listeners = defaultdict(list)\n self.is_running = False\n self.is_request_stream = False\n self.websocket_enabled = False\n self.websocket_tasks = set()\n\n # Register alternative method names\n self.go_fast = self.run\n\n @property\n def loop(self):\n \"\"\"Synonymous with asyncio.get_event_loop().\n\n Only supported when using the `app.run` method.\n \"\"\"\n if not self.is_running:\n raise SanicException(\n 'Loop can only be retrieved after the app has started '\n 'running. 
Not supported with `create_server` function')\n return get_event_loop()\n\n # -------------------------------------------------------------------- #\n # Registration\n # -------------------------------------------------------------------- #\n\n def add_task(self, task):\n \"\"\"Schedule a task to run later, after the loop has started.\n Different from asyncio.ensure_future in that it does not\n also return a future, and the actual ensure_future call\n is delayed until before server start.\n\n :param task: future, couroutine or awaitable\n \"\"\"\n try:\n if callable(task):\n try:\n self.loop.create_task(task(self))\n except TypeError:\n self.loop.create_task(task())\n else:\n self.loop.create_task(task)\n except SanicException:\n @self.listener('before_server_start')\n def run(app, loop):\n if callable(task):\n try:\n loop.create_task(task(self))\n except TypeError:\n loop.create_task(task())\n else:\n loop.create_task(task)\n\n # Decorator\n def listener(self, event):\n \"\"\"Create a listener from a decorated function.\n\n :param event: event to listen to\n \"\"\"\n\n def decorator(listener):\n self.listeners[event].append(listener)\n return listener\n\n return decorator\n\n def register_listener(self, listener, event):\n \"\"\"\n Register the listener for a given event.\n\n Args:\n listener: callable i.e. setup_db(app, loop)\n event: when to register listener i.e. 'before_server_start'\n\n Returns: listener\n \"\"\"\n\n return self.listener(event)(listener)\n\n # Decorator\n def route(self, uri, methods=frozenset({'GET'}), host=None,\n strict_slashes=None, stream=False, version=None, name=None):\n \"\"\"Decorate a function to be registered as a route\n\n :param uri: path of the URL\n :param methods: list or tuple of methods allowed\n :param host:\n :param strict_slashes:\n :param stream:\n :param version:\n :param name: user defined route name for url_for\n :return: decorated function\n \"\"\"\n\n # Fix case where the user did not prefix the URL with a /\n # and will probably get confused as to why it's not working\n if not uri.startswith('/'):\n uri = '/' + uri\n\n if stream:\n self.is_request_stream = True\n\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n def response(handler):\n args = [key for key in signature(handler).parameters.keys()]\n if args:\n if stream:\n handler.is_stream = stream\n\n self.router.add(uri=uri, methods=methods, handler=handler,\n host=host, strict_slashes=strict_slashes,\n version=version, name=name)\n return handler\n else:\n raise ValueError(\n 'Required parameter `request` missing'\n 'in the {0}() route?'.format(\n handler.__name__))\n\n return response\n\n # Shorthand method decorators\n def get(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"GET\"}), host=host,\n strict_slashes=strict_slashes, version=version,\n name=name)\n\n def post(self, uri, host=None, strict_slashes=None, stream=False,\n version=None, name=None):\n return self.route(uri, methods=frozenset({\"POST\"}), host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)\n\n def put(self, uri, host=None, strict_slashes=None, stream=False,\n version=None, name=None):\n return self.route(uri, methods=frozenset({\"PUT\"}), host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)\n\n def head(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"HEAD\"}), host=host,\n 
strict_slashes=strict_slashes, version=version,\n name=name)\n\n def options(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"OPTIONS\"}), host=host,\n strict_slashes=strict_slashes, version=version,\n name=name)\n\n def patch(self, uri, host=None, strict_slashes=None, stream=False,\n version=None, name=None):\n return self.route(uri, methods=frozenset({\"PATCH\"}), host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)\n\n def delete(self, uri, host=None, strict_slashes=None, version=None,\n name=None):\n return self.route(uri, methods=frozenset({\"DELETE\"}), host=host,\n strict_slashes=strict_slashes, version=version,\n name=name)\n\n def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None,\n strict_slashes=None, version=None, name=None, stream=False):\n \"\"\"A helper method to register class instance or\n functions as a handler to the application url\n routes.\n\n :param handler: function or class instance\n :param uri: path of the URL\n :param methods: list or tuple of methods allowed, these are overridden\n if using a HTTPMethodView\n :param host:\n :param strict_slashes:\n :param version:\n :param name: user defined route name for url_for\n :param stream: boolean specifying if the handler is a stream handler\n :return: function or class instance\n \"\"\"\n # Handle HTTPMethodView differently\n if hasattr(handler, 'view_class'):\n methods = set()\n\n for method in HTTP_METHODS:\n _handler = getattr(handler.view_class, method.lower(), None)\n if _handler:\n methods.add(method)\n if hasattr(_handler, 'is_stream'):\n stream = True\n\n # handle composition view differently\n if isinstance(handler, CompositionView):\n methods = handler.handlers.keys()\n for _handler in handler.handlers.values():\n if hasattr(_handler, 'is_stream'):\n stream = True\n break\n\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n self.route(uri=uri, methods=methods, host=host,\n strict_slashes=strict_slashes, stream=stream,\n version=version, name=name)(handler)\n return handler\n\n # Decorator\n def websocket(self, uri, host=None, strict_slashes=None,\n subprotocols=None, name=None):\n \"\"\"Decorate a function to be registered as a websocket route\n :param uri: path of the URL\n :param subprotocols: optional list of strings with the supported\n subprotocols\n :param host:\n :return: decorated function\n \"\"\"\n self.enable_websocket()\n\n # Fix case where the user did not prefix the URL with a /\n # and will probably get confused as to why it's not working\n if not uri.startswith('/'):\n uri = '/' + uri\n\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n def response(handler):\n async def websocket_handler(request, *args, **kwargs):\n request.app = self\n try:\n protocol = request.transport.get_protocol()\n except AttributeError:\n # On Python3.5 the Transport classes in asyncio do not\n # have a get_protocol() method as in uvloop\n protocol = request.transport._protocol\n ws = await protocol.websocket_handshake(request, subprotocols)\n\n # schedule the application handler\n # its future is kept in self.websocket_tasks in case it\n # needs to be cancelled due to the server being stopped\n fut = ensure_future(handler(request, ws, *args, **kwargs))\n self.websocket_tasks.add(fut)\n try:\n await fut\n except (CancelledError, ConnectionClosed):\n pass\n finally:\n self.websocket_tasks.remove(fut)\n await ws.close()\n\n self.router.add(uri=uri, 
handler=websocket_handler,\n methods=frozenset({'GET'}), host=host,\n strict_slashes=strict_slashes, name=name)\n return handler\n\n return response\n\n def add_websocket_route(self, handler, uri, host=None,\n strict_slashes=None, name=None):\n \"\"\"A helper method to register a function as a websocket route.\"\"\"\n if strict_slashes is None:\n strict_slashes = self.strict_slashes\n\n return self.websocket(uri, host=host, strict_slashes=strict_slashes,\n name=name)(handler)\n\n def enable_websocket(self, enable=True):\n \"\"\"Enable or disable the support for websocket.\n\n Websocket is enabled automatically if websocket routes are\n added to the application.\n \"\"\"\n if not self.websocket_enabled:\n # if the server is stopped, we want to cancel any ongoing\n # websocket tasks, to allow the server to exit promptly\n @self.listener('before_server_stop')\n def cancel_websocket_tasks(app, loop):\n for task in self.websocket_tasks:\n task.cancel()\n\n self.websocket_enabled = enable\n\n def remove_route(self, uri, clean_cache=True, host=None):\n self.router.remove(uri, clean_cache, host)\n\n # Decorator\n def exception(self, *exceptions):\n \"\"\"Decorate a function to be registered as a handler for exceptions\n\n :param exceptions: exceptions\n :return: decorated function\n \"\"\"\n\n def response(handler):\n for exception in exceptions:\n if isinstance(exception, (tuple, list)):\n for e in exception:\n self.error_handler.add(e, handler)\n else:\n self.error_handler.add(exception, handler)\n return handler\n\n return response\n\n def register_middleware(self, middleware, attach_to='request'):\n if attach_to == 'request':\n self.request_middleware.append(middleware)\n if attach_to == 'response':\n self.response_middleware.appendleft(middleware)\n return middleware\n\n # Decorator\n def middleware(self, middleware_or_request):\n \"\"\"Decorate and register middleware to be called before a request.\n Can either be called as @app.middleware or @app.middleware('request')\n \"\"\"\n\n # Detect which way this was called, @middleware or @middleware('AT')\n if callable(middleware_or_request):\n return self.register_middleware(middleware_or_request)\n\n else:\n return partial(self.register_middleware,\n attach_to=middleware_or_request)\n\n # Static Files\n def static(self, uri, file_or_directory, pattern=r'/?.+',\n use_modified_since=True, use_content_range=False,\n stream_large_files=False, name='static', host=None,\n strict_slashes=None):\n \"\"\"Register a root to serve files from. The input can either be a\n file or a directory. See\n \"\"\"\n static_register(self, uri, file_or_directory, pattern,\n use_modified_since, use_content_range,\n stream_large_files, name, host, strict_slashes)\n\n def blueprint(self, blueprint, **options):\n \"\"\"Register a blueprint on the application.\n\n :param blueprint: Blueprint object or (list, tuple) thereof\n :param options: option dictionary with blueprint defaults\n :return: Nothing\n \"\"\"\n if isinstance(blueprint, (list, tuple)):\n for item in blueprint:\n self.blueprint(item, **options)\n return\n if blueprint.name in self.blueprints:\n assert self.blueprints[blueprint.name] is blueprint, \\\n 'A blueprint with the name \"%s\" is already registered. ' \\\n 'Blueprint names must be unique.' 
% \\\n (blueprint.name,)\n else:\n self.blueprints[blueprint.name] = blueprint\n self._blueprint_order.append(blueprint)\n blueprint.register(self, options)\n\n def register_blueprint(self, *args, **kwargs):\n # TODO: deprecate 1.0\n if self.debug:\n warnings.simplefilter('default')\n warnings.warn(\"Use of register_blueprint will be deprecated in \"\n \"version 1.0. Please use the blueprint method\"\n \" instead\",\n DeprecationWarning)\n return self.blueprint(*args, **kwargs)\n\n def url_for(self, view_name: str, **kwargs):\n \"\"\"Build a URL based on a view name and the values provided.\n\n In order to build a URL, all request parameters must be supplied as\n keyword arguments, and each parameter must pass the test for the\n specified parameter type. If these conditions are not met, a\n `URLBuildError` will be thrown.\n\n Keyword arguments that are not request parameters will be included in\n the output URL's query string.\n\n :param view_name: string referencing the view name\n :param \\*\\*kwargs: keys and values that are used to build request\n parameters and query string arguments.\n\n :return: the built URL\n\n Raises:\n URLBuildError\n \"\"\"\n # find the route by the supplied view name\n kw = {}\n # special static files url_for\n if view_name == 'static':\n kw.update(name=kwargs.pop('name', 'static'))\n elif view_name.endswith('.static'): # blueprint.static\n kwargs.pop('name', None)\n kw.update(name=view_name)\n\n uri, route = self.router.find_route_by_view_name(view_name, **kw)\n if not (uri and route):\n raise URLBuildError('Endpoint with name `{}` was not found'.format(\n view_name))\n\n if view_name == 'static' or view_name.endswith('.static'):\n filename = kwargs.pop('filename', None)\n # it's static folder\n if '<file_uri:' in uri:\n folder_ = uri.split('<file_uri:', 1)[0]\n if folder_.endswith('/'):\n folder_ = folder_[:-1]\n\n if filename.startswith('/'):\n filename = filename[1:]\n\n uri = '{}/{}'.format(folder_, filename)\n\n if uri != '/' and uri.endswith('/'):\n uri = uri[:-1]\n\n out = uri\n\n # find all the parameters we will need to build in the URL\n matched_params = re.findall(\n self.router.parameter_pattern, uri)\n\n # _method is only a placeholder now, don't know how to support it\n kwargs.pop('_method', None)\n anchor = kwargs.pop('_anchor', '')\n # _external need SERVER_NAME in config or pass _server arg\n external = kwargs.pop('_external', False)\n scheme = kwargs.pop('_scheme', '')\n if scheme and not external:\n raise ValueError('When specifying _scheme, _external must be True')\n\n netloc = kwargs.pop('_server', None)\n if netloc is None and external:\n netloc = self.config.get('SERVER_NAME', '')\n\n if external:\n if not scheme:\n if ':' in netloc[:8]:\n scheme = netloc[:8].split(':', 1)[0]\n else:\n scheme = 'http'\n\n if '://' in netloc[:8]:\n netloc = netloc.split('://', 1)[-1]\n\n for match in matched_params:\n name, _type, pattern = self.router.parse_parameter_string(\n match)\n # we only want to match against each individual parameter\n specific_pattern = '^{}$'.format(pattern)\n supplied_param = None\n\n if name in kwargs:\n supplied_param = kwargs.get(name)\n del kwargs[name]\n else:\n raise URLBuildError(\n 'Required parameter `{}` was not passed to url_for'.format(\n name))\n\n supplied_param = str(supplied_param)\n # determine if the parameter supplied by the caller passes the test\n # in the URL\n passes_pattern = re.match(specific_pattern, supplied_param)\n\n if not passes_pattern:\n if _type != str:\n msg = (\n 'Value \"{}\" for 
parameter `{}` does not '\n 'match pattern for type `{}`: {}'.format(\n supplied_param, name, _type.__name__, pattern))\n else:\n msg = (\n 'Value \"{}\" for parameter `{}` '\n 'does not satisfy pattern {}'.format(\n supplied_param, name, pattern))\n raise URLBuildError(msg)\n\n # replace the parameter in the URL with the supplied value\n replacement_regex = '(<{}.*?>)'.format(name)\n\n out = re.sub(\n replacement_regex, supplied_param, out)\n\n # parse the remainder of the keyword arguments into a querystring\n query_string = urlencode(kwargs, doseq=True) if kwargs else ''\n # scheme://netloc/path;parameters?query#fragment\n out = urlunparse((scheme, netloc, out, '', query_string, anchor))\n\n return out\n\n # -------------------------------------------------------------------- #\n # Request Handling\n # -------------------------------------------------------------------- #\n\n def converted_response_type(self, response):\n pass\n\n async def handle_request(self, request, write_callback, stream_callback):\n \"\"\"Take a request from the HTTP Server and return a response object\n to be sent back The HTTP Server only expects a response object, so\n exception handling must be done here\n\n :param request: HTTP Request object\n :param write_callback: Synchronous response function to be\n called with the response as the only argument\n :param stream_callback: Coroutine that handles streaming a\n StreamingHTTPResponse if produced by the handler.\n\n :return: Nothing\n \"\"\"\n try:\n # -------------------------------------------- #\n # Request Middleware\n # -------------------------------------------- #\n\n request.app = self\n response = await self._run_request_middleware(request)\n # No middleware results\n if not response:\n # -------------------------------------------- #\n # Execute Handler\n # -------------------------------------------- #\n\n # Fetch handler from router\n handler, args, kwargs, uri = self.router.get(request)\n\n request.uri_template = uri\n if handler is None:\n raise ServerError(\n (\"'None' was returned while requesting a \"\n \"handler from the router\"))\n\n # Run response handler\n response = handler(request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n except Exception as e:\n # -------------------------------------------- #\n # Response Generation Failed\n # -------------------------------------------- #\n\n try:\n response = self.error_handler.response(request, e)\n if isawaitable(response):\n response = await response\n except Exception as e:\n if isinstance(e, SanicException):\n response = self.error_handler.default(request=request,\n exception=e)\n elif self.debug:\n response = HTTPResponse(\n \"Error while handling error: {}\\nStack: {}\".format(\n e, format_exc()), status=500)\n else:\n response = HTTPResponse(\n \"An error occurred while handling an error\",\n status=500)\n finally:\n # -------------------------------------------- #\n # Response Middleware\n # -------------------------------------------- #\n try:\n response = await self._run_response_middleware(request,\n response)\n except BaseException:\n error_logger.exception(\n 'Exception occurred in one of response middleware handlers'\n )\n\n # pass the response to the correct callback\n if isinstance(response, StreamingHTTPResponse):\n await stream_callback(response)\n else:\n write_callback(response)\n\n # -------------------------------------------------------------------- #\n # Testing\n # -------------------------------------------------------------------- #\n\n 
@property\n def test_client(self):\n return SanicTestClient(self)\n\n # -------------------------------------------------------------------- #\n # Execution\n # -------------------------------------------------------------------- #\n\n def run(self, host=None, port=None, debug=False, ssl=None,\n sock=None, workers=1, protocol=None,\n backlog=100, stop_event=None, register_sys_signals=True,\n access_log=True, **kwargs):\n \"\"\"Run the HTTP Server and listen until keyboard interrupt or term\n signal. On termination, drain connections before closing.\n\n :param host: Address to host on\n :param port: Port to host on\n :param debug: Enables debug output (slows server)\n :param ssl: SSLContext, or location of certificate and key\n for SSL encryption of worker(s)\n :param sock: Socket for the server to accept connections from\n :param workers: Number of processes\n received before it is respected\n :param backlog:\n :param stop_event:\n :param register_sys_signals:\n :param protocol: Subclass of asyncio protocol class\n :return: Nothing\n \"\"\"\n # Default auto_reload to false\n auto_reload = False\n # If debug is set, default it to true\n if debug:\n auto_reload = True\n # Allow for overriding either of the defaults\n auto_reload = kwargs.get(\"auto_reload\", auto_reload)\n\n if sock is None:\n host, port = host or \"127.0.0.1\", port or 8000\n\n if protocol is None:\n protocol = (WebSocketProtocol if self.websocket_enabled\n else HttpProtocol)\n if stop_event is not None:\n if debug:\n warnings.simplefilter('default')\n warnings.warn(\"stop_event will be removed from future versions.\",\n DeprecationWarning)\n server_settings = self._helper(\n host=host, port=port, debug=debug, ssl=ssl, sock=sock,\n workers=workers, protocol=protocol, backlog=backlog,\n register_sys_signals=register_sys_signals,\n access_log=access_log, auto_reload=auto_reload)\n\n try:\n self.is_running = True\n if workers == 1:\n if auto_reload and os.name != 'posix':\n # This condition must be removed after implementing\n # auto reloader for other operating systems.\n raise NotImplementedError\n\n if auto_reload and \\\n os.environ.get('SANIC_SERVER_RUNNING') != 'true':\n reloader_helpers.watchdog(2)\n else:\n serve(**server_settings)\n else:\n serve_multiple(server_settings, workers)\n except BaseException:\n error_logger.exception(\n 'Experienced exception while trying to serve')\n raise\n finally:\n self.is_running = False\n logger.info(\"Server Stopped\")\n\n def stop(self):\n \"\"\"This kills the Sanic\"\"\"\n get_event_loop().stop()\n\n def __call__(self):\n \"\"\"gunicorn compatibility\"\"\"\n return self\n\n async def create_server(self, host=None, port=None, debug=False,\n ssl=None, sock=None, protocol=None,\n backlog=100, stop_event=None,\n access_log=True):\n \"\"\"Asynchronous version of `run`.\n\n NOTE: This does not support multiprocessing and is not the preferred\n way to run a Sanic application.\n \"\"\"\n\n if sock is None:\n host, port = host or \"127.0.0.1\", port or 8000\n\n if protocol is None:\n protocol = (WebSocketProtocol if self.websocket_enabled\n else HttpProtocol)\n if stop_event is not None:\n if debug:\n warnings.simplefilter('default')\n warnings.warn(\"stop_event will be removed from future versions.\",\n DeprecationWarning)\n\n server_settings = self._helper(\n host=host, port=port, debug=debug, ssl=ssl, sock=sock,\n loop=get_event_loop(), protocol=protocol,\n backlog=backlog, run_async=True,\n access_log=access_log)\n\n # Trigger before_start events\n await self.trigger_events(\n 
server_settings.get('before_start', []),\n server_settings.get('loop')\n )\n\n return await serve(**server_settings)\n\n async def trigger_events(self, events, loop):\n \"\"\"Trigger events (functions or async)\n :param events: one or more sync or async functions to execute\n :param loop: event loop\n \"\"\"\n for event in events:\n result = event(loop)\n if isawaitable(result):\n await result\n\n async def _run_request_middleware(self, request):\n # The if improves speed. I don't know why\n if self.request_middleware:\n for middleware in self.request_middleware:\n response = middleware(request)\n if isawaitable(response):\n response = await response\n if response:\n return response\n return None\n\n async def _run_response_middleware(self, request, response):\n if self.response_middleware:\n for middleware in self.response_middleware:\n _response = middleware(request, response)\n if isawaitable(_response):\n _response = await _response\n if _response:\n response = _response\n break\n return response\n\n def _helper(self, host=None, port=None, debug=False,\n ssl=None, sock=None, workers=1, loop=None,\n protocol=HttpProtocol, backlog=100, stop_event=None,\n register_sys_signals=True, run_async=False, access_log=True,\n auto_reload=False):\n \"\"\"Helper function used by `run` and `create_server`.\"\"\"\n if isinstance(ssl, dict):\n # try common aliaseses\n cert = ssl.get('cert') or ssl.get('certificate')\n key = ssl.get('key') or ssl.get('keyfile')\n if cert is None or key is None:\n raise ValueError(\"SSLContext or certificate and key required.\")\n context = create_default_context(purpose=Purpose.CLIENT_AUTH)\n context.load_cert_chain(cert, keyfile=key)\n ssl = context\n if stop_event is not None:\n if debug:\n warnings.simplefilter('default')\n warnings.warn(\"stop_event will be removed from future versions.\",\n DeprecationWarning)\n\n self.error_handler.debug = debug\n self.debug = debug\n\n server_settings = {\n 'protocol': protocol,\n 'request_class': self.request_class,\n 'is_request_stream': self.is_request_stream,\n 'router': self.router,\n 'host': host,\n 'port': port,\n 'sock': sock,\n 'ssl': ssl,\n 'signal': Signal(),\n 'debug': debug,\n 'request_handler': self.handle_request,\n 'error_handler': self.error_handler,\n 'request_timeout': self.config.REQUEST_TIMEOUT,\n 'response_timeout': self.config.RESPONSE_TIMEOUT,\n 'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,\n 'request_max_size': self.config.REQUEST_MAX_SIZE,\n 'keep_alive': self.config.KEEP_ALIVE,\n 'loop': loop,\n 'register_sys_signals': register_sys_signals,\n 'backlog': backlog,\n 'access_log': access_log,\n 'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,\n 'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,\n 'websocket_read_limit': self.config.WEBSOCKET_READ_LIMIT,\n 'websocket_write_limit': self.config.WEBSOCKET_WRITE_LIMIT,\n 'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT\n }\n\n # -------------------------------------------- #\n # Register start/stop events\n # -------------------------------------------- #\n\n for event_name, settings_name, reverse in (\n (\"before_server_start\", \"before_start\", False),\n (\"after_server_start\", \"after_start\", False),\n (\"before_server_stop\", \"before_stop\", True),\n (\"after_server_stop\", \"after_stop\", True),\n ):\n listeners = self.listeners[event_name].copy()\n if reverse:\n listeners.reverse()\n # Prepend sanic to the arguments when listeners are triggered\n listeners = [partial(listener, self) for listener in listeners]\n 
server_settings[settings_name] = listeners\n\n if self.configure_logging and debug:\n logger.setLevel(logging.DEBUG)\n\n if self.config.LOGO is not None and \\\n os.environ.get('SANIC_SERVER_RUNNING') != 'true':\n logger.debug(self.config.LOGO)\n\n if run_async:\n server_settings['run_async'] = True\n\n # Serve\n if host and port and os.environ.get('SANIC_SERVER_RUNNING') != 'true':\n proto = \"http\"\n if ssl is not None:\n proto = \"https\"\n logger.info('Goin\\' Fast @ {}://{}:{}'.format(proto, host, port))\n\n return server_settings\n", "path": "sanic/app.py"}]} |
gh_patches_debug_1373 | rasdani/github-patches | git_diff | django-cms__django-filer-1079 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
django-mptt<0.9
Is there any good reason to limit django-mptt to `<0.9`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from __future__ import absolute_import, unicode_literals
4
5 import os
6
7 from setuptools import find_packages, setup
8
9 version = __import__('filer').__version__
10
11
12 def read(fname):
13 # read the contents of a text file
14 return open(os.path.join(os.path.dirname(__file__), fname)).read()
15
16 setup(
17 name="django-filer",
18 version=version,
19 url='http://github.com/divio/django-filer',
20 license='BSD',
21 platforms=['OS Independent'],
22 description="A file management application for django that makes handling "
23 "of files and images a breeze.",
24 long_description=read('README.rst'),
25 author='Stefan Foulis',
26 author_email='[email protected]',
27 packages=find_packages(),
28 install_requires=(
29 'Django>=1.8,<1.11.999', # Django is known to use rc versions
30 'easy-thumbnails>=2,<3.0',
31 'django-mptt>=0.6,<0.9', # the exact version depends on Django
32 'django_polymorphic>=0.7,<2.1',
33 'Unidecode>=0.04,<0.05',
34 ),
35 include_package_data=True,
36 zip_safe=False,
37 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
38 classifiers=[
39 'Development Status :: 5 - Production/Stable',
40 'Framework :: Django',
41 'Intended Audience :: Developers',
42 'License :: OSI Approved :: BSD License',
43 'Operating System :: OS Independent',
44 'Programming Language :: Python',
45 'Topic :: Internet :: WWW/HTTP',
46 'Framework :: Django',
47 'Framework :: Django :: 1.8',
48 'Framework :: Django :: 1.9',
49 'Framework :: Django :: 1.10',
50 'Framework :: Django :: 1.11',
51 'Programming Language :: Python :: 2.7',
52 'Programming Language :: Python :: 3.4',
53 'Programming Language :: Python :: 3.5',
54 'Programming Language :: Python :: 3.6',
55 ],
56 )
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@
install_requires=(
'Django>=1.8,<1.11.999', # Django is known to use rc versions
'easy-thumbnails>=2,<3.0',
- 'django-mptt>=0.6,<0.9', # the exact version depends on Django
+ 'django-mptt>=0.6,<0.10', # the exact version depends on Django
'django_polymorphic>=0.7,<2.1',
'Unidecode>=0.04,<0.05',
),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,7 @@\n install_requires=(\n 'Django>=1.8,<1.11.999', # Django is known to use rc versions\n 'easy-thumbnails>=2,<3.0',\n- 'django-mptt>=0.6,<0.9', # the exact version depends on Django\n+ 'django-mptt>=0.6,<0.10', # the exact version depends on Django\n 'django_polymorphic>=0.7,<2.1',\n 'Unidecode>=0.04,<0.05',\n ),\n", "issue": "django-mptt<0.9\nIs there any good reason to limit django-mptt to `<0.9`?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nversion = __import__('filer').__version__\n\n\ndef read(fname):\n # read the contents of a text file\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name=\"django-filer\",\n version=version,\n url='http://github.com/divio/django-filer',\n license='BSD',\n platforms=['OS Independent'],\n description=\"A file management application for django that makes handling \"\n \"of files and images a breeze.\",\n long_description=read('README.rst'),\n author='Stefan Foulis',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=(\n 'Django>=1.8,<1.11.999', # Django is known to use rc versions\n 'easy-thumbnails>=2,<3.0',\n 'django-mptt>=0.6,<0.9', # the exact version depends on Django\n 'django_polymorphic>=0.7,<2.1',\n 'Unidecode>=0.04,<0.05',\n ),\n include_package_data=True,\n zip_safe=False,\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: Django',\n 'Framework :: Django :: 1.8',\n 'Framework :: Django :: 1.9',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nversion = __import__('filer').__version__\n\n\ndef read(fname):\n # read the contents of a text file\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name=\"django-filer\",\n version=version,\n url='http://github.com/divio/django-filer',\n license='BSD',\n platforms=['OS Independent'],\n description=\"A file management application for django that makes handling \"\n \"of files and images a breeze.\",\n long_description=read('README.rst'),\n author='Stefan Foulis',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=(\n 'Django>=1.8,<1.11.999', # Django is known to use rc versions\n 'easy-thumbnails>=2,<3.0',\n 'django-mptt>=0.6,<0.10', # the exact version depends on Django\n 'django_polymorphic>=0.7,<2.1',\n 'Unidecode>=0.04,<0.05',\n ),\n include_package_data=True,\n zip_safe=False,\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 
'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: Django',\n 'Framework :: Django :: 1.8',\n 'Framework :: Django :: 1.9',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1374 | rasdani/github-patches | git_diff | bentoml__BentoML-822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Alpine based docker image
**Is your feature request related to a problem? Please describe.**
Allow users to use `bentoml/model-server:0.7.8-alpine` as the base image, which is currently defined here: https://github.com/bentoml/BentoML/blob/master/docker/model-server/Dockerfile-alpine
**Describe the solution you'd like**
Improve the `bentoml_init.sh` script to make sure it works on both Debian- and Alpine-based Docker images.
**Describe alternatives you've considered**
n/a
**Additional context**
See https://github.com/bentoml/BentoML/issues/693
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bentoml/saved_bundle/bundler.py`
Content:
```
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import importlib
15 import os
16 import shutil
17 import stat
18 import logging
19
20 from setuptools import sandbox
21
22 from bentoml.configuration import _is_pypi_release
23
24 from bentoml.exceptions import BentoMLException
25 from bentoml.saved_bundle.py_module_utils import copy_used_py_modules
26 from bentoml.saved_bundle.templates import (
27 BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE,
28 MANIFEST_IN_TEMPLATE,
29 MODEL_SERVER_DOCKERFILE_CPU,
30 INIT_PY_TEMPLATE,
31 )
32 from bentoml.utils.usage_stats import track_save
33 from bentoml.saved_bundle.config import SavedBundleConfig
34
35
36 DEFAULT_SAVED_BUNDLE_README = """\
37 # Generated BentoService bundle - {}:{}
38
39 This is a ML Service bundle created with BentoML, it is not recommended to edit
40 code or files contained in this directory. Instead, edit the code that uses BentoML
41 to create this bundle, and save a new BentoService bundle.
42 """
43
44 logger = logging.getLogger(__name__)
45
46
47 def save_to_dir(bento_service, path, version=None, silent=False):
48 """Save given BentoService along with all its artifacts, source code and
49     dependencies to target file path, assuming path exists and is empty. If target path
50 is not empty, this call may override existing files in the given path.
51
52 :param bento_service (bentoml.service.BentoService): a Bento Service instance
53 :param path (str): Destination of where the bento service will be saved
54 :param version (str): Override the service version with given version string
55 :param silent (boolean): whether to hide the log message showing target save path
56 """
57 track_save(bento_service)
58
59 from bentoml.service import BentoService
60
61 if not isinstance(bento_service, BentoService):
62 raise BentoMLException(
63 "save_to_dir only work with instance of custom BentoService class"
64 )
65
66 if version is not None:
67 # If parameter version provided, set bento_service version
68         # Otherwise it will be set the first time the `version` property gets accessed
69 bento_service.set_version(version)
70
71 if not os.path.exists(path):
72 raise BentoMLException("Directory '{}' not found".format(path))
73
74 for artifact in bento_service._artifacts:
75 if artifact.name not in bento_service._packed_artifacts:
76 logger.warning(
77 "Missing declared artifact '%s' for BentoService '%s'",
78 artifact.name,
79 bento_service.name,
80 )
81
82 module_base_path = os.path.join(path, bento_service.name)
83 try:
84 os.mkdir(module_base_path)
85 except FileExistsError:
86 raise BentoMLException(
87 f"Existing module file found for BentoService {bento_service.name}"
88 )
89
90 # write README.md with custom BentoService's docstring if presented
91 saved_bundle_readme = DEFAULT_SAVED_BUNDLE_README.format(
92 bento_service.name, bento_service.version
93 )
94 if bento_service.__class__.__doc__:
95 saved_bundle_readme += "\n"
96 saved_bundle_readme += bento_service.__class__.__doc__.strip()
97
98 with open(os.path.join(path, "README.md"), "w") as f:
99 f.write(saved_bundle_readme)
100
101 # save all model artifacts to 'base_path/name/artifacts/' directory
102 if bento_service.artifacts:
103 bento_service.artifacts.save(module_base_path)
104
105 # write conda environment, requirement.txt
106 bento_service.env.save(path, bento_service)
107
108 # TODO: add bentoml.find_packages helper for more fine grained control over this
109 # process, e.g. packages=find_packages(base, [], exclude=[], used_module_only=True)
110 # copy over all custom model code
111 module_name, module_file = copy_used_py_modules(
112 bento_service.__class__.__module__, os.path.join(path, bento_service.name)
113 )
114
115 # create __init__.py
116 with open(os.path.join(path, bento_service.name, "__init__.py"), "w") as f:
117 f.write(
118 INIT_PY_TEMPLATE.format(
119 service_name=bento_service.name,
120 module_name=module_name,
121 pypi_package_version=bento_service.version,
122 )
123 )
124
125 # write setup.py, this make saved BentoService bundle pip installable
126 setup_py_content = BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE.format(
127 name=bento_service.name,
128 pypi_package_version=bento_service.version,
129 long_description=saved_bundle_readme,
130 )
131 with open(os.path.join(path, "setup.py"), "w") as f:
132 f.write(setup_py_content)
133
134 with open(os.path.join(path, "MANIFEST.in"), "w") as f:
135 f.write(MANIFEST_IN_TEMPLATE.format(service_name=bento_service.name))
136
137 # write Dockerfile
138 with open(os.path.join(path, "Dockerfile"), "w") as f:
139 f.write(
140 MODEL_SERVER_DOCKERFILE_CPU.format(
141 docker_base_image=bento_service._env._docker_base_image
142 )
143 )
144
145 # Copy docker-entrypoint.sh
146 docker_entrypoint_sh_file_src = os.path.join(
147 os.path.dirname(__file__), "docker-entrypoint.sh"
148 )
149 docker_entrypoint_sh_file_dst = os.path.join(path, "docker-entrypoint.sh")
150 shutil.copyfile(docker_entrypoint_sh_file_src, docker_entrypoint_sh_file_dst)
151 # chmod +x docker-entrypoint.sh
152 st = os.stat(docker_entrypoint_sh_file_dst)
153 os.chmod(docker_entrypoint_sh_file_dst, st.st_mode | stat.S_IEXEC)
154
155 # copy bentoml-init.sh for install targz bundles
156 bentoml_init_sh_file_src = os.path.join(
157 os.path.dirname(__file__), "bentoml-init.sh"
158 )
159 bentoml_init_sh_file_dst = os.path.join(path, "bentoml-init.sh")
160 shutil.copyfile(bentoml_init_sh_file_src, bentoml_init_sh_file_dst)
161 # chmod +x bentoml_init_script file
162 st = os.stat(bentoml_init_sh_file_dst)
163 os.chmod(bentoml_init_sh_file_dst, st.st_mode | stat.S_IEXEC)
164
165 # write bentoml.yml
166 config = SavedBundleConfig(bento_service)
167 config["metadata"].update({"module_name": module_name, "module_file": module_file})
168
169 config.write_to_path(path)
170 # Also write bentoml.yml to module base path to make it accessible
171 # as package data after pip installed as a python package
172 config.write_to_path(module_base_path)
173
174 bundled_pip_dependencies_path = os.path.join(path, 'bundled_pip_dependencies')
175 _bundle_local_bentoml_if_installed_from_source(bundled_pip_dependencies_path)
176
177 if not silent:
178 logger.info(
179 "BentoService bundle '%s:%s' created at: %s",
180 bento_service.name,
181 bento_service.version,
182 path,
183 )
184
185
186 def _bundle_local_bentoml_if_installed_from_source(target_path):
187 """
188 if bentoml is installed in editor mode(pip install -e), this will build a source
189 distribution with the local bentoml fork and add it to saved BentoService bundle
190 path under bundled_pip_dependencies directory
191 """
192
193 # Find bentoml module path
194 (module_location,) = importlib.util.find_spec('bentoml').submodule_search_locations
195
196 bentoml_setup_py = os.path.abspath(os.path.join(module_location, '..', 'setup.py'))
197
198 # this is for BentoML developer to create BentoService containing custom develop
199 # branches of BentoML library, it is True only when BentoML module is installed in
200 # development mode via "pip install --editable ."
201 if not _is_pypi_release() and os.path.isfile(bentoml_setup_py):
202 logger.info(
203 "Detect BentoML installed in development model, copying local BentoML "
204 "module file to target saved bundle path"
205 )
206
207 # Create tmp directory inside bentoml module for storing the bundled
208 # targz file. Since dist-dir can only be inside of the module directory
209 bundle_dir_name = '__bentoml_tmp_sdist_build'
210 source_dir = os.path.abspath(
211 os.path.join(module_location, '..', bundle_dir_name)
212 )
213
214 if os.path.isdir(source_dir):
215 shutil.rmtree(source_dir, ignore_errors=True)
216 os.mkdir(source_dir)
217
218 sandbox.run_setup(
219 bentoml_setup_py,
220 ['sdist', '--format', 'gztar', '--dist-dir', bundle_dir_name],
221 )
222
223 # copy the generated targz to saved bundle directory and remove it from
224 # bentoml module directory
225 shutil.copytree(source_dir, target_path)
226
227 # clean up sdist build files
228 shutil.rmtree(source_dir)
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bentoml/saved_bundle/bundler.py b/bentoml/saved_bundle/bundler.py
--- a/bentoml/saved_bundle/bundler.py
+++ b/bentoml/saved_bundle/bundler.py
@@ -135,6 +135,7 @@
f.write(MANIFEST_IN_TEMPLATE.format(service_name=bento_service.name))
# write Dockerfile
+ logger.debug("Using Docker Base Image %s", bento_service._env._docker_base_image)
with open(os.path.join(path, "Dockerfile"), "w") as f:
f.write(
MODEL_SERVER_DOCKERFILE_CPU.format(
| {"golden_diff": "diff --git a/bentoml/saved_bundle/bundler.py b/bentoml/saved_bundle/bundler.py\n--- a/bentoml/saved_bundle/bundler.py\n+++ b/bentoml/saved_bundle/bundler.py\n@@ -135,6 +135,7 @@\n f.write(MANIFEST_IN_TEMPLATE.format(service_name=bento_service.name))\n \n # write Dockerfile\n+ logger.debug(\"Using Docker Base Image %s\", bento_service._env._docker_base_image)\n with open(os.path.join(path, \"Dockerfile\"), \"w\") as f:\n f.write(\n MODEL_SERVER_DOCKERFILE_CPU.format(\n", "issue": "Add support for Alpine based docker image\n**Is your feature request related to a problem? Please describe.**\r\n\r\nAllow users to use `bentoml/model-server:0.7.8-alpine` as the base image, which is currently defined here: https://github.com/bentoml/BentoML/blob/master/docker/model-server/Dockerfile-alpine\r\n\r\n**Describe the solution you'd like**\r\n\r\nImprove the `bentoml_init.sh` script to make sure it works on both debian and alpine based docker image.\r\n\r\n**Describe alternatives you've considered**\r\nn/a\r\n\r\n**Additional context**\r\n\r\nSee https://github.com/bentoml/BentoML/issues/693\r\n\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport importlib\nimport os\nimport shutil\nimport stat\nimport logging\n\nfrom setuptools import sandbox\n\nfrom bentoml.configuration import _is_pypi_release\n\nfrom bentoml.exceptions import BentoMLException\nfrom bentoml.saved_bundle.py_module_utils import copy_used_py_modules\nfrom bentoml.saved_bundle.templates import (\n BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE,\n MANIFEST_IN_TEMPLATE,\n MODEL_SERVER_DOCKERFILE_CPU,\n INIT_PY_TEMPLATE,\n)\nfrom bentoml.utils.usage_stats import track_save\nfrom bentoml.saved_bundle.config import SavedBundleConfig\n\n\nDEFAULT_SAVED_BUNDLE_README = \"\"\"\\\n# Generated BentoService bundle - {}:{}\n\nThis is a ML Service bundle created with BentoML, it is not recommended to edit\ncode or files contained in this directory. Instead, edit the code that uses BentoML\nto create this bundle, and save a new BentoService bundle.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_to_dir(bento_service, path, version=None, silent=False):\n \"\"\"Save given BentoService along with all its artifacts, source code and\n dependencies to target file path, assuming path exist and empty. 
If target path\n is not empty, this call may override existing files in the given path.\n\n :param bento_service (bentoml.service.BentoService): a Bento Service instance\n :param path (str): Destination of where the bento service will be saved\n :param version (str): Override the service version with given version string\n :param silent (boolean): whether to hide the log message showing target save path\n \"\"\"\n track_save(bento_service)\n\n from bentoml.service import BentoService\n\n if not isinstance(bento_service, BentoService):\n raise BentoMLException(\n \"save_to_dir only work with instance of custom BentoService class\"\n )\n\n if version is not None:\n # If parameter version provided, set bento_service version\n # Otherwise it will bet set the first time the `version` property get accessed\n bento_service.set_version(version)\n\n if not os.path.exists(path):\n raise BentoMLException(\"Directory '{}' not found\".format(path))\n\n for artifact in bento_service._artifacts:\n if artifact.name not in bento_service._packed_artifacts:\n logger.warning(\n \"Missing declared artifact '%s' for BentoService '%s'\",\n artifact.name,\n bento_service.name,\n )\n\n module_base_path = os.path.join(path, bento_service.name)\n try:\n os.mkdir(module_base_path)\n except FileExistsError:\n raise BentoMLException(\n f\"Existing module file found for BentoService {bento_service.name}\"\n )\n\n # write README.md with custom BentoService's docstring if presented\n saved_bundle_readme = DEFAULT_SAVED_BUNDLE_README.format(\n bento_service.name, bento_service.version\n )\n if bento_service.__class__.__doc__:\n saved_bundle_readme += \"\\n\"\n saved_bundle_readme += bento_service.__class__.__doc__.strip()\n\n with open(os.path.join(path, \"README.md\"), \"w\") as f:\n f.write(saved_bundle_readme)\n\n # save all model artifacts to 'base_path/name/artifacts/' directory\n if bento_service.artifacts:\n bento_service.artifacts.save(module_base_path)\n\n # write conda environment, requirement.txt\n bento_service.env.save(path, bento_service)\n\n # TODO: add bentoml.find_packages helper for more fine grained control over this\n # process, e.g. 
packages=find_packages(base, [], exclude=[], used_module_only=True)\n # copy over all custom model code\n module_name, module_file = copy_used_py_modules(\n bento_service.__class__.__module__, os.path.join(path, bento_service.name)\n )\n\n # create __init__.py\n with open(os.path.join(path, bento_service.name, \"__init__.py\"), \"w\") as f:\n f.write(\n INIT_PY_TEMPLATE.format(\n service_name=bento_service.name,\n module_name=module_name,\n pypi_package_version=bento_service.version,\n )\n )\n\n # write setup.py, this make saved BentoService bundle pip installable\n setup_py_content = BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE.format(\n name=bento_service.name,\n pypi_package_version=bento_service.version,\n long_description=saved_bundle_readme,\n )\n with open(os.path.join(path, \"setup.py\"), \"w\") as f:\n f.write(setup_py_content)\n\n with open(os.path.join(path, \"MANIFEST.in\"), \"w\") as f:\n f.write(MANIFEST_IN_TEMPLATE.format(service_name=bento_service.name))\n\n # write Dockerfile\n with open(os.path.join(path, \"Dockerfile\"), \"w\") as f:\n f.write(\n MODEL_SERVER_DOCKERFILE_CPU.format(\n docker_base_image=bento_service._env._docker_base_image\n )\n )\n\n # Copy docker-entrypoint.sh\n docker_entrypoint_sh_file_src = os.path.join(\n os.path.dirname(__file__), \"docker-entrypoint.sh\"\n )\n docker_entrypoint_sh_file_dst = os.path.join(path, \"docker-entrypoint.sh\")\n shutil.copyfile(docker_entrypoint_sh_file_src, docker_entrypoint_sh_file_dst)\n # chmod +x docker-entrypoint.sh\n st = os.stat(docker_entrypoint_sh_file_dst)\n os.chmod(docker_entrypoint_sh_file_dst, st.st_mode | stat.S_IEXEC)\n\n # copy bentoml-init.sh for install targz bundles\n bentoml_init_sh_file_src = os.path.join(\n os.path.dirname(__file__), \"bentoml-init.sh\"\n )\n bentoml_init_sh_file_dst = os.path.join(path, \"bentoml-init.sh\")\n shutil.copyfile(bentoml_init_sh_file_src, bentoml_init_sh_file_dst)\n # chmod +x bentoml_init_script file\n st = os.stat(bentoml_init_sh_file_dst)\n os.chmod(bentoml_init_sh_file_dst, st.st_mode | stat.S_IEXEC)\n\n # write bentoml.yml\n config = SavedBundleConfig(bento_service)\n config[\"metadata\"].update({\"module_name\": module_name, \"module_file\": module_file})\n\n config.write_to_path(path)\n # Also write bentoml.yml to module base path to make it accessible\n # as package data after pip installed as a python package\n config.write_to_path(module_base_path)\n\n bundled_pip_dependencies_path = os.path.join(path, 'bundled_pip_dependencies')\n _bundle_local_bentoml_if_installed_from_source(bundled_pip_dependencies_path)\n\n if not silent:\n logger.info(\n \"BentoService bundle '%s:%s' created at: %s\",\n bento_service.name,\n bento_service.version,\n path,\n )\n\n\ndef _bundle_local_bentoml_if_installed_from_source(target_path):\n \"\"\"\n if bentoml is installed in editor mode(pip install -e), this will build a source\n distribution with the local bentoml fork and add it to saved BentoService bundle\n path under bundled_pip_dependencies directory\n \"\"\"\n\n # Find bentoml module path\n (module_location,) = importlib.util.find_spec('bentoml').submodule_search_locations\n\n bentoml_setup_py = os.path.abspath(os.path.join(module_location, '..', 'setup.py'))\n\n # this is for BentoML developer to create BentoService containing custom develop\n # branches of BentoML library, it is True only when BentoML module is installed in\n # development mode via \"pip install --editable .\"\n if not _is_pypi_release() and os.path.isfile(bentoml_setup_py):\n logger.info(\n \"Detect BentoML 
installed in development model, copying local BentoML \"\n \"module file to target saved bundle path\"\n )\n\n # Create tmp directory inside bentoml module for storing the bundled\n # targz file. Since dist-dir can only be inside of the module directory\n bundle_dir_name = '__bentoml_tmp_sdist_build'\n source_dir = os.path.abspath(\n os.path.join(module_location, '..', bundle_dir_name)\n )\n\n if os.path.isdir(source_dir):\n shutil.rmtree(source_dir, ignore_errors=True)\n os.mkdir(source_dir)\n\n sandbox.run_setup(\n bentoml_setup_py,\n ['sdist', '--format', 'gztar', '--dist-dir', bundle_dir_name],\n )\n\n # copy the generated targz to saved bundle directory and remove it from\n # bentoml module directory\n shutil.copytree(source_dir, target_path)\n\n # clean up sdist build files\n shutil.rmtree(source_dir)\n", "path": "bentoml/saved_bundle/bundler.py"}], "after_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport importlib\nimport os\nimport shutil\nimport stat\nimport logging\n\nfrom setuptools import sandbox\n\nfrom bentoml.configuration import _is_pypi_release\n\nfrom bentoml.exceptions import BentoMLException\nfrom bentoml.saved_bundle.py_module_utils import copy_used_py_modules\nfrom bentoml.saved_bundle.templates import (\n BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE,\n MANIFEST_IN_TEMPLATE,\n MODEL_SERVER_DOCKERFILE_CPU,\n INIT_PY_TEMPLATE,\n)\nfrom bentoml.utils.usage_stats import track_save\nfrom bentoml.saved_bundle.config import SavedBundleConfig\n\n\nDEFAULT_SAVED_BUNDLE_README = \"\"\"\\\n# Generated BentoService bundle - {}:{}\n\nThis is a ML Service bundle created with BentoML, it is not recommended to edit\ncode or files contained in this directory. Instead, edit the code that uses BentoML\nto create this bundle, and save a new BentoService bundle.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_to_dir(bento_service, path, version=None, silent=False):\n \"\"\"Save given BentoService along with all its artifacts, source code and\n dependencies to target file path, assuming path exist and empty. 
If target path\n is not empty, this call may override existing files in the given path.\n\n :param bento_service (bentoml.service.BentoService): a Bento Service instance\n :param path (str): Destination of where the bento service will be saved\n :param version (str): Override the service version with given version string\n :param silent (boolean): whether to hide the log message showing target save path\n \"\"\"\n track_save(bento_service)\n\n from bentoml.service import BentoService\n\n if not isinstance(bento_service, BentoService):\n raise BentoMLException(\n \"save_to_dir only work with instance of custom BentoService class\"\n )\n\n if version is not None:\n # If parameter version provided, set bento_service version\n # Otherwise it will bet set the first time the `version` property get accessed\n bento_service.set_version(version)\n\n if not os.path.exists(path):\n raise BentoMLException(\"Directory '{}' not found\".format(path))\n\n for artifact in bento_service._artifacts:\n if artifact.name not in bento_service._packed_artifacts:\n logger.warning(\n \"Missing declared artifact '%s' for BentoService '%s'\",\n artifact.name,\n bento_service.name,\n )\n\n module_base_path = os.path.join(path, bento_service.name)\n try:\n os.mkdir(module_base_path)\n except FileExistsError:\n raise BentoMLException(\n f\"Existing module file found for BentoService {bento_service.name}\"\n )\n\n # write README.md with custom BentoService's docstring if presented\n saved_bundle_readme = DEFAULT_SAVED_BUNDLE_README.format(\n bento_service.name, bento_service.version\n )\n if bento_service.__class__.__doc__:\n saved_bundle_readme += \"\\n\"\n saved_bundle_readme += bento_service.__class__.__doc__.strip()\n\n with open(os.path.join(path, \"README.md\"), \"w\") as f:\n f.write(saved_bundle_readme)\n\n # save all model artifacts to 'base_path/name/artifacts/' directory\n if bento_service.artifacts:\n bento_service.artifacts.save(module_base_path)\n\n # write conda environment, requirement.txt\n bento_service.env.save(path, bento_service)\n\n # TODO: add bentoml.find_packages helper for more fine grained control over this\n # process, e.g. 
packages=find_packages(base, [], exclude=[], used_module_only=True)\n # copy over all custom model code\n module_name, module_file = copy_used_py_modules(\n bento_service.__class__.__module__, os.path.join(path, bento_service.name)\n )\n\n # create __init__.py\n with open(os.path.join(path, bento_service.name, \"__init__.py\"), \"w\") as f:\n f.write(\n INIT_PY_TEMPLATE.format(\n service_name=bento_service.name,\n module_name=module_name,\n pypi_package_version=bento_service.version,\n )\n )\n\n # write setup.py, this make saved BentoService bundle pip installable\n setup_py_content = BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE.format(\n name=bento_service.name,\n pypi_package_version=bento_service.version,\n long_description=saved_bundle_readme,\n )\n with open(os.path.join(path, \"setup.py\"), \"w\") as f:\n f.write(setup_py_content)\n\n with open(os.path.join(path, \"MANIFEST.in\"), \"w\") as f:\n f.write(MANIFEST_IN_TEMPLATE.format(service_name=bento_service.name))\n\n # write Dockerfile\n logger.debug(\"Using Docker Base Image %s\", bento_service._env._docker_base_image)\n with open(os.path.join(path, \"Dockerfile\"), \"w\") as f:\n f.write(\n MODEL_SERVER_DOCKERFILE_CPU.format(\n docker_base_image=bento_service._env._docker_base_image\n )\n )\n\n # Copy docker-entrypoint.sh\n docker_entrypoint_sh_file_src = os.path.join(\n os.path.dirname(__file__), \"docker-entrypoint.sh\"\n )\n docker_entrypoint_sh_file_dst = os.path.join(path, \"docker-entrypoint.sh\")\n shutil.copyfile(docker_entrypoint_sh_file_src, docker_entrypoint_sh_file_dst)\n # chmod +x docker-entrypoint.sh\n st = os.stat(docker_entrypoint_sh_file_dst)\n os.chmod(docker_entrypoint_sh_file_dst, st.st_mode | stat.S_IEXEC)\n\n # copy bentoml-init.sh for install targz bundles\n bentoml_init_sh_file_src = os.path.join(\n os.path.dirname(__file__), \"bentoml-init.sh\"\n )\n bentoml_init_sh_file_dst = os.path.join(path, \"bentoml-init.sh\")\n shutil.copyfile(bentoml_init_sh_file_src, bentoml_init_sh_file_dst)\n # chmod +x bentoml_init_script file\n st = os.stat(bentoml_init_sh_file_dst)\n os.chmod(bentoml_init_sh_file_dst, st.st_mode | stat.S_IEXEC)\n\n # write bentoml.yml\n config = SavedBundleConfig(bento_service)\n config[\"metadata\"].update({\"module_name\": module_name, \"module_file\": module_file})\n\n config.write_to_path(path)\n # Also write bentoml.yml to module base path to make it accessible\n # as package data after pip installed as a python package\n config.write_to_path(module_base_path)\n\n bundled_pip_dependencies_path = os.path.join(path, 'bundled_pip_dependencies')\n _bundle_local_bentoml_if_installed_from_source(bundled_pip_dependencies_path)\n\n if not silent:\n logger.info(\n \"BentoService bundle '%s:%s' created at: %s\",\n bento_service.name,\n bento_service.version,\n path,\n )\n\n\ndef _bundle_local_bentoml_if_installed_from_source(target_path):\n \"\"\"\n if bentoml is installed in editor mode(pip install -e), this will build a source\n distribution with the local bentoml fork and add it to saved BentoService bundle\n path under bundled_pip_dependencies directory\n \"\"\"\n\n # Find bentoml module path\n (module_location,) = importlib.util.find_spec('bentoml').submodule_search_locations\n\n bentoml_setup_py = os.path.abspath(os.path.join(module_location, '..', 'setup.py'))\n\n # this is for BentoML developer to create BentoService containing custom develop\n # branches of BentoML library, it is True only when BentoML module is installed in\n # development mode via \"pip install --editable .\"\n if not 
_is_pypi_release() and os.path.isfile(bentoml_setup_py):\n logger.info(\n \"Detect BentoML installed in development model, copying local BentoML \"\n \"module file to target saved bundle path\"\n )\n\n # Create tmp directory inside bentoml module for storing the bundled\n # targz file. Since dist-dir can only be inside of the module directory\n bundle_dir_name = '__bentoml_tmp_sdist_build'\n source_dir = os.path.abspath(\n os.path.join(module_location, '..', bundle_dir_name)\n )\n\n if os.path.isdir(source_dir):\n shutil.rmtree(source_dir, ignore_errors=True)\n os.mkdir(source_dir)\n\n sandbox.run_setup(\n bentoml_setup_py,\n ['sdist', '--format', 'gztar', '--dist-dir', bundle_dir_name],\n )\n\n # copy the generated targz to saved bundle directory and remove it from\n # bentoml module directory\n shutil.copytree(source_dir, target_path)\n\n # clean up sdist build files\n shutil.rmtree(source_dir)\n", "path": "bentoml/saved_bundle/bundler.py"}]} |
gh_patches_debug_1375 | rasdani/github-patches | git_diff | pypa__pip-1855 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip fails after upgrade to 1.5.5
```
$ pip install --upgrade pip
Downloading/unpacking pip from https://pypi.python.org/packages/py2.py3/p/pip/pip-1.5.5-py2.py3-none-any.whl#md5=03a932d6f82a3887d8de1cdb837c87ed
Using download cache from /Users/robinho/Library/Caches/pip-downloads/https%3A%2F%2Fpypi.python.org%2Fpackages%2Fpy2.py3%2Fp%2Fpip%2Fpip-1.5.5-py2.py3-none-any.whl
Installing collected packages: pip
Found existing installation: pip 1.5.4
Uninstalling pip:
Successfully uninstalled pip
Successfully installed pip
Cleaning up...
$ pip
Traceback (most recent call last):
File "/usr/local/bin/pip", line 5, in <module>
from pkg_resources import load_entry_point
File "/usr/local/lib/python2.7/site-packages/pkg_resources.py", line 2749, in <module>
working_set = WorkingSet._build_master()
File "/usr/local/lib/python2.7/site-packages/pkg_resources.py", line 446, in _build_master
return cls._build_from_requirements(__requires__)
File "/usr/local/lib/python2.7/site-packages/pkg_resources.py", line 459, in _build_from_requirements
dists = ws.resolve(reqs, Environment())
File "/usr/local/lib/python2.7/site-packages/pkg_resources.py", line 628, in resolve
raise DistributionNotFound(req)
pkg_resources.DistributionNotFound: pip==1.5.4
```
Using a python installed by homebrew.
```
$ python --version
Python 2.7.6
$ brew --version
0.9.5
$ sw_vers
ProductName: Mac OS X
ProductVersion: 10.9.2
BuildVersion: 13C1021
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/wheel.py`
Content:
```
1 """
2 Support for installing and building the "wheel" binary package format.
3 """
4 from __future__ import with_statement
5
6 import compileall
7 import csv
8 import functools
9 import hashlib
10 import os
11 import re
12 import shutil
13 import sys
14
15 from base64 import urlsafe_b64encode
16 from email.parser import Parser
17
18 from pip.compat import ConfigParser, StringIO, binary
19 from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
20 from pip.locations import distutils_scheme
21 from pip.log import logger
22 from pip import pep425tags
23 from pip.util import call_subprocess, normalize_path, make_path_relative
24 from pip._vendor.distlib.scripts import ScriptMaker
25 from pip._vendor import pkg_resources
26
27
28 wheel_ext = '.whl'
29
30 VERSION_COMPATIBLE = (1, 0)
31
32
33 def rehash(path, algo='sha256', blocksize=1 << 20):
34 """Return (hash, length) for path using hashlib.new(algo)"""
35 h = hashlib.new(algo)
36 length = 0
37 with open(path, 'rb') as f:
38 block = f.read(blocksize)
39 while block:
40 length += len(block)
41 h.update(block)
42 block = f.read(blocksize)
43 digest = 'sha256=' + urlsafe_b64encode(
44 h.digest()
45 ).decode('latin1').rstrip('=')
46 return (digest, length)
47
48
49 def open_for_csv(name, mode):
50 if sys.version_info[0] < 3:
51 nl = {}
52 bin = 'b'
53 else:
54 nl = {'newline': ''}
55 bin = ''
56 return open(name, mode + bin, **nl)
57
58
59 def fix_script(path):
60 """Replace #!python with #!/path/to/python
61 Return True if file was changed."""
62 # XXX RECORD hashes will need to be updated
63 if os.path.isfile(path):
64 script = open(path, 'rb')
65 try:
66 firstline = script.readline()
67 if not firstline.startswith(binary('#!python')):
68 return False
69 exename = sys.executable.encode(sys.getfilesystemencoding())
70 firstline = binary('#!') + exename + binary(os.linesep)
71 rest = script.read()
72 finally:
73 script.close()
74 script = open(path, 'wb')
75 try:
76 script.write(firstline)
77 script.write(rest)
78 finally:
79 script.close()
80 return True
81
82 dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
83 \.dist-info$""", re.VERBOSE)
84
85
86 def root_is_purelib(name, wheeldir):
87 """
88 Return True if the extracted wheel in wheeldir should go into purelib.
89 """
90 name_folded = name.replace("-", "_")
91 for item in os.listdir(wheeldir):
92 match = dist_info_re.match(item)
93 if match and match.group('name') == name_folded:
94 with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
95 for line in wheel:
96 line = line.lower().rstrip()
97 if line == "root-is-purelib: true":
98 return True
99 return False
100
101
102 def get_entrypoints(filename):
103 if not os.path.exists(filename):
104 return {}, {}
105
106 # This is done because you can pass a string to entry_points wrappers which
107 # means that they may or may not be valid INI files. The attempt here is to
108 # strip leading and trailing whitespace in order to make them valid INI
109 # files.
110 with open(filename) as fp:
111 data = StringIO()
112 for line in fp:
113 data.write(line.strip())
114 data.write("\n")
115 data.seek(0)
116
117 cp = ConfigParser.RawConfigParser()
118 cp.readfp(data)
119
120 console = {}
121 gui = {}
122 if cp.has_section('console_scripts'):
123 console = dict(cp.items('console_scripts'))
124 if cp.has_section('gui_scripts'):
125 gui = dict(cp.items('gui_scripts'))
126 return console, gui
127
128
129 def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
130 pycompile=True, scheme=None):
131 """Install a wheel"""
132
133 if not scheme:
134 scheme = distutils_scheme(name, user=user, home=home, root=root)
135
136 if root_is_purelib(name, wheeldir):
137 lib_dir = scheme['purelib']
138 else:
139 lib_dir = scheme['platlib']
140
141 info_dir = []
142 data_dirs = []
143 source = wheeldir.rstrip(os.path.sep) + os.path.sep
144
145 # Record details of the files moved
146 # installed = files copied from the wheel to the destination
147 # changed = files changed while installing (scripts #! line typically)
148 # generated = files newly generated during the install (script wrappers)
149 installed = {}
150 changed = set()
151 generated = []
152
153 # Compile all of the pyc files that we're going to be installing
154 if pycompile:
155 compileall.compile_dir(source, force=True, quiet=True)
156
157 def normpath(src, p):
158 return make_path_relative(src, p).replace(os.path.sep, '/')
159
160 def record_installed(srcfile, destfile, modified=False):
161 """Map archive RECORD paths to installation RECORD paths."""
162 oldpath = normpath(srcfile, wheeldir)
163 newpath = normpath(destfile, lib_dir)
164 installed[oldpath] = newpath
165 if modified:
166 changed.add(destfile)
167
168 def clobber(source, dest, is_base, fixer=None, filter=None):
169 if not os.path.exists(dest): # common for the 'include' path
170 os.makedirs(dest)
171
172 for dir, subdirs, files in os.walk(source):
173 basedir = dir[len(source):].lstrip(os.path.sep)
174 destdir = os.path.join(dest, basedir)
175 if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
176 continue
177 for s in subdirs:
178 destsubdir = os.path.join(dest, basedir, s)
179 if is_base and basedir == '' and destsubdir.endswith('.data'):
180 data_dirs.append(s)
181 continue
182 elif (is_base
183 and s.endswith('.dist-info')
184 # is self.req.project_name case preserving?
185 and s.lower().startswith(
186 req.project_name.replace('-', '_').lower())):
187 assert not info_dir, 'Multiple .dist-info directories'
188 info_dir.append(destsubdir)
189 for f in files:
190 # Skip unwanted files
191 if filter and filter(f):
192 continue
193 srcfile = os.path.join(dir, f)
194 destfile = os.path.join(dest, basedir, f)
195 # directory creation is lazy and after the file filtering above
196 # to ensure we don't install empty dirs; empty dirs can't be
197 # uninstalled.
198 if not os.path.exists(destdir):
199 os.makedirs(destdir)
200 # use copy2 (not move) to be extra sure we're not moving
201 # directories over; copy2 fails for directories. this would
202 # fail tests (not during released/user execution)
203 shutil.copy2(srcfile, destfile)
204 changed = False
205 if fixer:
206 changed = fixer(destfile)
207 record_installed(srcfile, destfile, changed)
208
209 clobber(source, lib_dir, True)
210
211 assert info_dir, "%s .dist-info directory not found" % req
212
213 # Get the defined entry points
214 ep_file = os.path.join(info_dir[0], 'entry_points.txt')
215 console, gui = get_entrypoints(ep_file)
216
217 def is_entrypoint_wrapper(name):
218 # EP, EP.exe and EP-script.py are scripts generated for
219 # entry point EP by setuptools
220 if name.lower().endswith('.exe'):
221 matchname = name[:-4]
222 elif name.lower().endswith('-script.py'):
223 matchname = name[:-10]
224 elif name.lower().endswith(".pya"):
225 matchname = name[:-4]
226 else:
227 matchname = name
228 # Ignore setuptools-generated scripts
229 return (matchname in console or matchname in gui)
230
231 for datadir in data_dirs:
232 fixer = None
233 filter = None
234 for subdir in os.listdir(os.path.join(wheeldir, datadir)):
235 fixer = None
236 if subdir == 'scripts':
237 fixer = fix_script
238 filter = is_entrypoint_wrapper
239 source = os.path.join(wheeldir, datadir, subdir)
240 dest = scheme[subdir]
241 clobber(source, dest, False, fixer=fixer, filter=filter)
242
243 maker = ScriptMaker(None, scheme['scripts'])
244
245 # Ensure we don't generate any variants for scripts because this is almost
246 # never what somebody wants.
247 # See https://bitbucket.org/pypa/distlib/issue/35/
248 maker.variants = set(('', ))
249
250 # This is required because otherwise distlib creates scripts that are not
251 # executable.
252 # See https://bitbucket.org/pypa/distlib/issue/32/
253 maker.set_mode = True
254
255 # Simplify the script and fix the fact that the default script swallows
256 # every single stack trace.
257 # See https://bitbucket.org/pypa/distlib/issue/34/
258 # See https://bitbucket.org/pypa/distlib/issue/33/
259 def _get_script_text(entry):
260 return maker.script_template % {
261 "module": entry.prefix,
262 "import_name": entry.suffix.split(".")[0],
263 "func": entry.suffix,
264 }
265
266 maker._get_script_text = _get_script_text
267 maker.script_template = """# -*- coding: utf-8 -*-
268 import re
269 import sys
270
271 from %(module)s import %(import_name)s
272
273 if __name__ == '__main__':
274 sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
275 sys.exit(%(func)s())
276 """
277
278 # Special case pip and setuptools to generate versioned wrappers
279 #
280 # The issue is that some projects (specifically, pip and setuptools) use
281 # code in setup.py to create "versioned" entry points - pip2.7 on Python
282 # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
283 # the wheel metadata at build time, and so if the wheel is installed with
284 # a *different* version of Python the entry points will be wrong. The
285 # correct fix for this is to enhance the metadata to be able to describe
286 # such versioned entry points, but that won't happen till Metadata 2.0 is
287 # available.
288 # In the meantime, projects using versioned entry points will either have
289 # incorrect versioned entry points, or they will not be able to distribute
290 # "universal" wheels (i.e., they will need a wheel per Python version).
291 #
292 # Because setuptools and pip are bundled with _ensurepip and virtualenv,
293 # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
294 # override the versioned entry points in the wheel and generate the
295     # correct ones. This code is purely a short-term measure until Metadata 2.0
296 # is available.
297 #
298 # To add the level of hack in this section of code, in order to support
299 # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
300 # variable which will control which version scripts get installed.
301 #
302 # ENSUREPIP_OPTIONS=altinstall
303 # - Only pipX.Y and easy_install-X.Y will be generated and installed
304 # ENSUREPIP_OPTIONS=install
305 # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
306 # that this option is technically if ENSUREPIP_OPTIONS is set and is
307 # not altinstall
308 # DEFAULT
309 # - The default behavior is to install pip, pipX, pipX.Y, easy_install
310 # and easy_install-X.Y.
311 pip_script = console.pop('pip', None)
312 if pip_script:
313 if "ENSUREPIP_OPTIONS" not in os.environ:
314 spec = 'pip = ' + pip_script
315 generated.extend(maker.make(spec))
316
317 if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
318 spec = 'pip%s = %s' % (sys.version[:1], pip_script)
319 generated.extend(maker.make(spec))
320
321 spec = 'pip%s = %s' % (sys.version[:3], pip_script)
322 generated.extend(maker.make(spec))
323 # Delete any other versioned pip entry points
324 pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
325 for k in pip_ep:
326 del console[k]
327 easy_install_script = console.pop('easy_install', None)
328 if easy_install_script:
329 if "ENSUREPIP_OPTIONS" not in os.environ:
330 spec = 'easy_install = ' + easy_install_script
331 generated.extend(maker.make(spec))
332
333 spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
334 generated.extend(maker.make(spec))
335 # Delete any other versioned easy_install entry points
336 easy_install_ep = [
337 k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
338 ]
339 for k in easy_install_ep:
340 del console[k]
341
342 # Generate the console and GUI entry points specified in the wheel
343 if len(console) > 0:
344 generated.extend(
345 maker.make_multiple(['%s = %s' % kv for kv in console.items()])
346 )
347 if len(gui) > 0:
348 generated.extend(
349 maker.make_multiple(
350 ['%s = %s' % kv for kv in gui.items()],
351 {'gui': True}
352 )
353 )
354
355 record = os.path.join(info_dir[0], 'RECORD')
356 temp_record = os.path.join(info_dir[0], 'RECORD.pip')
357 with open_for_csv(record, 'r') as record_in:
358 with open_for_csv(temp_record, 'w+') as record_out:
359 reader = csv.reader(record_in)
360 writer = csv.writer(record_out)
361 for row in reader:
362 row[0] = installed.pop(row[0], row[0])
363 if row[0] in changed:
364 row[1], row[2] = rehash(row[0])
365 writer.writerow(row)
366 for f in generated:
367 h, l = rehash(f)
368 writer.writerow((f, h, l))
369 for f in installed:
370 writer.writerow((installed[f], '', ''))
371 shutil.move(temp_record, record)
372
373
374 def _unique(fn):
375 @functools.wraps(fn)
376 def unique(*args, **kw):
377 seen = set()
378 for item in fn(*args, **kw):
379 if item not in seen:
380 seen.add(item)
381 yield item
382 return unique
383
384
385 # TODO: this goes somewhere besides the wheel module
386 @_unique
387 def uninstallation_paths(dist):
388 """
389 Yield all the uninstallation paths for dist based on RECORD-without-.pyc
390
391 Yield paths to all the files in RECORD. For each .py file in RECORD, add
392 the .pyc in the same directory.
393
394 UninstallPathSet.add() takes care of the __pycache__ .pyc.
395 """
396 from pip.util import FakeFile # circular import
397 r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
398 for row in r:
399 path = os.path.join(dist.location, row[0])
400 yield path
401 if path.endswith('.py'):
402 dn, fn = os.path.split(path)
403 base = fn[:-3]
404 path = os.path.join(dn, base + '.pyc')
405 yield path
406
407
408 def wheel_version(source_dir):
409 """
410 Return the Wheel-Version of an extracted wheel, if possible.
411
412 Otherwise, return False if we couldn't parse / extract it.
413 """
414 try:
415 dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
416
417 wheel_data = dist.get_metadata('WHEEL')
418 wheel_data = Parser().parsestr(wheel_data)
419
420 version = wheel_data['Wheel-Version'].strip()
421 version = tuple(map(int, version.split('.')))
422 return version
423 except:
424 return False
425
426
427 def check_compatibility(version, name):
428 """
429 Raises errors or warns if called with an incompatible Wheel-Version.
430
431 Pip should refuse to install a Wheel-Version that's a major series
432 ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
433 installing a version only minor version ahead (e.g 1.2 > 1.1).
434
435 version: a 2-tuple representing a Wheel-Version (Major, Minor)
436 name: name of wheel or package to raise exception about
437
438 :raises UnsupportedWheel: when an incompatible Wheel-Version is given
439 """
440 if not version:
441 raise UnsupportedWheel(
442 "%s is in an unsupported or invalid wheel" % name
443 )
444 if version[0] > VERSION_COMPATIBLE[0]:
445 raise UnsupportedWheel(
446 "%s's Wheel-Version (%s) is not compatible with this version "
447 "of pip" % (name, '.'.join(map(str, version)))
448 )
449 elif version > VERSION_COMPATIBLE:
450 logger.warn('Installing from a newer Wheel-Version (%s)'
451 % '.'.join(map(str, version)))
452
453
454 class Wheel(object):
455 """A wheel file"""
456
457 # TODO: maybe move the install code into this class
458
459 wheel_file_re = re.compile(
460 r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
461 ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
462 \.whl|\.dist-info)$""",
463 re.VERBOSE
464 )
465
466 def __init__(self, filename):
467 """
468 :raises InvalidWheelFilename: when the filename is invalid for a wheel
469 """
470 wheel_info = self.wheel_file_re.match(filename)
471 if not wheel_info:
472 raise InvalidWheelFilename(
473 "%s is not a valid wheel filename." % filename
474 )
475 self.filename = filename
476 self.name = wheel_info.group('name').replace('_', '-')
477 # we'll assume "_" means "-" due to wheel naming scheme
478 # (https://github.com/pypa/pip/issues/1150)
479 self.version = wheel_info.group('ver').replace('_', '-')
480 self.pyversions = wheel_info.group('pyver').split('.')
481 self.abis = wheel_info.group('abi').split('.')
482 self.plats = wheel_info.group('plat').split('.')
483
484 # All the tag combinations from this file
485 self.file_tags = set(
486 (x, y, z) for x in self.pyversions
487 for y in self.abis for z in self.plats
488 )
489
490 def support_index_min(self, tags=None):
491 """
492 Return the lowest index that one of the wheel's file_tag combinations
493 achieves in the supported_tags list e.g. if there are 8 supported tags,
494 and one of the file tags is first in the list, then return 0. Returns
495         None if the wheel is not supported.
496 """
497 if tags is None: # for mock
498 tags = pep425tags.supported_tags
499 indexes = [tags.index(c) for c in self.file_tags if c in tags]
500 return min(indexes) if indexes else None
501
502 def supported(self, tags=None):
503 """Is this wheel supported on this system?"""
504 if tags is None: # for mock
505 tags = pep425tags.supported_tags
506 return bool(set(tags).intersection(self.file_tags))
507
508
509 class WheelBuilder(object):
510 """Build wheels from a RequirementSet."""
511
512 def __init__(self, requirement_set, finder, wheel_dir, build_options=[],
513 global_options=[]):
514 self.requirement_set = requirement_set
515 self.finder = finder
516 self.wheel_dir = normalize_path(wheel_dir)
517 self.build_options = build_options
518 self.global_options = global_options
519
520 def _build_one(self, req):
521 """Build one wheel."""
522
523 base_args = [
524 sys.executable, '-c',
525 "import setuptools;__file__=%r;"
526 "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
527 "__file__, 'exec'))" % req.setup_py
528 ] + list(self.global_options)
529
530 logger.notify('Running setup.py bdist_wheel for %s' % req.name)
531 logger.notify('Destination directory: %s' % self.wheel_dir)
532 wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] \
533 + self.build_options
534 try:
535 call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
536 return True
537 except:
538 logger.error('Failed building wheel for %s' % req.name)
539 return False
540
541 def build(self):
542 """Build wheels."""
543
544 # unpack and constructs req set
545 self.requirement_set.prepare_files(self.finder)
546
547 reqset = self.requirement_set.requirements.values()
548
549 buildset = []
550 for req in reqset:
551 if req.is_wheel:
552 logger.notify(
553 'Skipping %s, due to already being wheel.' % req.name)
554 elif req.editable:
555 logger.notify(
556 'Skipping %s, due to being editable' % req.name)
557 else:
558 buildset.append(req)
559
560 if not buildset:
561 return True
562
563 # Build the wheels.
564 logger.notify(
565 'Building wheels for collected packages: %s' %
566 ', '.join([req.name for req in buildset])
567 )
568 logger.indent += 2
569 build_success, build_failure = [], []
570 for req in buildset:
571 if self._build_one(req):
572 build_success.append(req)
573 else:
574 build_failure.append(req)
575 logger.indent -= 2
576
577 # notify success/failure
578 if build_success:
579 logger.notify(
580 'Successfully built %s' %
581 ' '.join([req.name for req in build_success])
582 )
583 if build_failure:
584 logger.notify(
585 'Failed to build %s' %
586 ' '.join([req.name for req in build_failure])
587 )
588 # Return True if all builds were successful
589 return len(build_failure) == 0
590
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/wheel.py b/pip/wheel.py
--- a/pip/wheel.py
+++ b/pip/wheel.py
@@ -242,6 +242,10 @@
maker = ScriptMaker(None, scheme['scripts'])
+ # Ensure old scripts are overwritten.
+ # See https://github.com/pypa/pip/issues/1800
+ maker.clobber = True
+
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
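
For readers unfamiliar with distlib, the patch above matters because `ScriptMaker` leaves an existing wrapper script in place unless its `clobber` attribute is set, which is how a stale `pip` launcher pinned to 1.5.4 could survive the upgrade. The sketch below is illustrative only: it assumes `distlib` is installed, and the target directory and the `pip = pip:main` entry spec are invented for the demo rather than taken from pip itself.

```python
# Minimal sketch of the behaviour the fix relies on: with clobber at its default
# (False), ScriptMaker skips a wrapper whose target file already exists; setting
# it to True makes it overwrite stale scripts such as an old `pip` launcher.
import os
from distlib.scripts import ScriptMaker  # assumes distlib is importable

target = "/tmp/scriptmaker-demo/bin"     # hypothetical demo directory
os.makedirs(target, exist_ok=True)

maker = ScriptMaker(None, target)  # same call shape as pip's ScriptMaker(None, scheme['scripts'])
maker.clobber = True               # the one-line fix: overwrite whatever is already there
maker.variants = {""}              # like pip: generate only `pip`, no pipX / pipX.Y variants
maker.set_mode = True              # mark the generated wrappers executable

print(maker.make("pip = pip:main"))  # prints the list of files written under target
```

Without the `maker.clobber = True` line, a second run would leave the first wrapper untouched, which is the stale `pip` failure mode described in the issue.
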
| {"golden_diff": "diff --git a/pip/wheel.py b/pip/wheel.py\n--- a/pip/wheel.py\n+++ b/pip/wheel.py\n@@ -242,6 +242,10 @@\n \n maker = ScriptMaker(None, scheme['scripts'])\n \n+ # Ensure old scripts are overwritten.\n+ # See https://github.com/pypa/pip/issues/1800\n+ maker.clobber = True\n+\n # Ensure we don't generate any variants for scripts because this is almost\n # never what somebody wants.\n # See https://bitbucket.org/pypa/distlib/issue/35/\n", "issue": "pip fail after upgrade to 1.5.5\n```\n$ pip install --upgrade pip\nDownloading/unpacking pip from https://pypi.python.org/packages/py2.py3/p/pip/pip-1.5.5-py2.py3-none-any.whl#md5=03a932d6f82a3887d8de1cdb837c87ed\n Using download cache from /Users/robinho/Library/Caches/pip-downloads/https%3A%2F%2Fpypi.python.org%2Fpackages%2Fpy2.py3%2Fp%2Fpip%2Fpip-1.5.5-py2.py3-none-any.whl\nInstalling collected packages: pip\n Found existing installation: pip 1.5.4\n Uninstalling pip:\n Successfully uninstalled pip\nSuccessfully installed pip\nCleaning up...\n$ pip\nTraceback (most recent call last):\n File \"/usr/local/bin/pip\", line 5, in <module>\n from pkg_resources import load_entry_point\n File \"/usr/local/lib/python2.7/site-packages/pkg_resources.py\", line 2749, in <module>\n working_set = WorkingSet._build_master()\n File \"/usr/local/lib/python2.7/site-packages/pkg_resources.py\", line 446, in _build_master\n return cls._build_from_requirements(__requires__)\n File \"/usr/local/lib/python2.7/site-packages/pkg_resources.py\", line 459, in _build_from_requirements\n dists = ws.resolve(reqs, Environment())\n File \"/usr/local/lib/python2.7/site-packages/pkg_resources.py\", line 628, in resolve\n raise DistributionNotFound(req)\npkg_resources.DistributionNotFound: pip==1.5.4\n```\n\nUsing a python installed by homebrew.\n\n```\n$ python --version\nPython 2.7.6\n$ brew --version\n0.9.5\n$ sw_vers\nProductName: Mac OS X\nProductVersion: 10.9.2\nBuildVersion: 13C1021\n```\n\n", "before_files": [{"content": "\"\"\"\nSupport for installing and building the \"wheel\" binary package format.\n\"\"\"\nfrom __future__ import with_statement\n\nimport compileall\nimport csv\nimport functools\nimport hashlib\nimport os\nimport re\nimport shutil\nimport sys\n\nfrom base64 import urlsafe_b64encode\nfrom email.parser import Parser\n\nfrom pip.compat import ConfigParser, StringIO, binary\nfrom pip.exceptions import InvalidWheelFilename, UnsupportedWheel\nfrom pip.locations import distutils_scheme\nfrom pip.log import logger\nfrom pip import pep425tags\nfrom pip.util import call_subprocess, normalize_path, make_path_relative\nfrom pip._vendor.distlib.scripts import ScriptMaker\nfrom pip._vendor import pkg_resources\n\n\nwheel_ext = '.whl'\n\nVERSION_COMPATIBLE = (1, 0)\n\n\ndef rehash(path, algo='sha256', blocksize=1 << 20):\n \"\"\"Return (hash, length) for path using hashlib.new(algo)\"\"\"\n h = hashlib.new(algo)\n length = 0\n with open(path, 'rb') as f:\n block = f.read(blocksize)\n while block:\n length += len(block)\n h.update(block)\n block = f.read(blocksize)\n digest = 'sha256=' + urlsafe_b64encode(\n h.digest()\n ).decode('latin1').rstrip('=')\n return (digest, length)\n\n\ndef open_for_csv(name, mode):\n if sys.version_info[0] < 3:\n nl = {}\n bin = 'b'\n else:\n nl = {'newline': ''}\n bin = ''\n return open(name, mode + bin, **nl)\n\n\ndef fix_script(path):\n \"\"\"Replace #!python with #!/path/to/python\n Return True if file was changed.\"\"\"\n # XXX RECORD hashes will need to be updated\n if os.path.isfile(path):\n script = open(path, 
'rb')\n try:\n firstline = script.readline()\n if not firstline.startswith(binary('#!python')):\n return False\n exename = sys.executable.encode(sys.getfilesystemencoding())\n firstline = binary('#!') + exename + binary(os.linesep)\n rest = script.read()\n finally:\n script.close()\n script = open(path, 'wb')\n try:\n script.write(firstline)\n script.write(rest)\n finally:\n script.close()\n return True\n\ndist_info_re = re.compile(r\"\"\"^(?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n \\.dist-info$\"\"\", re.VERBOSE)\n\n\ndef root_is_purelib(name, wheeldir):\n \"\"\"\n Return True if the extracted wheel in wheeldir should go into purelib.\n \"\"\"\n name_folded = name.replace(\"-\", \"_\")\n for item in os.listdir(wheeldir):\n match = dist_info_re.match(item)\n if match and match.group('name') == name_folded:\n with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:\n for line in wheel:\n line = line.lower().rstrip()\n if line == \"root-is-purelib: true\":\n return True\n return False\n\n\ndef get_entrypoints(filename):\n if not os.path.exists(filename):\n return {}, {}\n\n # This is done because you can pass a string to entry_points wrappers which\n # means that they may or may not be valid INI files. The attempt here is to\n # strip leading and trailing whitespace in order to make them valid INI\n # files.\n with open(filename) as fp:\n data = StringIO()\n for line in fp:\n data.write(line.strip())\n data.write(\"\\n\")\n data.seek(0)\n\n cp = ConfigParser.RawConfigParser()\n cp.readfp(data)\n\n console = {}\n gui = {}\n if cp.has_section('console_scripts'):\n console = dict(cp.items('console_scripts'))\n if cp.has_section('gui_scripts'):\n gui = dict(cp.items('gui_scripts'))\n return console, gui\n\n\ndef move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,\n pycompile=True, scheme=None):\n \"\"\"Install a wheel\"\"\"\n\n if not scheme:\n scheme = distutils_scheme(name, user=user, home=home, root=root)\n\n if root_is_purelib(name, wheeldir):\n lib_dir = scheme['purelib']\n else:\n lib_dir = scheme['platlib']\n\n info_dir = []\n data_dirs = []\n source = wheeldir.rstrip(os.path.sep) + os.path.sep\n\n # Record details of the files moved\n # installed = files copied from the wheel to the destination\n # changed = files changed while installing (scripts #! 
line typically)\n # generated = files newly generated during the install (script wrappers)\n installed = {}\n changed = set()\n generated = []\n\n # Compile all of the pyc files that we're going to be installing\n if pycompile:\n compileall.compile_dir(source, force=True, quiet=True)\n\n def normpath(src, p):\n return make_path_relative(src, p).replace(os.path.sep, '/')\n\n def record_installed(srcfile, destfile, modified=False):\n \"\"\"Map archive RECORD paths to installation RECORD paths.\"\"\"\n oldpath = normpath(srcfile, wheeldir)\n newpath = normpath(destfile, lib_dir)\n installed[oldpath] = newpath\n if modified:\n changed.add(destfile)\n\n def clobber(source, dest, is_base, fixer=None, filter=None):\n if not os.path.exists(dest): # common for the 'include' path\n os.makedirs(dest)\n\n for dir, subdirs, files in os.walk(source):\n basedir = dir[len(source):].lstrip(os.path.sep)\n destdir = os.path.join(dest, basedir)\n if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):\n continue\n for s in subdirs:\n destsubdir = os.path.join(dest, basedir, s)\n if is_base and basedir == '' and destsubdir.endswith('.data'):\n data_dirs.append(s)\n continue\n elif (is_base\n and s.endswith('.dist-info')\n # is self.req.project_name case preserving?\n and s.lower().startswith(\n req.project_name.replace('-', '_').lower())):\n assert not info_dir, 'Multiple .dist-info directories'\n info_dir.append(destsubdir)\n for f in files:\n # Skip unwanted files\n if filter and filter(f):\n continue\n srcfile = os.path.join(dir, f)\n destfile = os.path.join(dest, basedir, f)\n # directory creation is lazy and after the file filtering above\n # to ensure we don't install empty dirs; empty dirs can't be\n # uninstalled.\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n # use copy2 (not move) to be extra sure we're not moving\n # directories over; copy2 fails for directories. 
this would\n # fail tests (not during released/user execution)\n shutil.copy2(srcfile, destfile)\n changed = False\n if fixer:\n changed = fixer(destfile)\n record_installed(srcfile, destfile, changed)\n\n clobber(source, lib_dir, True)\n\n assert info_dir, \"%s .dist-info directory not found\" % req\n\n # Get the defined entry points\n ep_file = os.path.join(info_dir[0], 'entry_points.txt')\n console, gui = get_entrypoints(ep_file)\n\n def is_entrypoint_wrapper(name):\n # EP, EP.exe and EP-script.py are scripts generated for\n # entry point EP by setuptools\n if name.lower().endswith('.exe'):\n matchname = name[:-4]\n elif name.lower().endswith('-script.py'):\n matchname = name[:-10]\n elif name.lower().endswith(\".pya\"):\n matchname = name[:-4]\n else:\n matchname = name\n # Ignore setuptools-generated scripts\n return (matchname in console or matchname in gui)\n\n for datadir in data_dirs:\n fixer = None\n filter = None\n for subdir in os.listdir(os.path.join(wheeldir, datadir)):\n fixer = None\n if subdir == 'scripts':\n fixer = fix_script\n filter = is_entrypoint_wrapper\n source = os.path.join(wheeldir, datadir, subdir)\n dest = scheme[subdir]\n clobber(source, dest, False, fixer=fixer, filter=filter)\n\n maker = ScriptMaker(None, scheme['scripts'])\n\n # Ensure we don't generate any variants for scripts because this is almost\n # never what somebody wants.\n # See https://bitbucket.org/pypa/distlib/issue/35/\n maker.variants = set(('', ))\n\n # This is required because otherwise distlib creates scripts that are not\n # executable.\n # See https://bitbucket.org/pypa/distlib/issue/32/\n maker.set_mode = True\n\n # Simplify the script and fix the fact that the default script swallows\n # every single stack trace.\n # See https://bitbucket.org/pypa/distlib/issue/34/\n # See https://bitbucket.org/pypa/distlib/issue/33/\n def _get_script_text(entry):\n return maker.script_template % {\n \"module\": entry.prefix,\n \"import_name\": entry.suffix.split(\".\")[0],\n \"func\": entry.suffix,\n }\n\n maker._get_script_text = _get_script_text\n maker.script_template = \"\"\"# -*- coding: utf-8 -*-\nimport re\nimport sys\n\nfrom %(module)s import %(import_name)s\n\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0])\n sys.exit(%(func)s())\n\"\"\"\n\n # Special case pip and setuptools to generate versioned wrappers\n #\n # The issue is that some projects (specifically, pip and setuptools) use\n # code in setup.py to create \"versioned\" entry points - pip2.7 on Python\n # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into\n # the wheel metadata at build time, and so if the wheel is installed with\n # a *different* version of Python the entry points will be wrong. The\n # correct fix for this is to enhance the metadata to be able to describe\n # such versioned entry points, but that won't happen till Metadata 2.0 is\n # available.\n # In the meantime, projects using versioned entry points will either have\n # incorrect versioned entry points, or they will not be able to distribute\n # \"universal\" wheels (i.e., they will need a wheel per Python version).\n #\n # Because setuptools and pip are bundled with _ensurepip and virtualenv,\n # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we\n # override the versioned entry points in the wheel and generate the\n # correct ones. 
This code is purely a short-term measure until Metadat 2.0\n # is available.\n #\n # To add the level of hack in this section of code, in order to support\n # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment\n # variable which will control which version scripts get installed.\n #\n # ENSUREPIP_OPTIONS=altinstall\n # - Only pipX.Y and easy_install-X.Y will be generated and installed\n # ENSUREPIP_OPTIONS=install\n # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note\n # that this option is technically if ENSUREPIP_OPTIONS is set and is\n # not altinstall\n # DEFAULT\n # - The default behavior is to install pip, pipX, pipX.Y, easy_install\n # and easy_install-X.Y.\n pip_script = console.pop('pip', None)\n if pip_script:\n if \"ENSUREPIP_OPTIONS\" not in os.environ:\n spec = 'pip = ' + pip_script\n generated.extend(maker.make(spec))\n\n if os.environ.get(\"ENSUREPIP_OPTIONS\", \"\") != \"altinstall\":\n spec = 'pip%s = %s' % (sys.version[:1], pip_script)\n generated.extend(maker.make(spec))\n\n spec = 'pip%s = %s' % (sys.version[:3], pip_script)\n generated.extend(maker.make(spec))\n # Delete any other versioned pip entry points\n pip_ep = [k for k in console if re.match(r'pip(\\d(\\.\\d)?)?$', k)]\n for k in pip_ep:\n del console[k]\n easy_install_script = console.pop('easy_install', None)\n if easy_install_script:\n if \"ENSUREPIP_OPTIONS\" not in os.environ:\n spec = 'easy_install = ' + easy_install_script\n generated.extend(maker.make(spec))\n\n spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)\n generated.extend(maker.make(spec))\n # Delete any other versioned easy_install entry points\n easy_install_ep = [\n k for k in console if re.match(r'easy_install(-\\d\\.\\d)?$', k)\n ]\n for k in easy_install_ep:\n del console[k]\n\n # Generate the console and GUI entry points specified in the wheel\n if len(console) > 0:\n generated.extend(\n maker.make_multiple(['%s = %s' % kv for kv in console.items()])\n )\n if len(gui) > 0:\n generated.extend(\n maker.make_multiple(\n ['%s = %s' % kv for kv in gui.items()],\n {'gui': True}\n )\n )\n\n record = os.path.join(info_dir[0], 'RECORD')\n temp_record = os.path.join(info_dir[0], 'RECORD.pip')\n with open_for_csv(record, 'r') as record_in:\n with open_for_csv(temp_record, 'w+') as record_out:\n reader = csv.reader(record_in)\n writer = csv.writer(record_out)\n for row in reader:\n row[0] = installed.pop(row[0], row[0])\n if row[0] in changed:\n row[1], row[2] = rehash(row[0])\n writer.writerow(row)\n for f in generated:\n h, l = rehash(f)\n writer.writerow((f, h, l))\n for f in installed:\n writer.writerow((installed[f], '', ''))\n shutil.move(temp_record, record)\n\n\ndef _unique(fn):\n @functools.wraps(fn)\n def unique(*args, **kw):\n seen = set()\n for item in fn(*args, **kw):\n if item not in seen:\n seen.add(item)\n yield item\n return unique\n\n\n# TODO: this goes somewhere besides the wheel module\n@_unique\ndef uninstallation_paths(dist):\n \"\"\"\n Yield all the uninstallation paths for dist based on RECORD-without-.pyc\n\n Yield paths to all the files in RECORD. 
For each .py file in RECORD, add\n the .pyc in the same directory.\n\n UninstallPathSet.add() takes care of the __pycache__ .pyc.\n \"\"\"\n from pip.util import FakeFile # circular import\n r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))\n for row in r:\n path = os.path.join(dist.location, row[0])\n yield path\n if path.endswith('.py'):\n dn, fn = os.path.split(path)\n base = fn[:-3]\n path = os.path.join(dn, base + '.pyc')\n yield path\n\n\ndef wheel_version(source_dir):\n \"\"\"\n Return the Wheel-Version of an extracted wheel, if possible.\n\n Otherwise, return False if we couldn't parse / extract it.\n \"\"\"\n try:\n dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]\n\n wheel_data = dist.get_metadata('WHEEL')\n wheel_data = Parser().parsestr(wheel_data)\n\n version = wheel_data['Wheel-Version'].strip()\n version = tuple(map(int, version.split('.')))\n return version\n except:\n return False\n\n\ndef check_compatibility(version, name):\n \"\"\"\n Raises errors or warns if called with an incompatible Wheel-Version.\n\n Pip should refuse to install a Wheel-Version that's a major series\n ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when\n installing a version only minor version ahead (e.g 1.2 > 1.1).\n\n version: a 2-tuple representing a Wheel-Version (Major, Minor)\n name: name of wheel or package to raise exception about\n\n :raises UnsupportedWheel: when an incompatible Wheel-Version is given\n \"\"\"\n if not version:\n raise UnsupportedWheel(\n \"%s is in an unsupported or invalid wheel\" % name\n )\n if version[0] > VERSION_COMPATIBLE[0]:\n raise UnsupportedWheel(\n \"%s's Wheel-Version (%s) is not compatible with this version \"\n \"of pip\" % (name, '.'.join(map(str, version)))\n )\n elif version > VERSION_COMPATIBLE:\n logger.warn('Installing from a newer Wheel-Version (%s)'\n % '.'.join(map(str, version)))\n\n\nclass Wheel(object):\n \"\"\"A wheel file\"\"\"\n\n # TODO: maybe move the install code into this class\n\n wheel_file_re = re.compile(\n r\"\"\"^(?P<namever>(?P<name>.+?)-(?P<ver>\\d.*?))\n ((-(?P<build>\\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\n \\.whl|\\.dist-info)$\"\"\",\n re.VERBOSE\n )\n\n def __init__(self, filename):\n \"\"\"\n :raises InvalidWheelFilename: when the filename is invalid for a wheel\n \"\"\"\n wheel_info = self.wheel_file_re.match(filename)\n if not wheel_info:\n raise InvalidWheelFilename(\n \"%s is not a valid wheel filename.\" % filename\n )\n self.filename = filename\n self.name = wheel_info.group('name').replace('_', '-')\n # we'll assume \"_\" means \"-\" due to wheel naming scheme\n # (https://github.com/pypa/pip/issues/1150)\n self.version = wheel_info.group('ver').replace('_', '-')\n self.pyversions = wheel_info.group('pyver').split('.')\n self.abis = wheel_info.group('abi').split('.')\n self.plats = wheel_info.group('plat').split('.')\n\n # All the tag combinations from this file\n self.file_tags = set(\n (x, y, z) for x in self.pyversions\n for y in self.abis for z in self.plats\n )\n\n def support_index_min(self, tags=None):\n \"\"\"\n Return the lowest index that one of the wheel's file_tag combinations\n achieves in the supported_tags list e.g. if there are 8 supported tags,\n and one of the file tags is first in the list, then return 0. 
Returns\n None is the wheel is not supported.\n \"\"\"\n if tags is None: # for mock\n tags = pep425tags.supported_tags\n indexes = [tags.index(c) for c in self.file_tags if c in tags]\n return min(indexes) if indexes else None\n\n def supported(self, tags=None):\n \"\"\"Is this wheel supported on this system?\"\"\"\n if tags is None: # for mock\n tags = pep425tags.supported_tags\n return bool(set(tags).intersection(self.file_tags))\n\n\nclass WheelBuilder(object):\n \"\"\"Build wheels from a RequirementSet.\"\"\"\n\n def __init__(self, requirement_set, finder, wheel_dir, build_options=[],\n global_options=[]):\n self.requirement_set = requirement_set\n self.finder = finder\n self.wheel_dir = normalize_path(wheel_dir)\n self.build_options = build_options\n self.global_options = global_options\n\n def _build_one(self, req):\n \"\"\"Build one wheel.\"\"\"\n\n base_args = [\n sys.executable, '-c',\n \"import setuptools;__file__=%r;\"\n \"exec(compile(open(__file__).read().replace('\\\\r\\\\n', '\\\\n'), \"\n \"__file__, 'exec'))\" % req.setup_py\n ] + list(self.global_options)\n\n logger.notify('Running setup.py bdist_wheel for %s' % req.name)\n logger.notify('Destination directory: %s' % self.wheel_dir)\n wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] \\\n + self.build_options\n try:\n call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)\n return True\n except:\n logger.error('Failed building wheel for %s' % req.name)\n return False\n\n def build(self):\n \"\"\"Build wheels.\"\"\"\n\n # unpack and constructs req set\n self.requirement_set.prepare_files(self.finder)\n\n reqset = self.requirement_set.requirements.values()\n\n buildset = []\n for req in reqset:\n if req.is_wheel:\n logger.notify(\n 'Skipping %s, due to already being wheel.' 
% req.name)\n elif req.editable:\n logger.notify(\n 'Skipping %s, due to being editable' % req.name)\n else:\n buildset.append(req)\n\n if not buildset:\n return True\n\n # Build the wheels.\n logger.notify(\n 'Building wheels for collected packages: %s' %\n ', '.join([req.name for req in buildset])\n )\n logger.indent += 2\n build_success, build_failure = [], []\n for req in buildset:\n if self._build_one(req):\n build_success.append(req)\n else:\n build_failure.append(req)\n logger.indent -= 2\n\n # notify success/failure\n if build_success:\n logger.notify(\n 'Successfully built %s' %\n ' '.join([req.name for req in build_success])\n )\n if build_failure:\n logger.notify(\n 'Failed to build %s' %\n ' '.join([req.name for req in build_failure])\n )\n # Return True if all builds were successful\n return len(build_failure) == 0\n", "path": "pip/wheel.py"}], "after_files": [{"content": "\"\"\"\nSupport for installing and building the \"wheel\" binary package format.\n\"\"\"\nfrom __future__ import with_statement\n\nimport compileall\nimport csv\nimport functools\nimport hashlib\nimport os\nimport re\nimport shutil\nimport sys\n\nfrom base64 import urlsafe_b64encode\nfrom email.parser import Parser\n\nfrom pip.compat import ConfigParser, StringIO, binary\nfrom pip.exceptions import InvalidWheelFilename, UnsupportedWheel\nfrom pip.locations import distutils_scheme\nfrom pip.log import logger\nfrom pip import pep425tags\nfrom pip.util import call_subprocess, normalize_path, make_path_relative\nfrom pip._vendor.distlib.scripts import ScriptMaker\nfrom pip._vendor import pkg_resources\n\n\nwheel_ext = '.whl'\n\nVERSION_COMPATIBLE = (1, 0)\n\n\ndef rehash(path, algo='sha256', blocksize=1 << 20):\n \"\"\"Return (hash, length) for path using hashlib.new(algo)\"\"\"\n h = hashlib.new(algo)\n length = 0\n with open(path, 'rb') as f:\n block = f.read(blocksize)\n while block:\n length += len(block)\n h.update(block)\n block = f.read(blocksize)\n digest = 'sha256=' + urlsafe_b64encode(\n h.digest()\n ).decode('latin1').rstrip('=')\n return (digest, length)\n\n\ndef open_for_csv(name, mode):\n if sys.version_info[0] < 3:\n nl = {}\n bin = 'b'\n else:\n nl = {'newline': ''}\n bin = ''\n return open(name, mode + bin, **nl)\n\n\ndef fix_script(path):\n \"\"\"Replace #!python with #!/path/to/python\n Return True if file was changed.\"\"\"\n # XXX RECORD hashes will need to be updated\n if os.path.isfile(path):\n script = open(path, 'rb')\n try:\n firstline = script.readline()\n if not firstline.startswith(binary('#!python')):\n return False\n exename = sys.executable.encode(sys.getfilesystemencoding())\n firstline = binary('#!') + exename + binary(os.linesep)\n rest = script.read()\n finally:\n script.close()\n script = open(path, 'wb')\n try:\n script.write(firstline)\n script.write(rest)\n finally:\n script.close()\n return True\n\ndist_info_re = re.compile(r\"\"\"^(?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n \\.dist-info$\"\"\", re.VERBOSE)\n\n\ndef root_is_purelib(name, wheeldir):\n \"\"\"\n Return True if the extracted wheel in wheeldir should go into purelib.\n \"\"\"\n name_folded = name.replace(\"-\", \"_\")\n for item in os.listdir(wheeldir):\n match = dist_info_re.match(item)\n if match and match.group('name') == name_folded:\n with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:\n for line in wheel:\n line = line.lower().rstrip()\n if line == \"root-is-purelib: true\":\n return True\n return False\n\n\ndef get_entrypoints(filename):\n if not os.path.exists(filename):\n return {}, {}\n\n 
# This is done because you can pass a string to entry_points wrappers which\n # means that they may or may not be valid INI files. The attempt here is to\n # strip leading and trailing whitespace in order to make them valid INI\n # files.\n with open(filename) as fp:\n data = StringIO()\n for line in fp:\n data.write(line.strip())\n data.write(\"\\n\")\n data.seek(0)\n\n cp = ConfigParser.RawConfigParser()\n cp.readfp(data)\n\n console = {}\n gui = {}\n if cp.has_section('console_scripts'):\n console = dict(cp.items('console_scripts'))\n if cp.has_section('gui_scripts'):\n gui = dict(cp.items('gui_scripts'))\n return console, gui\n\n\ndef move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,\n pycompile=True, scheme=None):\n \"\"\"Install a wheel\"\"\"\n\n if not scheme:\n scheme = distutils_scheme(name, user=user, home=home, root=root)\n\n if root_is_purelib(name, wheeldir):\n lib_dir = scheme['purelib']\n else:\n lib_dir = scheme['platlib']\n\n info_dir = []\n data_dirs = []\n source = wheeldir.rstrip(os.path.sep) + os.path.sep\n\n # Record details of the files moved\n # installed = files copied from the wheel to the destination\n # changed = files changed while installing (scripts #! line typically)\n # generated = files newly generated during the install (script wrappers)\n installed = {}\n changed = set()\n generated = []\n\n # Compile all of the pyc files that we're going to be installing\n if pycompile:\n compileall.compile_dir(source, force=True, quiet=True)\n\n def normpath(src, p):\n return make_path_relative(src, p).replace(os.path.sep, '/')\n\n def record_installed(srcfile, destfile, modified=False):\n \"\"\"Map archive RECORD paths to installation RECORD paths.\"\"\"\n oldpath = normpath(srcfile, wheeldir)\n newpath = normpath(destfile, lib_dir)\n installed[oldpath] = newpath\n if modified:\n changed.add(destfile)\n\n def clobber(source, dest, is_base, fixer=None, filter=None):\n if not os.path.exists(dest): # common for the 'include' path\n os.makedirs(dest)\n\n for dir, subdirs, files in os.walk(source):\n basedir = dir[len(source):].lstrip(os.path.sep)\n destdir = os.path.join(dest, basedir)\n if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):\n continue\n for s in subdirs:\n destsubdir = os.path.join(dest, basedir, s)\n if is_base and basedir == '' and destsubdir.endswith('.data'):\n data_dirs.append(s)\n continue\n elif (is_base\n and s.endswith('.dist-info')\n # is self.req.project_name case preserving?\n and s.lower().startswith(\n req.project_name.replace('-', '_').lower())):\n assert not info_dir, 'Multiple .dist-info directories'\n info_dir.append(destsubdir)\n for f in files:\n # Skip unwanted files\n if filter and filter(f):\n continue\n srcfile = os.path.join(dir, f)\n destfile = os.path.join(dest, basedir, f)\n # directory creation is lazy and after the file filtering above\n # to ensure we don't install empty dirs; empty dirs can't be\n # uninstalled.\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n # use copy2 (not move) to be extra sure we're not moving\n # directories over; copy2 fails for directories. 
this would\n # fail tests (not during released/user execution)\n shutil.copy2(srcfile, destfile)\n changed = False\n if fixer:\n changed = fixer(destfile)\n record_installed(srcfile, destfile, changed)\n\n clobber(source, lib_dir, True)\n\n assert info_dir, \"%s .dist-info directory not found\" % req\n\n # Get the defined entry points\n ep_file = os.path.join(info_dir[0], 'entry_points.txt')\n console, gui = get_entrypoints(ep_file)\n\n def is_entrypoint_wrapper(name):\n # EP, EP.exe and EP-script.py are scripts generated for\n # entry point EP by setuptools\n if name.lower().endswith('.exe'):\n matchname = name[:-4]\n elif name.lower().endswith('-script.py'):\n matchname = name[:-10]\n elif name.lower().endswith(\".pya\"):\n matchname = name[:-4]\n else:\n matchname = name\n # Ignore setuptools-generated scripts\n return (matchname in console or matchname in gui)\n\n for datadir in data_dirs:\n fixer = None\n filter = None\n for subdir in os.listdir(os.path.join(wheeldir, datadir)):\n fixer = None\n if subdir == 'scripts':\n fixer = fix_script\n filter = is_entrypoint_wrapper\n source = os.path.join(wheeldir, datadir, subdir)\n dest = scheme[subdir]\n clobber(source, dest, False, fixer=fixer, filter=filter)\n\n maker = ScriptMaker(None, scheme['scripts'])\n\n # Ensure old scripts are overwritten.\n # See https://github.com/pypa/pip/issues/1800\n maker.clobber = True\n\n # Ensure we don't generate any variants for scripts because this is almost\n # never what somebody wants.\n # See https://bitbucket.org/pypa/distlib/issue/35/\n maker.variants = set(('', ))\n\n # This is required because otherwise distlib creates scripts that are not\n # executable.\n # See https://bitbucket.org/pypa/distlib/issue/32/\n maker.set_mode = True\n\n # Simplify the script and fix the fact that the default script swallows\n # every single stack trace.\n # See https://bitbucket.org/pypa/distlib/issue/34/\n # See https://bitbucket.org/pypa/distlib/issue/33/\n def _get_script_text(entry):\n return maker.script_template % {\n \"module\": entry.prefix,\n \"import_name\": entry.suffix.split(\".\")[0],\n \"func\": entry.suffix,\n }\n\n maker._get_script_text = _get_script_text\n maker.script_template = \"\"\"# -*- coding: utf-8 -*-\nimport re\nimport sys\n\nfrom %(module)s import %(import_name)s\n\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0])\n sys.exit(%(func)s())\n\"\"\"\n\n # Special case pip and setuptools to generate versioned wrappers\n #\n # The issue is that some projects (specifically, pip and setuptools) use\n # code in setup.py to create \"versioned\" entry points - pip2.7 on Python\n # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into\n # the wheel metadata at build time, and so if the wheel is installed with\n # a *different* version of Python the entry points will be wrong. The\n # correct fix for this is to enhance the metadata to be able to describe\n # such versioned entry points, but that won't happen till Metadata 2.0 is\n # available.\n # In the meantime, projects using versioned entry points will either have\n # incorrect versioned entry points, or they will not be able to distribute\n # \"universal\" wheels (i.e., they will need a wheel per Python version).\n #\n # Because setuptools and pip are bundled with _ensurepip and virtualenv,\n # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we\n # override the versioned entry points in the wheel and generate the\n # correct ones. 
This code is purely a short-term measure until Metadat 2.0\n # is available.\n #\n # To add the level of hack in this section of code, in order to support\n # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment\n # variable which will control which version scripts get installed.\n #\n # ENSUREPIP_OPTIONS=altinstall\n # - Only pipX.Y and easy_install-X.Y will be generated and installed\n # ENSUREPIP_OPTIONS=install\n # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note\n # that this option is technically if ENSUREPIP_OPTIONS is set and is\n # not altinstall\n # DEFAULT\n # - The default behavior is to install pip, pipX, pipX.Y, easy_install\n # and easy_install-X.Y.\n pip_script = console.pop('pip', None)\n if pip_script:\n if \"ENSUREPIP_OPTIONS\" not in os.environ:\n spec = 'pip = ' + pip_script\n generated.extend(maker.make(spec))\n\n if os.environ.get(\"ENSUREPIP_OPTIONS\", \"\") != \"altinstall\":\n spec = 'pip%s = %s' % (sys.version[:1], pip_script)\n generated.extend(maker.make(spec))\n\n spec = 'pip%s = %s' % (sys.version[:3], pip_script)\n generated.extend(maker.make(spec))\n # Delete any other versioned pip entry points\n pip_ep = [k for k in console if re.match(r'pip(\\d(\\.\\d)?)?$', k)]\n for k in pip_ep:\n del console[k]\n easy_install_script = console.pop('easy_install', None)\n if easy_install_script:\n if \"ENSUREPIP_OPTIONS\" not in os.environ:\n spec = 'easy_install = ' + easy_install_script\n generated.extend(maker.make(spec))\n\n spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)\n generated.extend(maker.make(spec))\n # Delete any other versioned easy_install entry points\n easy_install_ep = [\n k for k in console if re.match(r'easy_install(-\\d\\.\\d)?$', k)\n ]\n for k in easy_install_ep:\n del console[k]\n\n # Generate the console and GUI entry points specified in the wheel\n if len(console) > 0:\n generated.extend(\n maker.make_multiple(['%s = %s' % kv for kv in console.items()])\n )\n if len(gui) > 0:\n generated.extend(\n maker.make_multiple(\n ['%s = %s' % kv for kv in gui.items()],\n {'gui': True}\n )\n )\n\n record = os.path.join(info_dir[0], 'RECORD')\n temp_record = os.path.join(info_dir[0], 'RECORD.pip')\n with open_for_csv(record, 'r') as record_in:\n with open_for_csv(temp_record, 'w+') as record_out:\n reader = csv.reader(record_in)\n writer = csv.writer(record_out)\n for row in reader:\n row[0] = installed.pop(row[0], row[0])\n if row[0] in changed:\n row[1], row[2] = rehash(row[0])\n writer.writerow(row)\n for f in generated:\n h, l = rehash(f)\n writer.writerow((f, h, l))\n for f in installed:\n writer.writerow((installed[f], '', ''))\n shutil.move(temp_record, record)\n\n\ndef _unique(fn):\n @functools.wraps(fn)\n def unique(*args, **kw):\n seen = set()\n for item in fn(*args, **kw):\n if item not in seen:\n seen.add(item)\n yield item\n return unique\n\n\n# TODO: this goes somewhere besides the wheel module\n@_unique\ndef uninstallation_paths(dist):\n \"\"\"\n Yield all the uninstallation paths for dist based on RECORD-without-.pyc\n\n Yield paths to all the files in RECORD. 
For each .py file in RECORD, add\n the .pyc in the same directory.\n\n UninstallPathSet.add() takes care of the __pycache__ .pyc.\n \"\"\"\n from pip.util import FakeFile # circular import\n r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))\n for row in r:\n path = os.path.join(dist.location, row[0])\n yield path\n if path.endswith('.py'):\n dn, fn = os.path.split(path)\n base = fn[:-3]\n path = os.path.join(dn, base + '.pyc')\n yield path\n\n\ndef wheel_version(source_dir):\n \"\"\"\n Return the Wheel-Version of an extracted wheel, if possible.\n\n Otherwise, return False if we couldn't parse / extract it.\n \"\"\"\n try:\n dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]\n\n wheel_data = dist.get_metadata('WHEEL')\n wheel_data = Parser().parsestr(wheel_data)\n\n version = wheel_data['Wheel-Version'].strip()\n version = tuple(map(int, version.split('.')))\n return version\n except:\n return False\n\n\ndef check_compatibility(version, name):\n \"\"\"\n Raises errors or warns if called with an incompatible Wheel-Version.\n\n Pip should refuse to install a Wheel-Version that's a major series\n ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when\n installing a version only minor version ahead (e.g 1.2 > 1.1).\n\n version: a 2-tuple representing a Wheel-Version (Major, Minor)\n name: name of wheel or package to raise exception about\n\n :raises UnsupportedWheel: when an incompatible Wheel-Version is given\n \"\"\"\n if not version:\n raise UnsupportedWheel(\n \"%s is in an unsupported or invalid wheel\" % name\n )\n if version[0] > VERSION_COMPATIBLE[0]:\n raise UnsupportedWheel(\n \"%s's Wheel-Version (%s) is not compatible with this version \"\n \"of pip\" % (name, '.'.join(map(str, version)))\n )\n elif version > VERSION_COMPATIBLE:\n logger.warn('Installing from a newer Wheel-Version (%s)'\n % '.'.join(map(str, version)))\n\n\nclass Wheel(object):\n \"\"\"A wheel file\"\"\"\n\n # TODO: maybe move the install code into this class\n\n wheel_file_re = re.compile(\n r\"\"\"^(?P<namever>(?P<name>.+?)-(?P<ver>\\d.*?))\n ((-(?P<build>\\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\n \\.whl|\\.dist-info)$\"\"\",\n re.VERBOSE\n )\n\n def __init__(self, filename):\n \"\"\"\n :raises InvalidWheelFilename: when the filename is invalid for a wheel\n \"\"\"\n wheel_info = self.wheel_file_re.match(filename)\n if not wheel_info:\n raise InvalidWheelFilename(\n \"%s is not a valid wheel filename.\" % filename\n )\n self.filename = filename\n self.name = wheel_info.group('name').replace('_', '-')\n # we'll assume \"_\" means \"-\" due to wheel naming scheme\n # (https://github.com/pypa/pip/issues/1150)\n self.version = wheel_info.group('ver').replace('_', '-')\n self.pyversions = wheel_info.group('pyver').split('.')\n self.abis = wheel_info.group('abi').split('.')\n self.plats = wheel_info.group('plat').split('.')\n\n # All the tag combinations from this file\n self.file_tags = set(\n (x, y, z) for x in self.pyversions\n for y in self.abis for z in self.plats\n )\n\n def support_index_min(self, tags=None):\n \"\"\"\n Return the lowest index that one of the wheel's file_tag combinations\n achieves in the supported_tags list e.g. if there are 8 supported tags,\n and one of the file tags is first in the list, then return 0. 
Returns\n None is the wheel is not supported.\n \"\"\"\n if tags is None: # for mock\n tags = pep425tags.supported_tags\n indexes = [tags.index(c) for c in self.file_tags if c in tags]\n return min(indexes) if indexes else None\n\n def supported(self, tags=None):\n \"\"\"Is this wheel supported on this system?\"\"\"\n if tags is None: # for mock\n tags = pep425tags.supported_tags\n return bool(set(tags).intersection(self.file_tags))\n\n\nclass WheelBuilder(object):\n \"\"\"Build wheels from a RequirementSet.\"\"\"\n\n def __init__(self, requirement_set, finder, wheel_dir, build_options=[],\n global_options=[]):\n self.requirement_set = requirement_set\n self.finder = finder\n self.wheel_dir = normalize_path(wheel_dir)\n self.build_options = build_options\n self.global_options = global_options\n\n def _build_one(self, req):\n \"\"\"Build one wheel.\"\"\"\n\n base_args = [\n sys.executable, '-c',\n \"import setuptools;__file__=%r;\"\n \"exec(compile(open(__file__).read().replace('\\\\r\\\\n', '\\\\n'), \"\n \"__file__, 'exec'))\" % req.setup_py\n ] + list(self.global_options)\n\n logger.notify('Running setup.py bdist_wheel for %s' % req.name)\n logger.notify('Destination directory: %s' % self.wheel_dir)\n wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] \\\n + self.build_options\n try:\n call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)\n return True\n except:\n logger.error('Failed building wheel for %s' % req.name)\n return False\n\n def build(self):\n \"\"\"Build wheels.\"\"\"\n\n # unpack and constructs req set\n self.requirement_set.prepare_files(self.finder)\n\n reqset = self.requirement_set.requirements.values()\n\n buildset = []\n for req in reqset:\n if req.is_wheel:\n logger.notify(\n 'Skipping %s, due to already being wheel.' % req.name)\n elif req.editable:\n logger.notify(\n 'Skipping %s, due to being editable' % req.name)\n else:\n buildset.append(req)\n\n if not buildset:\n return True\n\n # Build the wheels.\n logger.notify(\n 'Building wheels for collected packages: %s' %\n ', '.join([req.name for req in buildset])\n )\n logger.indent += 2\n build_success, build_failure = [], []\n for req in buildset:\n if self._build_one(req):\n build_success.append(req)\n else:\n build_failure.append(req)\n logger.indent -= 2\n\n # notify success/failure\n if build_success:\n logger.notify(\n 'Successfully built %s' %\n ' '.join([req.name for req in build_success])\n )\n if build_failure:\n logger.notify(\n 'Failed to build %s' %\n ' '.join([req.name for req in build_failure])\n )\n # Return True if all builds were successful\n return len(build_failure) == 0\n", "path": "pip/wheel.py"}]} |
gh_patches_debug_1376 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3312 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider upsstore is broken
During the global build at 2021-10-13-14-42-23, spider **upsstore** failed with **5176 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/upsstore.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/upsstore.py`
Content:
```
1 import scrapy
2 import json
3 import re
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7 DAY_MAPPING = {
8 "MONDAY": "Mo",
9 "TUESDAY": "Tu",
10 "WEDNESDAY": "We",
11 "THURSDAY": "Th",
12 "FRIDAY": "Fr",
13 "SATURDAY": "Sa",
14 "SUNDAY": "Su"
15 }
16
17
18 class UpsStoreSpider(scrapy.Spider):
19 name = "upsstore"
20 item_attributes = { 'brand': "UPS Store" }
21 allowed_domains = ["theupsstore.com"]
22 download_delay = 0.1
23 start_urls = (
24 'https://locations.theupsstore.com/',
25 )
26
27 def parse_hours(self, hours):
28 """
29 :param hours:
30 :return:
31 """
32 hours = json.loads(hours)
33 o = OpeningHours()
34
35 for day in hours["hours"]["days"]:
36 if not day["isClosed"]:
37 interval = day["intervals"][0]
38
39 o.add_range(DAY_MAPPING[day["day"]],
40 open_time=str(interval["start"]),
41 close_time=str(interval["end"]),
42 time_format="%H%M")
43 return o.as_opening_hours()
44
45 def parse_store(self, response):
46 ref = response.xpath('//input[@id="store_id"]/@value').extract_first()
47 if not ref:
48 ref = re.search(r'store(\d+)@theupsstore.com',
49 response.xpath('//a[@itemprop="email"]/text()').extract_first()).groups()
50
51 properties = {
52 'name': response.xpath('//span[@class="LocationName-geo"]/text()').extract_first(),
53 'phone': response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
54 'addr_full': response.xpath('//meta[@itemprop="streetAddress"]/@content').extract_first(),
55 'city': response.xpath('//meta[@itemprop="addressLocality"]/@content').extract_first(),
56 'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
57 'country': response.xpath('//abbr[@itemprop="addressCountry"]/text()').extract_first(),
58 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
59 'ref': ref,
60 'website': response.url,
61 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
62 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
63 }
64
65 hours = response.xpath('//script[@id="location_info_hours"]/text()').extract_first()
66 try:
67 hours = self.parse_hours(hours)
68 if hours:
69 properties['opening_hours'] = hours
70 except:
71 pass
72
73 yield GeojsonPointItem(**properties)
74
75 def parse(self, response):
76 urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
77
78 if urls:
79 for url in urls:
80 if len(url.split('/')) == 3:
81 callback = self.parse_store
82 else:
83 callback = self.parse
84
85 yield scrapy.Request(
86 response.urljoin(url),
87 callback=callback,
88 )
89
90 else:
91 urls = response.xpath('//a[@class="Link"]/@href').extract()
92 for url in urls:
93 yield scrapy.Request(
94 response.urljoin(url),
95 callback=self.parse_store,
96 )
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/upsstore.py b/locations/spiders/upsstore.py
--- a/locations/spiders/upsstore.py
+++ b/locations/spiders/upsstore.py
@@ -43,6 +43,9 @@
return o.as_opening_hours()
def parse_store(self, response):
+ if "Permanently Closed" in response.text:
+ return
+
ref = response.xpath('//input[@id="store_id"]/@value').extract_first()
if not ref:
ref = re.search(r'store(\d+)@theupsstore.com',
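
For clarity, the guard added above only has to run before any field extraction. The toy below mimics that control flow with a stand-in response object; Scrapy's real `Response` is not needed for the illustration, and the markup strings and placeholder return value are invented.

```python
# Toy version of the patched parse_store: skip pages marked "Permanently Closed"
# before doing any scraping. FakeResponse stands in for scrapy.http.Response.
class FakeResponse:
    def __init__(self, text):
        self.text = text

def parse_store(response):
    if "Permanently Closed" in response.text:
        return None                      # the real spider simply returns, yielding nothing
    return {"ref": "1234"}               # placeholder for the XPath-driven extraction

print(parse_store(FakeResponse("<p>Permanently Closed</p>")))   # None
print(parse_store(FakeResponse("<p>The UPS Store #1234</p>")))  # {'ref': '1234'}
```
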
| {"golden_diff": "diff --git a/locations/spiders/upsstore.py b/locations/spiders/upsstore.py\n--- a/locations/spiders/upsstore.py\n+++ b/locations/spiders/upsstore.py\n@@ -43,6 +43,9 @@\n return o.as_opening_hours()\n \n def parse_store(self, response):\n+ if \"Permanently Closed\" in response.text:\n+ return\n+\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n", "issue": "Spider upsstore is broken\nDuring the global build at 2021-10-13-14-42-23, spider **upsstore** failed with **5176 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/upsstore.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson))\n", "before_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass UpsStoreSpider(scrapy.Spider):\n name = \"upsstore\"\n item_attributes = { 'brand': \"UPS Store\" }\n allowed_domains = [\"theupsstore.com\"]\n download_delay = 0.1\n start_urls = (\n 'https://locations.theupsstore.com/',\n )\n\n def parse_hours(self, hours):\n \"\"\"\n :param hours:\n :return:\n \"\"\"\n hours = json.loads(hours)\n o = OpeningHours()\n\n for day in hours[\"hours\"][\"days\"]:\n if not day[\"isClosed\"]:\n interval = day[\"intervals\"][0]\n\n o.add_range(DAY_MAPPING[day[\"day\"]],\n open_time=str(interval[\"start\"]),\n close_time=str(interval[\"end\"]),\n time_format=\"%H%M\")\n return o.as_opening_hours()\n\n def parse_store(self, response):\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n response.xpath('//a[@itemprop=\"email\"]/text()').extract_first()).groups()\n\n properties = {\n 'name': response.xpath('//span[@class=\"LocationName-geo\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first(),\n 'addr_full': response.xpath('//meta[@itemprop=\"streetAddress\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@itemprop=\"addressLocality\"]/@content').extract_first(),\n 'state': response.xpath('//abbr[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'country': response.xpath('//abbr[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n\n hours = response.xpath('//script[@id=\"location_info_hours\"]/text()').extract_first()\n try:\n hours = self.parse_hours(hours)\n if hours:\n properties['opening_hours'] = hours\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n\n if urls:\n for url in urls:\n if len(url.split('/')) == 3:\n callback = self.parse_store\n else:\n callback = self.parse\n\n yield 
scrapy.Request(\n response.urljoin(url),\n callback=callback,\n )\n\n else:\n urls = response.xpath('//a[@class=\"Link\"]/@href').extract()\n for url in urls:\n yield scrapy.Request(\n response.urljoin(url),\n callback=self.parse_store,\n )", "path": "locations/spiders/upsstore.py"}], "after_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass UpsStoreSpider(scrapy.Spider):\n name = \"upsstore\"\n item_attributes = { 'brand': \"UPS Store\" }\n allowed_domains = [\"theupsstore.com\"]\n download_delay = 0.1\n start_urls = (\n 'https://locations.theupsstore.com/',\n )\n\n def parse_hours(self, hours):\n \"\"\"\n :param hours:\n :return:\n \"\"\"\n hours = json.loads(hours)\n o = OpeningHours()\n\n for day in hours[\"hours\"][\"days\"]:\n if not day[\"isClosed\"]:\n interval = day[\"intervals\"][0]\n\n o.add_range(DAY_MAPPING[day[\"day\"]],\n open_time=str(interval[\"start\"]),\n close_time=str(interval[\"end\"]),\n time_format=\"%H%M\")\n return o.as_opening_hours()\n\n def parse_store(self, response):\n if \"Permanently Closed\" in response.text:\n return\n\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n response.xpath('//a[@itemprop=\"email\"]/text()').extract_first()).groups()\n\n properties = {\n 'name': response.xpath('//span[@class=\"LocationName-geo\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first(),\n 'addr_full': response.xpath('//meta[@itemprop=\"streetAddress\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@itemprop=\"addressLocality\"]/@content').extract_first(),\n 'state': response.xpath('//abbr[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'country': response.xpath('//abbr[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n\n hours = response.xpath('//script[@id=\"location_info_hours\"]/text()').extract_first()\n try:\n hours = self.parse_hours(hours)\n if hours:\n properties['opening_hours'] = hours\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n\n if urls:\n for url in urls:\n if len(url.split('/')) == 3:\n callback = self.parse_store\n else:\n callback = self.parse\n\n yield scrapy.Request(\n response.urljoin(url),\n callback=callback,\n )\n\n else:\n urls = response.xpath('//a[@class=\"Link\"]/@href').extract()\n for url in urls:\n yield scrapy.Request(\n response.urljoin(url),\n callback=self.parse_store,\n )", "path": "locations/spiders/upsstore.py"}]} |
gh_patches_debug_1377 | rasdani/github-patches | git_diff | saulpw__visidata-509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[html saver] Saving typed columns as html (int/vlen/bool) causes exception
I tried to copy (yank) a couple of rows from the frequency sheet and it produced the following error. I believe this is because `html.escape` expects strings? A similar error also occurs in other sheets when a column holds a non-string Python type (e.g. bool).
FrequencySheet error
```
Traceback (most recent call last):
File "/Documents/pyv/py3/lib/python3.7/site-packages/visidata/threads.py", line 201, in _toplevelTryFunc
t.status = func(*args, **kwargs)
File "/Documents/pyv/py3/lib/python3.7/site-packages/visidata/loaders/html.py", line 124, in save_html
fp.write(html.escape(val))
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/html/__init__.py", line 19, in escape
    s = s.replace("&", "&amp;") # Must be done first!
AttributeError: 'vlen' object has no attribute 'replace'
```
Sheet with a bool column error:
```
Traceback (most recent call last):
File "/Documents/pyv/py3/lib/python3.7/site-packages/visidata/threads.py", line 201, in _toplevelTryFunc
t.status = func(*args, **kwargs)
File "/Documents/pyv/py3/lib/python3.7/site-packages/visidata/loaders/html.py", line 124, in save_html
fp.write(html.escape(val))
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/html/__init__.py", line 19, in escape
    s = s.replace("&", "&amp;") # Must be done first!
AttributeError: 'bool' object has no attribute 'replace'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/html.py`
Content:
```
1 import html
2 from visidata import *
3
4
5 class HtmlTablesSheet(IndexSheet):
6 rowtype = 'sheets' # rowdef: HtmlTableSheet (sheet.html = lxml.html.HtmlElement)
7 columns = IndexSheet.columns + [
8 Column('tag', width=0, getter=lambda col,row: row.html.tag),
9 Column('id', getter=lambda col,row: row.html.attrib.get('id')),
10 Column('classes', getter=lambda col,row: row.html.attrib.get('class')),
11 ]
12 def iterload(self):
13 import lxml.html
14 from lxml import etree
15 utf8_parser = etree.HTMLParser(encoding='utf-8')
16 with self.source.open_text() as fp:
17 html = lxml.html.etree.parse(fp, parser=utf8_parser)
18 self.setKeys([self.column('name')])
19 self.column('keys').hide()
20 self.column('source').hide()
21
22 for i, e in enumerate(html.iter('table')):
23 if e.tag == 'table':
24 vs = HtmlTableSheet(e.attrib.get("id", "table_" + str(i)), source=e)
25 vs.reload()
26 vs.html = e
27 yield vs
28
29
30 def is_header(elem):
31 scope = elem.attrib.get('scope', '')
32
33 if elem.tag == 'th':
34 if not scope or scope == 'col':
35 return True
36
37 return False
38
39 class HtmlTableSheet(Sheet):
40 rowtype = 'rows' # list of strings
41 columns = []
42
43 def iterload(self):
44 headers = []
45
46 maxlinks = {} # [colnum] -> nlinks:int
47
48 for rownum, r in enumerate(self.source.iter('tr')):
49 row = []
50
51 colnum = 0
52 # get starting column, which might be different if there were rowspan>1 already
53 if rownum < len(headers):
54 while colnum < len(headers[rownum]):
55 if headers[rownum][colnum] is None:
56 break
57 colnum += 1
58
59 for cell in r.getchildren():
60 colspan = int(cell.attrib.get('colspan', 1))
61 rowspan = int(cell.attrib.get('rowspan', 1))
62 cellval = ' '.join(x.strip() for x in cell.itertext()) # text only without markup
63 links = [x.get('href') for x in cell.iter('a')]
64 maxlinks[colnum] = max(maxlinks.get(colnum, 0), len(links))
65
66 if is_header(cell):
67 for k in range(rownum, rownum+rowspan):
68 while k >= len(headers): # extend headers list with lists for all header rows
69 headers.append([])
70
71 for j in range(colnum, colnum+colspan):
72 while j >= len(headers[k]):
73 headers[k].append(None)
74 headers[k][j] = cellval
75 cellval = '' # use empty non-None value for subsequent rows in the rowspan
76 else:
77 while colnum >= len(row):
78 row.append(None)
79 row[colnum] = (cellval, links)
80
81 colnum += colspan
82
83 if any(row):
84 yield row
85
86 self.columns = []
87 if headers:
88 it = itertools.zip_longest(*headers, fillvalue='')
89 else:
90 it = [list(x) for x in self.rows[0]]
91 self.rows = self.rows[1:]
92
93 for colnum, names in enumerate(it):
94 name = '_'.join(str(x) for x in names if x)
95 self.addColumn(Column(name, getter=lambda c,r,i=colnum: r[i][0]))
96 for linknum in range(maxlinks.get(colnum, 0)):
97 self.addColumn(Column(name+'_link'+str(linknum), width=20, getter=lambda c,r,i=colnum,j=linknum: r[i][1][j]))
98
99
100 @VisiData.api
101 def save_html(vd, p, *vsheets):
102 'Save vsheets as HTML tables in a single file'
103
104 with open(p, 'w', encoding='ascii', errors='xmlcharrefreplace') as fp:
105 for sheet in vsheets:
106
107 fp.write('<h2 class="sheetname">%s</h2>\n'.format(sheetname=html.escape(sheet.name)))
108
109 fp.write('<table id="{sheetname}">\n'.format(sheetname=html.escape(sheet.name)))
110
111 # headers
112 fp.write('<tr>')
113 for col in sheet.visibleCols:
114 contents = html.escape(col.name)
115 fp.write('<th>{colname}</th>'.format(colname=contents))
116 fp.write('</tr>\n')
117
118 # rows
119 with Progress(gerund='saving'):
120 for typedvals in sheet.iterdispvals(format=False):
121 fp.write('<tr>')
122 for col, val in typedvals.items():
123 fp.write('<td>')
124 fp.write(html.escape(val))
125 fp.write('</td>')
126 fp.write('</tr>\n')
127
128 fp.write('</table>')
129 vd.status('%s save finished' % p)
130
131
132 VisiData.save_htm = VisiData.save_html
133
134
135 vd.filetype('html', HtmlTablesSheet)
136 vd.filetype('htm', HtmlTablesSheet)
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/loaders/html.py b/visidata/loaders/html.py
--- a/visidata/loaders/html.py
+++ b/visidata/loaders/html.py
@@ -121,7 +121,7 @@
fp.write('<tr>')
for col, val in typedvals.items():
fp.write('<td>')
- fp.write(html.escape(val))
+ fp.write(html.escape(str(val)))
fp.write('</td>')
fp.write('</tr>\n')
| {"golden_diff": "diff --git a/visidata/loaders/html.py b/visidata/loaders/html.py\n--- a/visidata/loaders/html.py\n+++ b/visidata/loaders/html.py\n@@ -121,7 +121,7 @@\n fp.write('<tr>')\n for col, val in typedvals.items():\n fp.write('<td>')\n- fp.write(html.escape(val))\n+ fp.write(html.escape(str(val)))\n fp.write('</td>')\n fp.write('</tr>\\n')\n", "issue": "[html saver] Saving typed columns as html (int/vlen/bool) causes exception\nI tried to copy (yank) a couple of rows from the frequency sheet and it provided me the following error. I believe this is due to the html parser expecting strings? A similar error also occurs in other sheets when using unexpected py types (e.g. bool).\r\n\r\nFrequencySheet error\r\n```\r\nTraceback (most recent call last):\r\n File \"/Documents/pyv/py3/lib/python3.7/site-packages/visidata/threads.py\", line 201, in _toplevelTryFunc\r\n t.status = func(*args, **kwargs)\r\n File \"/Documents/pyv/py3/lib/python3.7/site-packages/visidata/loaders/html.py\", line 124, in save_html\r\n fp.write(html.escape(val))\r\n File \"/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/html/__init__.py\", line 19, in escape\r\n s = s.replace(\"&\", \"&\") # Must be done first!\r\nAttributeError: 'vlen' object has no attribute 'replace'\r\n```\r\n\r\nSheet with a bool column error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/Documents/pyv/py3/lib/python3.7/site-packages/visidata/threads.py\", line 201, in _toplevelTryFunc\r\n t.status = func(*args, **kwargs)\r\n File \"/Documents/pyv/py3/lib/python3.7/site-packages/visidata/loaders/html.py\", line 124, in save_html\r\n fp.write(html.escape(val))\r\n File \"/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/html/__init__.py\", line 19, in escape\r\n s = s.replace(\"&\", \"&\") # Must be done first!\r\nAttributeError: 'bool' object has no attribute 'replace'\r\n```\n", "before_files": [{"content": "import html\nfrom visidata import *\n\n\nclass HtmlTablesSheet(IndexSheet):\n rowtype = 'sheets' # rowdef: HtmlTableSheet (sheet.html = lxml.html.HtmlElement)\n columns = IndexSheet.columns + [\n Column('tag', width=0, getter=lambda col,row: row.html.tag),\n Column('id', getter=lambda col,row: row.html.attrib.get('id')),\n Column('classes', getter=lambda col,row: row.html.attrib.get('class')),\n ]\n def iterload(self):\n import lxml.html\n from lxml import etree\n utf8_parser = etree.HTMLParser(encoding='utf-8')\n with self.source.open_text() as fp:\n html = lxml.html.etree.parse(fp, parser=utf8_parser)\n self.setKeys([self.column('name')])\n self.column('keys').hide()\n self.column('source').hide()\n\n for i, e in enumerate(html.iter('table')):\n if e.tag == 'table':\n vs = HtmlTableSheet(e.attrib.get(\"id\", \"table_\" + str(i)), source=e)\n vs.reload()\n vs.html = e\n yield vs\n\n\ndef is_header(elem):\n scope = elem.attrib.get('scope', '')\n\n if elem.tag == 'th':\n if not scope or scope == 'col':\n return True\n\n return False\n\nclass HtmlTableSheet(Sheet):\n rowtype = 'rows' # list of strings\n columns = []\n\n def iterload(self):\n headers = []\n\n maxlinks = {} # [colnum] -> nlinks:int\n\n for rownum, r in enumerate(self.source.iter('tr')):\n row = []\n\n colnum = 0\n # get starting column, which might be different if there were rowspan>1 already\n if rownum < len(headers):\n while colnum < len(headers[rownum]):\n if headers[rownum][colnum] is None:\n break\n colnum += 1\n\n for cell in r.getchildren():\n colspan = 
int(cell.attrib.get('colspan', 1))\n rowspan = int(cell.attrib.get('rowspan', 1))\n cellval = ' '.join(x.strip() for x in cell.itertext()) # text only without markup\n links = [x.get('href') for x in cell.iter('a')]\n maxlinks[colnum] = max(maxlinks.get(colnum, 0), len(links))\n\n if is_header(cell):\n for k in range(rownum, rownum+rowspan):\n while k >= len(headers): # extend headers list with lists for all header rows\n headers.append([])\n\n for j in range(colnum, colnum+colspan):\n while j >= len(headers[k]):\n headers[k].append(None)\n headers[k][j] = cellval\n cellval = '' # use empty non-None value for subsequent rows in the rowspan\n else:\n while colnum >= len(row):\n row.append(None)\n row[colnum] = (cellval, links)\n\n colnum += colspan\n\n if any(row):\n yield row\n\n self.columns = []\n if headers:\n it = itertools.zip_longest(*headers, fillvalue='')\n else:\n it = [list(x) for x in self.rows[0]]\n self.rows = self.rows[1:]\n\n for colnum, names in enumerate(it):\n name = '_'.join(str(x) for x in names if x)\n self.addColumn(Column(name, getter=lambda c,r,i=colnum: r[i][0]))\n for linknum in range(maxlinks.get(colnum, 0)):\n self.addColumn(Column(name+'_link'+str(linknum), width=20, getter=lambda c,r,i=colnum,j=linknum: r[i][1][j]))\n\n\[email protected]\ndef save_html(vd, p, *vsheets):\n 'Save vsheets as HTML tables in a single file'\n\n with open(p, 'w', encoding='ascii', errors='xmlcharrefreplace') as fp:\n for sheet in vsheets:\n\n fp.write('<h2 class=\"sheetname\">%s</h2>\\n'.format(sheetname=html.escape(sheet.name)))\n\n fp.write('<table id=\"{sheetname}\">\\n'.format(sheetname=html.escape(sheet.name)))\n\n # headers\n fp.write('<tr>')\n for col in sheet.visibleCols:\n contents = html.escape(col.name)\n fp.write('<th>{colname}</th>'.format(colname=contents))\n fp.write('</tr>\\n')\n\n # rows\n with Progress(gerund='saving'):\n for typedvals in sheet.iterdispvals(format=False):\n fp.write('<tr>')\n for col, val in typedvals.items():\n fp.write('<td>')\n fp.write(html.escape(val))\n fp.write('</td>')\n fp.write('</tr>\\n')\n\n fp.write('</table>')\n vd.status('%s save finished' % p)\n\n\nVisiData.save_htm = VisiData.save_html\n\n\nvd.filetype('html', HtmlTablesSheet)\nvd.filetype('htm', HtmlTablesSheet)\n", "path": "visidata/loaders/html.py"}], "after_files": [{"content": "import html\nfrom visidata import *\n\n\nclass HtmlTablesSheet(IndexSheet):\n rowtype = 'sheets' # rowdef: HtmlTableSheet (sheet.html = lxml.html.HtmlElement)\n columns = IndexSheet.columns + [\n Column('tag', width=0, getter=lambda col,row: row.html.tag),\n Column('id', getter=lambda col,row: row.html.attrib.get('id')),\n Column('classes', getter=lambda col,row: row.html.attrib.get('class')),\n ]\n def iterload(self):\n import lxml.html\n from lxml import etree\n utf8_parser = etree.HTMLParser(encoding='utf-8')\n with self.source.open_text() as fp:\n html = lxml.html.etree.parse(fp, parser=utf8_parser)\n self.setKeys([self.column('name')])\n self.column('keys').hide()\n self.column('source').hide()\n\n for i, e in enumerate(html.iter('table')):\n if e.tag == 'table':\n vs = HtmlTableSheet(e.attrib.get(\"id\", \"table_\" + str(i)), source=e)\n vs.reload()\n vs.html = e\n yield vs\n\n\ndef is_header(elem):\n scope = elem.attrib.get('scope', '')\n\n if elem.tag == 'th':\n if not scope or scope == 'col':\n return True\n\n return False\n\nclass HtmlTableSheet(Sheet):\n rowtype = 'rows' # list of strings\n columns = []\n\n def iterload(self):\n headers = []\n\n maxlinks = {} # [colnum] -> nlinks:int\n\n for 
rownum, r in enumerate(self.source.iter('tr')):\n row = []\n\n colnum = 0\n # get starting column, which might be different if there were rowspan>1 already\n if rownum < len(headers):\n while colnum < len(headers[rownum]):\n if headers[rownum][colnum] is None:\n break\n colnum += 1\n\n for cell in r.getchildren():\n colspan = int(cell.attrib.get('colspan', 1))\n rowspan = int(cell.attrib.get('rowspan', 1))\n cellval = ' '.join(x.strip() for x in cell.itertext()) # text only without markup\n links = [x.get('href') for x in cell.iter('a')]\n maxlinks[colnum] = max(maxlinks.get(colnum, 0), len(links))\n\n if is_header(cell):\n for k in range(rownum, rownum+rowspan):\n while k >= len(headers): # extend headers list with lists for all header rows\n headers.append([])\n\n for j in range(colnum, colnum+colspan):\n while j >= len(headers[k]):\n headers[k].append(None)\n headers[k][j] = cellval\n cellval = '' # use empty non-None value for subsequent rows in the rowspan\n else:\n while colnum >= len(row):\n row.append(None)\n row[colnum] = (cellval, links)\n\n colnum += colspan\n\n if any(row):\n yield row\n\n self.columns = []\n if headers:\n it = itertools.zip_longest(*headers, fillvalue='')\n else:\n it = [list(x) for x in self.rows[0]]\n self.rows = self.rows[1:]\n\n for colnum, names in enumerate(it):\n name = '_'.join(str(x) for x in names if x)\n self.addColumn(Column(name, getter=lambda c,r,i=colnum: r[i][0]))\n for linknum in range(maxlinks.get(colnum, 0)):\n self.addColumn(Column(name+'_link'+str(linknum), width=20, getter=lambda c,r,i=colnum,j=linknum: r[i][1][j]))\n\n\[email protected]\ndef save_html(vd, p, *vsheets):\n 'Save vsheets as HTML tables in a single file'\n\n with open(p, 'w', encoding='ascii', errors='xmlcharrefreplace') as fp:\n for sheet in vsheets:\n\n fp.write('<h2 class=\"sheetname\">%s</h2>\\n'.format(sheetname=html.escape(sheet.name)))\n\n fp.write('<table id=\"{sheetname}\">\\n'.format(sheetname=html.escape(sheet.name)))\n\n # headers\n fp.write('<tr>')\n for col in sheet.visibleCols:\n contents = html.escape(col.name)\n fp.write('<th>{colname}</th>'.format(colname=contents))\n fp.write('</tr>\\n')\n\n # rows\n with Progress(gerund='saving'):\n for typedvals in sheet.iterdispvals(format=False):\n fp.write('<tr>')\n for col, val in typedvals.items():\n fp.write('<td>')\n fp.write(html.escape(str(val)))\n fp.write('</td>')\n fp.write('</tr>\\n')\n\n fp.write('</table>')\n vd.status('%s save finished' % p)\n\n\nVisiData.save_htm = VisiData.save_html\n\n\nvd.filetype('html', HtmlTablesSheet)\nvd.filetype('htm', HtmlTablesSheet)\n", "path": "visidata/loaders/html.py"}]} |
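For illustration, the patch captured in this row amounts to stringifying typed cell values before HTML-escaping them, since `html.escape` only accepts `str`. Below is a minimal standalone sketch of the failure mode and the fix; it does not import visidata, and the sample values are invented.

```python
import html

values = [3, True, "a & b"]  # typed cells like these triggered the AttributeError

cells = []
for val in values:
    # html.escape(val) would fail for int/bool because they have no .replace();
    # wrapping in str() mirrors the patched fp.write(html.escape(str(val))).
    cells.append("<td>{}</td>".format(html.escape(str(val))))

print("<tr>" + "".join(cells) + "</tr>")
# <tr><td>3</td><td>True</td><td>a &amp; b</td></tr>
```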
gh_patches_debug_1378 | rasdani/github-patches | git_diff | great-expectations__great_expectations-3469 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py`
Content:
```
1 import logging
2 from functools import reduce
3
4 from great_expectations.execution_engine import (
5 PandasExecutionEngine,
6 SparkDFExecutionEngine,
7 SqlAlchemyExecutionEngine,
8 )
9 from great_expectations.expectations.metrics.import_manager import F, sa
10 from great_expectations.expectations.metrics.map_metric_provider import (
11 MulticolumnMapMetricProvider,
12 multicolumn_condition_partial,
13 )
14
15 logger = logging.getLogger(__name__)
16
17
18 class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):
19 condition_metric_name = "select_column_values.unique.within_record"
20 condition_domain_keys = (
21 "batch_id",
22 "table",
23 "column_list",
24 "row_condition",
25 "condition_parser",
26 "ignore_row_if",
27 )
28
29 @multicolumn_condition_partial(engine=PandasExecutionEngine)
30 def _pandas(cls, column_list, **kwargs):
31 num_columns = len(column_list.columns)
32 row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns
33 return row_wise_cond
34
35 @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
36 def _sqlalchemy(cls, column_list, **kwargs):
37 """
38 The present approach relies on an inefficient query condition construction implementation, whose computational
39 cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is
40 available, this is the only feasible mechanism under the current architecture, where map metric providers must
41 return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).
42 """
43 num_columns = len(column_list)
44
45 # An arbitrary "num_columns" value used for issuing an explanatory message as a warning.
46 if num_columns > 100:
47 logger.warning(
48 f"""Batch data with {num_columns} columns is detected. Computing the "{cls.condition_metric_name}" \
49 metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.
50 """
51 )
52
53 conditions = sa.or_(
54 *(
55 sa.or_(
56 column_list[idx_src] == column_list[idx_dest],
57 sa.and_(
58 column_list[idx_src] == None, column_list[idx_dest] == None
59 ),
60 )
61 for idx_src in range(num_columns - 1)
62 for idx_dest in range(idx_src + 1, num_columns)
63 )
64 )
65 row_wise_cond = sa.not_(sa.or_(conditions))
66 return row_wise_cond
67
68 @multicolumn_condition_partial(engine=SparkDFExecutionEngine)
69 def _spark(cls, column_list, **kwargs):
70 column_names = column_list.columns
71 num_columns = len(column_names)
72
73 conditions = []
74 for idx_src in range(num_columns - 1):
75 for idx_dest in range(idx_src + 1, num_columns):
76 conditions.append(
77 F.col(column_names[idx_src]).eqNullSafe(
78 F.col(column_names[idx_dest])
79 )
80 )
81
82 row_wise_cond = ~reduce(lambda a, b: a | b, conditions)
83 return row_wise_cond
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py
--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py
+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py
@@ -62,7 +62,7 @@
for idx_dest in range(idx_src + 1, num_columns)
)
)
- row_wise_cond = sa.not_(sa.or_(conditions))
+ row_wise_cond = sa.not_(conditions)
return row_wise_cond
@multicolumn_condition_partial(engine=SparkDFExecutionEngine)
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n@@ -62,7 +62,7 @@\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n- row_wise_cond = sa.not_(sa.or_(conditions))\n+ row_wise_cond = sa.not_(conditions)\n return row_wise_cond\n \n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom functools import reduce\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_list, **kwargs):\n \"\"\"\n The present approach relies on an inefficient query condition construction implementation, whose computational\n cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n available, this is the only feasible mechanism under the current architecture, where map metric providers must\n return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n \"\"\"\n num_columns = len(column_list)\n\n # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n if num_columns > 100:\n logger.warning(\n f\"\"\"Batch data with {num_columns} columns is detected. 
Computing the \"{cls.condition_metric_name}\" \\\nmetric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n\"\"\"\n )\n\n conditions = sa.or_(\n *(\n sa.or_(\n column_list[idx_src] == column_list[idx_dest],\n sa.and_(\n column_list[idx_src] == None, column_list[idx_dest] == None\n ),\n )\n for idx_src in range(num_columns - 1)\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n row_wise_cond = sa.not_(sa.or_(conditions))\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column_list, **kwargs):\n column_names = column_list.columns\n num_columns = len(column_names)\n\n conditions = []\n for idx_src in range(num_columns - 1):\n for idx_dest in range(idx_src + 1, num_columns):\n conditions.append(\n F.col(column_names[idx_src]).eqNullSafe(\n F.col(column_names[idx_dest])\n )\n )\n\n row_wise_cond = ~reduce(lambda a, b: a | b, conditions)\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}], "after_files": [{"content": "import logging\nfrom functools import reduce\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_list, **kwargs):\n \"\"\"\n The present approach relies on an inefficient query condition construction implementation, whose computational\n cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n available, this is the only feasible mechanism under the current architecture, where map metric providers must\n return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n \"\"\"\n num_columns = len(column_list)\n\n # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n if num_columns > 100:\n logger.warning(\n f\"\"\"Batch data with {num_columns} columns is detected. 
Computing the \"{cls.condition_metric_name}\" \\\nmetric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n\"\"\"\n )\n\n conditions = sa.or_(\n *(\n sa.or_(\n column_list[idx_src] == column_list[idx_dest],\n sa.and_(\n column_list[idx_src] == None, column_list[idx_dest] == None\n ),\n )\n for idx_src in range(num_columns - 1)\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n row_wise_cond = sa.not_(conditions)\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column_list, **kwargs):\n column_names = column_list.columns\n num_columns = len(column_names)\n\n conditions = []\n for idx_src in range(num_columns - 1):\n for idx_dest in range(idx_src + 1, num_columns):\n conditions.append(\n F.col(column_names[idx_src]).eqNullSafe(\n F.col(column_names[idx_dest])\n )\n )\n\n row_wise_cond = ~reduce(lambda a, b: a | b, conditions)\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}]} |
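For illustration, the golden diff in this row removes a redundant `sa.or_()` wrapper: `conditions` is already a single OR-clause over all column pairs, so negating it directly is enough. Below is a minimal standalone sketch assuming SQLAlchemy is installed; the table and column names are invented for illustration.

```python
import sqlalchemy as sa

t = sa.table("demo", sa.column("a"), sa.column("b"))

# `conditions` is already an OR over the column pairs, as in the metric provider.
conditions = sa.or_(t.c.a == t.c.b, sa.and_(t.c.a == None, t.c.b == None))

row_wise_cond = sa.not_(conditions)            # patched form
# row_wise_cond = sa.not_(sa.or_(conditions))  # original: same meaning, extra wrapper

print(row_wise_cond)  # roughly: NOT (demo.a = demo.b OR demo.a IS NULL AND demo.b IS NULL)
```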
gh_patches_debug_1379 | rasdani/github-patches | git_diff | docker__docker-py-1819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log message not replacing string format placeholder
[This line](https://github.com/docker/docker-py/blob/e9fab1432b974ceaa888b371e382dfcf2f6556e4/docker/auth.py#L205) in the `parse_auth` function in `auth.py` is using a string format placeholder but is not calling `format` on the string to replace it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/auth.py`
Content:
```
1 import base64
2 import json
3 import logging
4 import os
5
6 import dockerpycreds
7 import six
8
9 from . import errors
10 from .constants import IS_WINDOWS_PLATFORM
11
12 INDEX_NAME = 'docker.io'
13 INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
14 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
15 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
16 TOKEN_USERNAME = '<token>'
17
18 log = logging.getLogger(__name__)
19
20
21 def resolve_repository_name(repo_name):
22 if '://' in repo_name:
23 raise errors.InvalidRepository(
24 'Repository name cannot contain a scheme ({0})'.format(repo_name)
25 )
26
27 index_name, remote_name = split_repo_name(repo_name)
28 if index_name[0] == '-' or index_name[-1] == '-':
29 raise errors.InvalidRepository(
30 'Invalid index name ({0}). Cannot begin or end with a'
31 ' hyphen.'.format(index_name)
32 )
33 return resolve_index_name(index_name), remote_name
34
35
36 def resolve_index_name(index_name):
37 index_name = convert_to_hostname(index_name)
38 if index_name == 'index.' + INDEX_NAME:
39 index_name = INDEX_NAME
40 return index_name
41
42
43 def get_config_header(client, registry):
44 log.debug('Looking for auth config')
45 if not client._auth_configs:
46 log.debug(
47 "No auth config in memory - loading from filesystem"
48 )
49 client._auth_configs = load_config()
50 authcfg = resolve_authconfig(client._auth_configs, registry)
51 # Do not fail here if no authentication exists for this
52 # specific registry as we can have a readonly pull. Just
53 # put the header if we can.
54 if authcfg:
55 log.debug('Found auth config')
56 # auth_config needs to be a dict in the format used by
57 # auth.py username , password, serveraddress, email
58 return encode_header(authcfg)
59 log.debug('No auth config found')
60 return None
61
62
63 def split_repo_name(repo_name):
64 parts = repo_name.split('/', 1)
65 if len(parts) == 1 or (
66 '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
67 ):
68 # This is a docker index repo (ex: username/foobar or ubuntu)
69 return INDEX_NAME, repo_name
70 return tuple(parts)
71
72
73 def get_credential_store(authconfig, registry):
74 if not registry or registry == INDEX_NAME:
75 registry = 'https://index.docker.io/v1/'
76
77 return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
78 'credsStore'
79 )
80
81
82 def resolve_authconfig(authconfig, registry=None):
83 """
84 Returns the authentication data from the given auth configuration for a
85 specific registry. As with the Docker client, legacy entries in the config
86 with full URLs are stripped down to hostnames before checking for a match.
87 Returns None if no match was found.
88 """
89
90 if 'credHelpers' in authconfig or 'credsStore' in authconfig:
91 store_name = get_credential_store(authconfig, registry)
92 if store_name is not None:
93 log.debug(
94 'Using credentials store "{0}"'.format(store_name)
95 )
96 return _resolve_authconfig_credstore(
97 authconfig, registry, store_name
98 )
99
100 # Default to the public index server
101 registry = resolve_index_name(registry) if registry else INDEX_NAME
102 log.debug("Looking for auth entry for {0}".format(repr(registry)))
103
104 if registry in authconfig:
105 log.debug("Found {0}".format(repr(registry)))
106 return authconfig[registry]
107
108 for key, config in six.iteritems(authconfig):
109 if resolve_index_name(key) == registry:
110 log.debug("Found {0}".format(repr(key)))
111 return config
112
113 log.debug("No entry found")
114 return None
115
116
117 def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
118 if not registry or registry == INDEX_NAME:
119 # The ecosystem is a little schizophrenic with index.docker.io VS
120 # docker.io - in that case, it seems the full URL is necessary.
121 registry = INDEX_URL
122 log.debug("Looking for auth entry for {0}".format(repr(registry)))
123 store = dockerpycreds.Store(credstore_name)
124 try:
125 data = store.get(registry)
126 res = {
127 'ServerAddress': registry,
128 }
129 if data['Username'] == TOKEN_USERNAME:
130 res['IdentityToken'] = data['Secret']
131 else:
132 res.update({
133 'Username': data['Username'],
134 'Password': data['Secret'],
135 })
136 return res
137 except dockerpycreds.CredentialsNotFound as e:
138 log.debug('No entry found')
139 return None
140 except dockerpycreds.StoreError as e:
141 raise errors.DockerException(
142 'Credentials store error: {0}'.format(repr(e))
143 )
144
145
146 def convert_to_hostname(url):
147 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
148
149
150 def decode_auth(auth):
151 if isinstance(auth, six.string_types):
152 auth = auth.encode('ascii')
153 s = base64.b64decode(auth)
154 login, pwd = s.split(b':', 1)
155 return login.decode('utf8'), pwd.decode('utf8')
156
157
158 def encode_header(auth):
159 auth_json = json.dumps(auth).encode('ascii')
160 return base64.urlsafe_b64encode(auth_json)
161
162
163 def parse_auth(entries, raise_on_error=False):
164 """
165 Parses authentication entries
166
167 Args:
168 entries: Dict of authentication entries.
169 raise_on_error: If set to true, an invalid format will raise
170 InvalidConfigFile
171
172 Returns:
173 Authentication registry.
174 """
175
176 conf = {}
177 for registry, entry in six.iteritems(entries):
178 if not isinstance(entry, dict):
179 log.debug(
180 'Config entry for key {0} is not auth config'.format(registry)
181 )
182 # We sometimes fall back to parsing the whole config as if it was
183 # the auth config by itself, for legacy purposes. In that case, we
184 # fail silently and return an empty conf if any of the keys is not
185 # formatted properly.
186 if raise_on_error:
187 raise errors.InvalidConfigFile(
188 'Invalid configuration for registry {0}'.format(registry)
189 )
190 return {}
191 if 'identitytoken' in entry:
192 log.debug('Found an IdentityToken entry for registry {0}'.format(
193 registry
194 ))
195 conf[registry] = {
196 'IdentityToken': entry['identitytoken']
197 }
198 continue # Other values are irrelevant if we have a token, skip.
199
200 if 'auth' not in entry:
201 # Starting with engine v1.11 (API 1.23), an empty dictionary is
202 # a valid value in the auths config.
203 # https://github.com/docker/compose/issues/3265
204 log.debug(
205 'Auth data for {0} is absent. Client might be using a '
206 'credentials store instead.'
207 )
208 conf[registry] = {}
209 continue
210
211 username, password = decode_auth(entry['auth'])
212 log.debug(
213 'Found entry (registry={0}, username={1})'
214 .format(repr(registry), repr(username))
215 )
216
217 conf[registry] = {
218 'username': username,
219 'password': password,
220 'email': entry.get('email'),
221 'serveraddress': registry,
222 }
223 return conf
224
225
226 def find_config_file(config_path=None):
227 paths = list(filter(None, [
228 config_path, # 1
229 config_path_from_environment(), # 2
230 os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
231 os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
232 ]))
233
234 log.debug("Trying paths: {0}".format(repr(paths)))
235
236 for path in paths:
237 if os.path.exists(path):
238 log.debug("Found file at path: {0}".format(path))
239 return path
240
241 log.debug("No config file found")
242
243 return None
244
245
246 def config_path_from_environment():
247 config_dir = os.environ.get('DOCKER_CONFIG')
248 if not config_dir:
249 return None
250 return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
251
252
253 def home_dir():
254 """
255 Get the user's home directory, using the same logic as the Docker Engine
256 client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
257 """
258 if IS_WINDOWS_PLATFORM:
259 return os.environ.get('USERPROFILE', '')
260 else:
261 return os.path.expanduser('~')
262
263
264 def load_config(config_path=None):
265 """
266 Loads authentication data from a Docker configuration file in the given
267 root directory or if config_path is passed use given path.
268 Lookup priority:
269 explicit config_path parameter > DOCKER_CONFIG environment variable >
270 ~/.docker/config.json > ~/.dockercfg
271 """
272 config_file = find_config_file(config_path)
273
274 if not config_file:
275 return {}
276
277 try:
278 with open(config_file) as f:
279 data = json.load(f)
280 res = {}
281 if data.get('auths'):
282 log.debug("Found 'auths' section")
283 res.update(parse_auth(data['auths'], raise_on_error=True))
284 if data.get('HttpHeaders'):
285 log.debug("Found 'HttpHeaders' section")
286 res.update({'HttpHeaders': data['HttpHeaders']})
287 if data.get('credsStore'):
288 log.debug("Found 'credsStore' section")
289 res.update({'credsStore': data['credsStore']})
290 if data.get('credHelpers'):
291 log.debug("Found 'credHelpers' section")
292 res.update({'credHelpers': data['credHelpers']})
293 if res:
294 return res
295 else:
296 log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
297 f.seek(0)
298 return parse_auth(json.load(f))
299 except (IOError, KeyError, ValueError) as e:
300 # Likely missing new Docker config file or it's in an
301 # unknown format, continue to attempt to read old location
302 # and format.
303 log.debug(e)
304
305 log.debug("Attempting to parse legacy auth file format")
306 try:
307 data = []
308 with open(config_file) as f:
309 for line in f.readlines():
310 data.append(line.strip().split(' = ')[1])
311 if len(data) < 2:
312 # Not enough data
313 raise errors.InvalidConfigFile(
314 'Invalid or empty configuration file!'
315 )
316
317 username, password = decode_auth(data[0])
318 return {
319 INDEX_NAME: {
320 'username': username,
321 'password': password,
322 'email': data[1],
323 'serveraddress': INDEX_URL,
324 }
325 }
326 except Exception as e:
327 log.debug(e)
328 pass
329
330 log.debug("All parsing attempts failed - returning empty config")
331 return {}
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/auth.py b/docker/auth.py
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -203,7 +203,7 @@
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'
+ 'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
| {"golden_diff": "diff --git a/docker/auth.py b/docker/auth.py\n--- a/docker/auth.py\n+++ b/docker/auth.py\n@@ -203,7 +203,7 @@\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n- 'credentials store instead.'\n+ 'credentials store instead.'.format(registry)\n )\n conf[registry] = {}\n continue\n", "issue": "Log message not replacing string format placeholder\n[This line](https://github.com/docker/docker-py/blob/e9fab1432b974ceaa888b371e382dfcf2f6556e4/docker/auth.py#L205) in the `parse_auth` function in `auth.py` is using a string format placeholder but is not calling `format` on the string to replace it.\n", "before_files": [{"content": "import base64\nimport json\nimport logging\nimport os\n\nimport dockerpycreds\nimport six\n\nfrom . import errors\nfrom .constants import IS_WINDOWS_PLATFORM\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\nTOKEN_USERNAME = '<token>'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef get_config_header(client, registry):\n log.debug('Looking for auth config')\n if not client._auth_configs:\n log.debug(\n \"No auth config in memory - loading from filesystem\"\n )\n client._auth_configs = load_config()\n authcfg = resolve_authconfig(client._auth_configs, registry)\n # Do not fail here if no authentication exists for this\n # specific registry as we can have a readonly pull. Just\n # put the header if we can.\n if authcfg:\n log.debug('Found auth config')\n # auth_config needs to be a dict in the format used by\n # auth.py username , password, serveraddress, email\n return encode_header(authcfg)\n log.debug('No auth config found')\n return None\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef get_credential_store(authconfig, registry):\n if not registry or registry == INDEX_NAME:\n registry = 'https://index.docker.io/v1/'\n\n return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(\n 'credsStore'\n )\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n\n if 'credHelpers' in authconfig or 'credsStore' in authconfig:\n store_name = get_credential_store(authconfig, registry)\n if store_name is not None:\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n return _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef _resolve_authconfig_credstore(authconfig, registry, credstore_name):\n if not registry or registry == INDEX_NAME:\n # The ecosystem is a little schizophrenic with index.docker.io VS\n # docker.io - in that case, it seems the full URL is necessary.\n registry = INDEX_URL\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n store = dockerpycreds.Store(credstore_name)\n try:\n data = store.get(registry)\n res = {\n 'ServerAddress': registry,\n }\n if data['Username'] == TOKEN_USERNAME:\n res['IdentityToken'] = data['Secret']\n else:\n res.update({\n 'Username': data['Username'],\n 'Password': data['Secret'],\n })\n return res\n except dockerpycreds.CredentialsNotFound as e:\n log.debug('No entry found')\n return None\n except dockerpycreds.StoreError as e:\n raise errors.DockerException(\n 'Credentials store error: {0}'.format(repr(e))\n )\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. 
In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'identitytoken' in entry:\n log.debug('Found an IdentityToken entry for registry {0}'.format(\n registry\n ))\n conf[registry] = {\n 'IdentityToken': entry['identitytoken']\n }\n continue # Other values are irrelevant if we have a token, skip.\n\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n 'credentials store instead.'\n )\n conf[registry] = {}\n continue\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n paths = list(filter(None, [\n config_path, # 1\n config_path_from_environment(), # 2\n os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4\n ]))\n\n log.debug(\"Trying paths: {0}\".format(repr(paths)))\n\n for path in paths:\n if os.path.exists(path):\n log.debug(\"Found file at path: {0}\".format(path))\n return path\n\n log.debug(\"No config file found\")\n\n return None\n\n\ndef config_path_from_environment():\n config_dir = os.environ.get('DOCKER_CONFIG')\n if not config_dir:\n return None\n return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))\n\n\ndef home_dir():\n \"\"\"\n Get the user's home directory, using the same logic as the Docker Engine\n client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.\n \"\"\"\n if IS_WINDOWS_PLATFORM:\n return os.environ.get('USERPROFILE', '')\n else:\n return os.path.expanduser('~')\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if data.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': data['credsStore']})\n if data.get('credHelpers'):\n log.debug(\"Found 'credHelpers' section\")\n res.update({'credHelpers': data['credHelpers']})\n if res:\n return res\n else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n 
data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth.py"}], "after_files": [{"content": "import base64\nimport json\nimport logging\nimport os\n\nimport dockerpycreds\nimport six\n\nfrom . import errors\nfrom .constants import IS_WINDOWS_PLATFORM\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\nTOKEN_USERNAME = '<token>'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef get_config_header(client, registry):\n log.debug('Looking for auth config')\n if not client._auth_configs:\n log.debug(\n \"No auth config in memory - loading from filesystem\"\n )\n client._auth_configs = load_config()\n authcfg = resolve_authconfig(client._auth_configs, registry)\n # Do not fail here if no authentication exists for this\n # specific registry as we can have a readonly pull. Just\n # put the header if we can.\n if authcfg:\n log.debug('Found auth config')\n # auth_config needs to be a dict in the format used by\n # auth.py username , password, serveraddress, email\n return encode_header(authcfg)\n log.debug('No auth config found')\n return None\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef get_credential_store(authconfig, registry):\n if not registry or registry == INDEX_NAME:\n registry = 'https://index.docker.io/v1/'\n\n return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(\n 'credsStore'\n )\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n\n if 'credHelpers' in authconfig or 'credsStore' in authconfig:\n store_name = get_credential_store(authconfig, registry)\n if store_name is not None:\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n return _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef _resolve_authconfig_credstore(authconfig, registry, credstore_name):\n if not registry or registry == INDEX_NAME:\n # The ecosystem is a little schizophrenic with index.docker.io VS\n # docker.io - in that case, it seems the full URL is necessary.\n registry = INDEX_URL\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n store = dockerpycreds.Store(credstore_name)\n try:\n data = store.get(registry)\n res = {\n 'ServerAddress': registry,\n }\n if data['Username'] == TOKEN_USERNAME:\n res['IdentityToken'] = data['Secret']\n else:\n res.update({\n 'Username': data['Username'],\n 'Password': data['Secret'],\n })\n return res\n except dockerpycreds.CredentialsNotFound as e:\n log.debug('No entry found')\n return None\n except dockerpycreds.StoreError as e:\n raise errors.DockerException(\n 'Credentials store error: {0}'.format(repr(e))\n )\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. 
In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'identitytoken' in entry:\n log.debug('Found an IdentityToken entry for registry {0}'.format(\n registry\n ))\n conf[registry] = {\n 'IdentityToken': entry['identitytoken']\n }\n continue # Other values are irrelevant if we have a token, skip.\n\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n 'credentials store instead.'.format(registry)\n )\n conf[registry] = {}\n continue\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n paths = list(filter(None, [\n config_path, # 1\n config_path_from_environment(), # 2\n os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4\n ]))\n\n log.debug(\"Trying paths: {0}\".format(repr(paths)))\n\n for path in paths:\n if os.path.exists(path):\n log.debug(\"Found file at path: {0}\".format(path))\n return path\n\n log.debug(\"No config file found\")\n\n return None\n\n\ndef config_path_from_environment():\n config_dir = os.environ.get('DOCKER_CONFIG')\n if not config_dir:\n return None\n return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))\n\n\ndef home_dir():\n \"\"\"\n Get the user's home directory, using the same logic as the Docker Engine\n client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.\n \"\"\"\n if IS_WINDOWS_PLATFORM:\n return os.environ.get('USERPROFILE', '')\n else:\n return os.path.expanduser('~')\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if data.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': data['credsStore']})\n if data.get('credHelpers'):\n log.debug(\"Found 'credHelpers' section\")\n res.update({'credHelpers': data['credHelpers']})\n if res:\n return res\n else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in 
f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth.py"}]} |
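For illustration, the fix in this row adds the missing `.format(registry)` call so the `{0}` placeholder is actually substituted into the debug message. Below is a minimal standalone sketch of the difference; the registry value is invented.

```python
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("auth-demo")

registry = "registry.example.com"  # hypothetical value for illustration

# Original call: the literal "{0}" placeholder is logged, never substituted.
log.debug('Auth data for {0} is absent. Client might be using a '
          'credentials store instead.')

# Patched call, as in the golden diff: the registry is interpolated.
log.debug('Auth data for {0} is absent. Client might be using a '
          'credentials store instead.'.format(registry))
```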
gh_patches_debug_1380 | rasdani/github-patches | git_diff | LMFDB__lmfdb-1751 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
alpha / a mixup in q-expansions
As reported via the bug tracker:
"Not at all a serious problem, but a curious issue:
When looking at Fourier expansions of modular forms with coefficients in a number field, the number field is given as Q(alpha), and the initial q-expansion involves alpha, but for some reason when you click on "show more coefficients", the alpha's become a's."
For example
http://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/23/2/1/a/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #*****************************************************************************
3 # Copyright (C) 2010 Fredrik Strömberg <[email protected]>,
4 #
5 # Distributed under the terms of the GNU General Public License (GPL)
6 #
7 # This code is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 # General Public License for more details.
11 #
12 # The full text of the GPL is available at:
13 #
14 # http://www.gnu.org/licenses/
15 #*****************************************************************************
16 r"""
17 Main file for viewing elliptical modular forms.
18
19 AUTHORS:
20 - Fredrik Strömberg
21 - Stephan Ehlen
22
23 """
24 from flask import url_for, request, redirect, make_response, send_from_directory,flash
25 import os, tempfile
26 import sage
27 from lmfdb.base import getDBConnection
28 from lmfdb.modular_forms.backend.mf_utils import my_get
29 from lmfdb.utils import to_dict, random_object_from_collection
30 from lmfdb.modular_forms.elliptic_modular_forms import EMF, emf_logger, emf
31 from lmfdb.modular_forms.elliptic_modular_forms.backend.web_modform_space import WebModFormSpace_cached
32 from lmfdb.modular_forms.elliptic_modular_forms.backend.emf_utils import (
33 render_fd_plot,
34 extract_data_from_jump_to,
35 newform_label,
36 parse_newform_label)
37 from emf_render_web_newform import render_web_newform
38 from emf_render_web_modform_space import render_web_modform_space
39 from emf_render_web_modform_space_gamma1 import render_web_modform_space_gamma1
40
41 from emf_render_navigation import render_elliptic_modular_form_navigation_wp
42
43 emf_logger.setLevel(int(100))
44
45 @emf.context_processor
46 def body_class():
47 return {'body_class': EMF}
48
49 emfdb = None
50
51 def db_emf():
52 global emfdb
53 if emfdb is None:
54 emfdb = getDBConnection().modularforms2.webnewforms
55 return emfdb
56
57 #################
58 # Top level
59 #################
60
61 ###########################################
62 # Search / Navigate
63 ###########################################
64
65 met = ['GET', 'POST']
66
67 @emf.route("/ranges", methods=["GET"])
68 @emf.route("/ranges/", methods=["GET"])
69 def browse_web_modform_spaces_in_ranges(**kwds):
70 r"""
71 Browse spaces with level and weight within given ranges. level and weight should be of the form N1-N2 and k1-k2
72
73 """
74 emf_logger.debug("request.args={0}".format(request.args))
75 level=request.args.getlist('level')
76 weight=request.args.getlist('weight')
77 group=request.args.getlist('group')
78 return render_elliptic_modular_form_navigation_wp(level=level,weight=weight,group=group)
79
80
81 @emf.route("/", methods=met)
82 @emf.route("/<level>/", methods=met)
83 @emf.route("/<level>/<weight>/", methods=met)
84 @emf.route("/<level>/<weight>/<character>/", methods=met)
85 @emf.route("/<level>/<weight>/<character>/<label>", methods=met)
86 @emf.route("/<level>/<weight>/<character>/<label>/", methods=met)
87 def render_elliptic_modular_forms(level=None, weight=None, character=None, label=None,group=None, **kwds):
88 r"""
89 Default input of same type as required. Note that for holomorphic modular forms: level=0 or weight=0 are non-existent.
90 """
91 emf_logger.debug(
92 "In render: level={0},weight={1},character={2},group={3},label={4}".format(level, weight, character, group, label))
93 emf_logger.debug("args={0}".format(request.args))
94 emf_logger.debug("args={0}".format(request.form))
95 emf_logger.debug("met={0}".format(request.method))
96 keys = ['download', 'jump_to']
97 info = get_args(request, level, weight, character, group, label, keys=keys)
98 valid = validate_parameters(level,weight,character,label,info)
99 if isinstance(valid,basestring):
100 return redirect(valid,code=301)
101 level = info['level']; weight = info['weight']; character = info['character']
102 #if info.has_key('error'):
103 # return render_elliptic_modular_form_navigation_wp(error=info['error'])
104 emf_logger.debug("info={0}".format(info))
105 emf_logger.debug("level=%s, %s" % (level, type(level)))
106 emf_logger.debug("label=%s, %s" % (label, type(label)))
107 emf_logger.debug("wt=%s, %s" % (weight, type(weight)))
108 group = info.get('group',None)
109 emf_logger.debug("group=%s, %s" % (group, type(group)))
110 if group == 0:
111 info['character'] = character = 1 # only trivial character for Gamma_0(N)
112 try:
113 if 'download' in info:
114 return get_downloads(**info)
115 emf_logger.debug("info=%s" % info)
116 ## Consistency of arguments>
117 # if level<=0: level=None
118 # if weight<=0: weight=None
119 if 'jump_to' in info: # try to find out which form we want to jump
120 s = my_get(info, 'jump_to', '', str)
121 emf_logger.info("info.keys1={0}".format(info.keys()))
122 info.pop('jump_to')
123 emf_logger.info("info.keys2={0}".format(info.keys()))
124 args = extract_data_from_jump_to(s)
125 emf_logger.debug("args=%s" % args)
126 return redirect(url_for("emf.render_elliptic_modular_forms", **args), code=301)
127 # return render_elliptic_modular_forms(**args)
128 emf_logger.debug("HERE! weight={0} level={1} char={2}".format(weight,level,character))
129 if level > 0 and weight > 0 and character > 0:
130 if label != '' and not label is None:
131 return render_web_newform(**info)
132 else:
133 return render_web_modform_space(**info)
134 if level > 0 and weight > 0 and (group != 0 or character == None):
135 return render_web_modform_space_gamma1(**info)
136 return render_elliptic_modular_form_navigation_wp(**info)
137 # Otherwise we go to the main navigation page
138 except IndexError as e: # catch everything here except KeyError below...
139 emf_logger.debug("catching exceptions. info={0} e={1}".format(info,e))
140 errst = str(e)
141 ## Try to customise some of the error messages:
142 if 'Character' and 'not exist' in errst:
143 errst += " Please choose a character from the table below!"
144 flash(errst,'error')
145 return render_elliptic_modular_form_navigation_wp(**info)
146 if 'WebNewForm_computing' in errst:
147 errst = "The space {0}.{1}.{2} is not in the database!".format(level,weight,character)
148 flash(errst)
149 return render_elliptic_modular_form_navigation_wp()
150 except KeyError as e:
151 emf_logger.debug("catching exceptions. info={0} e={1}".format(info,e))
152 errst = "The orbit {0} is not in the database!".format(newform_label(level,weight,character,label))
153 flash(errst)
154 return render_elliptic_modular_form_navigation_wp()
155
156
157 from lmfdb.modular_forms.elliptic_modular_forms.backend.emf_download_utils import get_coefficients
158
159 @emf.route("/Download/<int:level>/<int:weight>/<int:character>/<label>", methods=['GET', 'POST'])
160 def get_downloads(level=None, weight=None, character=None, label=None, **kwds):
161 keys = ['download', 'download_file', 'tempfile', 'format', 'number','bitprec']
162 info = get_args(request, level=level, weight=weight, character=character, label=label, keys=keys)
163 if 'download' not in info:
164 emf_logger.critical("Download called without specifying what to download! info={0}".format(info))
165 return ""
166 emf_logger.debug("in get_downloads: info={0}".format(info))
167 if info['download'] == 'coefficients':
168 info['tempfile'] = "/tmp/tmp_web_mod_form.txt"
169 return get_coefficients(info)
170 if info['download'] == 'file':
171 # there are only a certain number of fixed files that we want people to download
172 filename = info['download_file']
173 if filename == "web_modforms.py":
174 dirname = emf.app.root_static_folder
175 try:
176 emf_logger.debug("Dirname:{0}, Filename:{1}".format(dirname, filename))
177 return send_from_directory(dirname, filename, as_attachment=True, attachment_filename=filename)
178 except IOError:
179 info['error'] = "Could not find file! "
180
181 @emf.route("/random")
182 def random_form():
183 label = random_object_from_collection( db_emf() )['hecke_orbit_label']
184 level, weight, character, label = parse_newform_label(label)
185 args={}
186 args['level'] = level
187 args['weight'] = weight
188 args['character'] = character
189 args['label'] = label
190 return redirect(url_for(".render_elliptic_modular_forms", **args), 301)
191
192 @emf.route("/Plots/<int:grouptype>/<int:level>/")
193 def render_plot(grouptype=0, level=1):
194 domain = render_fd_plot(level, {'grouptype': grouptype})
195 if isinstance(domain, sage.plot.plot.Graphics):
196 emf_logger.debug('Got a Graphics object')
197 _, filename = tempfile.mkstemp('.png')
198 domain.save(filename)
199 data = open(filename).read()
200 os.unlink(filename)
201 else:
202 data = domain
203 response = make_response(data)
204 response.headers['Content-type'] = 'image/png'
205 return response
206
207 @emf.route("/Qexp/<int:level>/<int:weight>/<int:character>/<label>/<int:prec>")
208 def get_qexp(level, weight, character, label, prec, latex=False, **kwds):
209 emf_logger.debug(
210 "get_qexp for: level={0},weight={1},character={2},label={3}".format(level, weight, character, label))
211 #latex = my_get(request.args, "latex", False, bool)
212 emf_logger.debug(
213 "get_qexp latex: {0}, prec: {1}".format(latex, prec))
214 #if not arg:
215 # return flask.abort(404)
216 try:
217 M = WebModFormSpace_cached(level=level,weight=weight,character=character)
218 WNF = M.hecke_orbits[label]
219 WNF.prec = prec
220 if not latex:
221 c = WNF.q_expansion
222 else:
223 c = WNF.q_expansion_latex(prec=prec, name = 'a')
224 return c
225 except Exception as e:
226 return "<span style='color:red;'>ERROR: %s</span>" % e.message
227
228 @emf.route("/qexp_latex/<int:level>/<int:weight>/<int:character>/<label>/<int:prec>")
229 @emf.route("/qexp_latex/<int:level>/<int:weight>/<int:character>/<label>/")
230 def get_qexp_latex(level, weight, character, label, prec=10, **kwds):
231 return get_qexp(level, weight, character, label, prec, latex=True, **kwds)
232
233
234 ###
235 ### Helper functions.
236 ###
237
238 def get_args(request, level=0, weight=0, character=-1, group=2, label='', keys=[]):
239 r"""
240 Use default input of the same type as desired output.
241 """
242 if request.method == 'GET':
243 dd = to_dict(request.args)
244 else:
245 dd = to_dict(request.form)
246 emf_logger.debug("REQUEST:{0}".format(dd))
247 info = dict()
248 info['level'] = my_get(dd, 'level', level, int)
249 info['weight'] = my_get(dd, 'weight', weight, int)
250 info['character'] = my_get(dd, 'character', character, int)
251 emf_logger.debug("group={0}".format(group))
252 info['group'] = my_get(dd, 'group', group, int)
253 emf_logger.debug("info[group]={0}".format(info['group']))
254 info['label'] = my_get(dd, 'label', label, str)
255 for key in keys:
256 if key in dd:
257 info[key] = my_get(dd, key, '', str)
258 return info
259
260
261 from markupsafe import Markup
262 from ..backend.emf_utils import is_range
263
264 def validate_character(level, character):
265 """Assumes level is a positive integer N, checks that 0<character<=N
266 and gcd(character,N)=1. Returns None if OK, else a suitable error
267 message.
268 """
269 #print "validate_character(%s,%s)" % (level, character)
270 if not isinstance(character,int):
271 return "The character number should be an integer. You gave: %s" % character
272 from sage.all import GCD
273 if character <= 0 or character > level or GCD(level,character)!=1:
274 return "The character number should be a positive integer less than or equal to and coprime to the level %s. You gave: %s" % (level, character)
275 return 0
276
277 def validate_parameters(level=0,weight=0,character=None,label='',info={}):
278 #print app.url_map
279 emf_logger.debug("validating info={0}".format(info))
280 level= info['level']; weight=info['weight']
281 character = info['character']; label = info['label']
282 t = True
283 m = []
284 if not info.get('jump_to',None) is None:
285 return t
286 if is_range(level) or is_range(weight):
287 new_url = url_for("emf.browse_web_modform_spaces_in_ranges",**info)
288 emf_logger.debug("level or weight is a range so we redirect! url={0}".format(new_url))
289 return new_url
290
291 if not level is None and (not isinstance(level,int) or level <= 0):
292 m.append("Please provide a positive integer level! You gave: {0}".format(level)); t = False
293 if level is None:
294 info['level'] = None
295 else:
296 info['level'] = 0
297 if not weight is None and (not isinstance(weight,int) or weight <=0):
298 m.append("Please provide a positive integer weight! You gave: {0}".format(weight)); t = False
299 if weight is None:
300 info['weight']=None
301 info['weight'] = 0
302 if not character is None:
303 res = validate_character(level, character)
304 if res:
305 m.append(res); t = False
306 info['character'] = None
307 if not label is None and (not isinstance(label,basestring)):
308 m.append('Please provide a label in string format! You gave: {0}'.format(label)); t=False
309 info['label']=''
310 if label is None:
311 info['label'] = None
312 if not t:
313 msg = "<br>".join(m)
314 flash(Markup(msg),'error')
315 emf_logger.debug("validate: {0}".format(msg))
316
317 # If we don't match any arglist above we see if we have only a label
318 # or else catch malformed urls
319 @emf.route("/<level>")
320 @emf.route("/<level>/")
321 @emf.route("/<level>/<weight>")
322 @emf.route("/<level>/<weight>/")
323 @emf.route("/<level>/<weight>/<character>")
324 @emf.route("/<level>/<weight>/<character>/")
325 @emf.route("/<level>/<weight>/<character>/<label>")
326 @emf.route("/<level>/<weight>/<character>/<label>/")
327 @emf.route("/<level>/<weight>/<character>/<label>/<emb>")
328 @emf.route("/<level>/<weight>/<character>/<label>/<emb>/")
329 def redirect_false_route(level=None,weight=None,character=None,label='',emb=None):
330 ## jumps only have one field (here level)
331 if weight is None:
332 args = extract_data_from_jump_to(level)
333 emf_logger.debug("args={0}".format(args))
334 else:
335 args = {'level':level,'weight':weight,'character':character,'label':label}
336 #validate_parameters(level,weight,character,label,args)
337
338 return redirect(url_for("emf.render_elliptic_modular_forms",**args), code=301)
339 # return render_elliptic_modular_form_navigation_wp(**info)
340
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py b/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py
--- a/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py
+++ b/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py
@@ -220,7 +220,7 @@
if not latex:
c = WNF.q_expansion
else:
- c = WNF.q_expansion_latex(prec=prec, name = 'a')
+ c = WNF.q_expansion_latex(prec=prec, name = '\\alpha ')
return c
except Exception as e:
return "<span style='color:red;'>ERROR: %s</span>" % e.message
| {"golden_diff": "diff --git a/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py b/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py\n--- a/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py\n+++ b/lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py\n@@ -220,7 +220,7 @@\n if not latex:\n c = WNF.q_expansion\n else:\n- c = WNF.q_expansion_latex(prec=prec, name = 'a')\n+ c = WNF.q_expansion_latex(prec=prec, name = '\\\\alpha ')\n return c\n except Exception as e:\n return \"<span style='color:red;'>ERROR: %s</span>\" % e.message\n", "issue": "alpha / a mixup in q-expansions\nAs reported via the bug tracker:\n\n\"Not at all a serious problem, but a curious issue:\n\nWhen looking at Fourier expansions of modular forms with coefficients in a number field, the number field is given as Q(alpha), and the initial q-expansion involves alpha, but for some reason when you click on \"show more coefficients\", the alpha's become a's.\"\n\nFor example\n\nhttp://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/23/2/1/a/\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#*****************************************************************************\n# Copyright (C) 2010 Fredrik Str\u00f6mberg <[email protected]>,\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# http://www.gnu.org/licenses/\n#*****************************************************************************\nr\"\"\"\nMain file for viewing elliptical modular forms.\n\nAUTHORS: \n - Fredrik Str\u00f6mberg\n - Stephan Ehlen\n\n\"\"\"\nfrom flask import url_for, request, redirect, make_response, send_from_directory,flash\nimport os, tempfile\nimport sage\nfrom lmfdb.base import getDBConnection\nfrom lmfdb.modular_forms.backend.mf_utils import my_get\nfrom lmfdb.utils import to_dict, random_object_from_collection\nfrom lmfdb.modular_forms.elliptic_modular_forms import EMF, emf_logger, emf\nfrom lmfdb.modular_forms.elliptic_modular_forms.backend.web_modform_space import WebModFormSpace_cached\nfrom lmfdb.modular_forms.elliptic_modular_forms.backend.emf_utils import (\n render_fd_plot,\n extract_data_from_jump_to,\n newform_label,\n parse_newform_label)\nfrom emf_render_web_newform import render_web_newform\nfrom emf_render_web_modform_space import render_web_modform_space\nfrom emf_render_web_modform_space_gamma1 import render_web_modform_space_gamma1\n\nfrom emf_render_navigation import render_elliptic_modular_form_navigation_wp\n\nemf_logger.setLevel(int(100))\n\[email protected]_processor\ndef body_class():\n return {'body_class': EMF}\n\nemfdb = None\n\ndef db_emf():\n global emfdb\n if emfdb is None:\n emfdb = getDBConnection().modularforms2.webnewforms\n return emfdb\n\n#################\n# Top level\n#################\n\n###########################################\n# Search / Navigate\n###########################################\n\nmet = ['GET', 'POST']\n\[email protected](\"/ranges\", methods=[\"GET\"])\[email protected](\"/ranges/\", methods=[\"GET\"])\ndef browse_web_modform_spaces_in_ranges(**kwds):\n r\"\"\"\n Browse spaces with level and weight within given ranges. 
level and weight should be of the form N1-N2 and k1-k2\n\n \"\"\"\n emf_logger.debug(\"request.args={0}\".format(request.args))\n level=request.args.getlist('level')\n weight=request.args.getlist('weight')\n group=request.args.getlist('group')\n return render_elliptic_modular_form_navigation_wp(level=level,weight=weight,group=group)\n\n\[email protected](\"/\", methods=met)\[email protected](\"/<level>/\", methods=met)\[email protected](\"/<level>/<weight>/\", methods=met)\[email protected](\"/<level>/<weight>/<character>/\", methods=met)\[email protected](\"/<level>/<weight>/<character>/<label>\", methods=met)\[email protected](\"/<level>/<weight>/<character>/<label>/\", methods=met)\ndef render_elliptic_modular_forms(level=None, weight=None, character=None, label=None,group=None, **kwds):\n r\"\"\"\n Default input of same type as required. Note that for holomorphic modular forms: level=0 or weight=0 are non-existent.\n \"\"\"\n emf_logger.debug(\n \"In render: level={0},weight={1},character={2},group={3},label={4}\".format(level, weight, character, group, label))\n emf_logger.debug(\"args={0}\".format(request.args))\n emf_logger.debug(\"args={0}\".format(request.form))\n emf_logger.debug(\"met={0}\".format(request.method))\n keys = ['download', 'jump_to']\n info = get_args(request, level, weight, character, group, label, keys=keys)\n valid = validate_parameters(level,weight,character,label,info)\n if isinstance(valid,basestring):\n return redirect(valid,code=301)\n level = info['level']; weight = info['weight']; character = info['character']\n #if info.has_key('error'):\n # return render_elliptic_modular_form_navigation_wp(error=info['error'])\n emf_logger.debug(\"info={0}\".format(info))\n emf_logger.debug(\"level=%s, %s\" % (level, type(level)))\n emf_logger.debug(\"label=%s, %s\" % (label, type(label)))\n emf_logger.debug(\"wt=%s, %s\" % (weight, type(weight)))\n group = info.get('group',None)\n emf_logger.debug(\"group=%s, %s\" % (group, type(group)))\n if group == 0:\n info['character'] = character = 1 # only trivial character for Gamma_0(N)\n try:\n if 'download' in info:\n return get_downloads(**info)\n emf_logger.debug(\"info=%s\" % info)\n ## Consistency of arguments>\n # if level<=0: level=None\n # if weight<=0: weight=None\n if 'jump_to' in info: # try to find out which form we want to jump\n s = my_get(info, 'jump_to', '', str)\n emf_logger.info(\"info.keys1={0}\".format(info.keys()))\n info.pop('jump_to')\n emf_logger.info(\"info.keys2={0}\".format(info.keys()))\n args = extract_data_from_jump_to(s)\n emf_logger.debug(\"args=%s\" % args)\n return redirect(url_for(\"emf.render_elliptic_modular_forms\", **args), code=301)\n # return render_elliptic_modular_forms(**args)\n emf_logger.debug(\"HERE! weight={0} level={1} char={2}\".format(weight,level,character))\n if level > 0 and weight > 0 and character > 0:\n if label != '' and not label is None:\n return render_web_newform(**info)\n else:\n return render_web_modform_space(**info)\n if level > 0 and weight > 0 and (group != 0 or character == None):\n return render_web_modform_space_gamma1(**info)\n return render_elliptic_modular_form_navigation_wp(**info)\n # Otherwise we go to the main navigation page\n except IndexError as e: # catch everything here except KeyError below...\n emf_logger.debug(\"catching exceptions. 
info={0} e={1}\".format(info,e))\n errst = str(e)\n ## Try to customise some of the error messages:\n if 'Character' and 'not exist' in errst:\n errst += \" Please choose a character from the table below!\"\n flash(errst,'error')\n return render_elliptic_modular_form_navigation_wp(**info)\n if 'WebNewForm_computing' in errst:\n errst = \"The space {0}.{1}.{2} is not in the database!\".format(level,weight,character)\n flash(errst)\n return render_elliptic_modular_form_navigation_wp()\n except KeyError as e:\n emf_logger.debug(\"catching exceptions. info={0} e={1}\".format(info,e))\n errst = \"The orbit {0} is not in the database!\".format(newform_label(level,weight,character,label))\n flash(errst)\n return render_elliptic_modular_form_navigation_wp()\n\n\nfrom lmfdb.modular_forms.elliptic_modular_forms.backend.emf_download_utils import get_coefficients\n\[email protected](\"/Download/<int:level>/<int:weight>/<int:character>/<label>\", methods=['GET', 'POST'])\ndef get_downloads(level=None, weight=None, character=None, label=None, **kwds):\n keys = ['download', 'download_file', 'tempfile', 'format', 'number','bitprec']\n info = get_args(request, level=level, weight=weight, character=character, label=label, keys=keys)\n if 'download' not in info:\n emf_logger.critical(\"Download called without specifying what to download! info={0}\".format(info))\n return \"\"\n emf_logger.debug(\"in get_downloads: info={0}\".format(info))\n if info['download'] == 'coefficients':\n info['tempfile'] = \"/tmp/tmp_web_mod_form.txt\"\n return get_coefficients(info)\n if info['download'] == 'file':\n # there are only a certain number of fixed files that we want people to download\n filename = info['download_file']\n if filename == \"web_modforms.py\":\n dirname = emf.app.root_static_folder\n try:\n emf_logger.debug(\"Dirname:{0}, Filename:{1}\".format(dirname, filename))\n return send_from_directory(dirname, filename, as_attachment=True, attachment_filename=filename)\n except IOError:\n info['error'] = \"Could not find file! 
\"\n\[email protected](\"/random\")\ndef random_form():\n label = random_object_from_collection( db_emf() )['hecke_orbit_label']\n level, weight, character, label = parse_newform_label(label)\n args={}\n args['level'] = level\n args['weight'] = weight\n args['character'] = character\n args['label'] = label\n return redirect(url_for(\".render_elliptic_modular_forms\", **args), 301)\n\[email protected](\"/Plots/<int:grouptype>/<int:level>/\")\ndef render_plot(grouptype=0, level=1):\n domain = render_fd_plot(level, {'grouptype': grouptype})\n if isinstance(domain, sage.plot.plot.Graphics):\n emf_logger.debug('Got a Graphics object')\n _, filename = tempfile.mkstemp('.png')\n domain.save(filename)\n data = open(filename).read()\n os.unlink(filename)\n else:\n data = domain\n response = make_response(data)\n response.headers['Content-type'] = 'image/png'\n return response\n\[email protected](\"/Qexp/<int:level>/<int:weight>/<int:character>/<label>/<int:prec>\")\ndef get_qexp(level, weight, character, label, prec, latex=False, **kwds):\n emf_logger.debug(\n \"get_qexp for: level={0},weight={1},character={2},label={3}\".format(level, weight, character, label))\n #latex = my_get(request.args, \"latex\", False, bool)\n emf_logger.debug(\n \"get_qexp latex: {0}, prec: {1}\".format(latex, prec))\n #if not arg:\n # return flask.abort(404)\n try:\n M = WebModFormSpace_cached(level=level,weight=weight,character=character)\n WNF = M.hecke_orbits[label]\n WNF.prec = prec\n if not latex:\n c = WNF.q_expansion\n else:\n c = WNF.q_expansion_latex(prec=prec, name = 'a')\n return c\n except Exception as e:\n return \"<span style='color:red;'>ERROR: %s</span>\" % e.message\n\[email protected](\"/qexp_latex/<int:level>/<int:weight>/<int:character>/<label>/<int:prec>\")\[email protected](\"/qexp_latex/<int:level>/<int:weight>/<int:character>/<label>/\")\ndef get_qexp_latex(level, weight, character, label, prec=10, **kwds):\n return get_qexp(level, weight, character, label, prec, latex=True, **kwds)\n\n\n###\n### Helper functions.\n###\n\ndef get_args(request, level=0, weight=0, character=-1, group=2, label='', keys=[]):\n r\"\"\"\n Use default input of the same type as desired output.\n \"\"\"\n if request.method == 'GET':\n dd = to_dict(request.args)\n else:\n dd = to_dict(request.form)\n emf_logger.debug(\"REQUEST:{0}\".format(dd))\n info = dict()\n info['level'] = my_get(dd, 'level', level, int)\n info['weight'] = my_get(dd, 'weight', weight, int)\n info['character'] = my_get(dd, 'character', character, int)\n emf_logger.debug(\"group={0}\".format(group))\n info['group'] = my_get(dd, 'group', group, int)\n emf_logger.debug(\"info[group]={0}\".format(info['group']))\n info['label'] = my_get(dd, 'label', label, str)\n for key in keys:\n if key in dd:\n info[key] = my_get(dd, key, '', str)\n return info\n\n\nfrom markupsafe import Markup\nfrom ..backend.emf_utils import is_range\n\ndef validate_character(level, character):\n \"\"\"Assumes level is a positive integer N, checks that 0<character<=N\n and gcd(character,N)=1. Returns None if OK, else a suitable error\n message.\n \"\"\"\n #print \"validate_character(%s,%s)\" % (level, character)\n if not isinstance(character,int):\n return \"The character number should be an integer. You gave: %s\" % character\n from sage.all import GCD\n if character <= 0 or character > level or GCD(level,character)!=1:\n return \"The character number should be a positive integer less than or equal to and coprime to the level %s. 
You gave: %s\" % (level, character)\n return 0\n\ndef validate_parameters(level=0,weight=0,character=None,label='',info={}):\n #print app.url_map\n emf_logger.debug(\"validating info={0}\".format(info))\n level= info['level']; weight=info['weight']\n character = info['character']; label = info['label']\n t = True\n m = []\n if not info.get('jump_to',None) is None:\n return t\n if is_range(level) or is_range(weight):\n new_url = url_for(\"emf.browse_web_modform_spaces_in_ranges\",**info)\n emf_logger.debug(\"level or weight is a range so we redirect! url={0}\".format(new_url))\n return new_url\n\n if not level is None and (not isinstance(level,int) or level <= 0):\n m.append(\"Please provide a positive integer level! You gave: {0}\".format(level)); t = False\n if level is None:\n info['level'] = None\n else:\n info['level'] = 0\n if not weight is None and (not isinstance(weight,int) or weight <=0):\n m.append(\"Please provide a positive integer weight! You gave: {0}\".format(weight)); t = False\n if weight is None:\n info['weight']=None\n info['weight'] = 0\n if not character is None:\n res = validate_character(level, character)\n if res:\n m.append(res); t = False\n info['character'] = None\n if not label is None and (not isinstance(label,basestring)):\n m.append('Please provide a label in string format! You gave: {0}'.format(label)); t=False\n info['label']=''\n if label is None:\n info['label'] = None\n if not t:\n msg = \"<br>\".join(m)\n flash(Markup(msg),'error')\n emf_logger.debug(\"validate: {0}\".format(msg))\n \n# If we don't match any arglist above we see if we have only a label\n# or else catch malformed urls\[email protected](\"/<level>\")\[email protected](\"/<level>/\")\[email protected](\"/<level>/<weight>\")\[email protected](\"/<level>/<weight>/\")\[email protected](\"/<level>/<weight>/<character>\")\[email protected](\"/<level>/<weight>/<character>/\")\[email protected](\"/<level>/<weight>/<character>/<label>\")\[email protected](\"/<level>/<weight>/<character>/<label>/\")\[email protected](\"/<level>/<weight>/<character>/<label>/<emb>\")\[email protected](\"/<level>/<weight>/<character>/<label>/<emb>/\")\ndef redirect_false_route(level=None,weight=None,character=None,label='',emb=None):\n ## jumps only have one field (here level)\n if weight is None:\n args = extract_data_from_jump_to(level)\n emf_logger.debug(\"args={0}\".format(args))\n else:\n args = {'level':level,'weight':weight,'character':character,'label':label}\n #validate_parameters(level,weight,character,label,args)\n\n return redirect(url_for(\"emf.render_elliptic_modular_forms\",**args), code=301)\n # return render_elliptic_modular_form_navigation_wp(**info)\n", "path": "lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#*****************************************************************************\n# Copyright (C) 2010 Fredrik Str\u00f6mberg <[email protected]>,\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# http://www.gnu.org/licenses/\n#*****************************************************************************\nr\"\"\"\nMain file for viewing elliptical modular forms.\n\nAUTHORS: \n - Fredrik Str\u00f6mberg\n - Stephan Ehlen\n\n\"\"\"\nfrom flask import url_for, request, redirect, make_response, send_from_directory,flash\nimport os, tempfile\nimport sage\nfrom lmfdb.base import getDBConnection\nfrom lmfdb.modular_forms.backend.mf_utils import my_get\nfrom lmfdb.utils import to_dict, random_object_from_collection\nfrom lmfdb.modular_forms.elliptic_modular_forms import EMF, emf_logger, emf\nfrom lmfdb.modular_forms.elliptic_modular_forms.backend.web_modform_space import WebModFormSpace_cached\nfrom lmfdb.modular_forms.elliptic_modular_forms.backend.emf_utils import (\n render_fd_plot,\n extract_data_from_jump_to,\n newform_label,\n parse_newform_label)\nfrom emf_render_web_newform import render_web_newform\nfrom emf_render_web_modform_space import render_web_modform_space\nfrom emf_render_web_modform_space_gamma1 import render_web_modform_space_gamma1\n\nfrom emf_render_navigation import render_elliptic_modular_form_navigation_wp\n\nemf_logger.setLevel(int(100))\n\[email protected]_processor\ndef body_class():\n return {'body_class': EMF}\n\nemfdb = None\n\ndef db_emf():\n global emfdb\n if emfdb is None:\n emfdb = getDBConnection().modularforms2.webnewforms\n return emfdb\n\n#################\n# Top level\n#################\n\n###########################################\n# Search / Navigate\n###########################################\n\nmet = ['GET', 'POST']\n\[email protected](\"/ranges\", methods=[\"GET\"])\[email protected](\"/ranges/\", methods=[\"GET\"])\ndef browse_web_modform_spaces_in_ranges(**kwds):\n r\"\"\"\n Browse spaces with level and weight within given ranges. level and weight should be of the form N1-N2 and k1-k2\n\n \"\"\"\n emf_logger.debug(\"request.args={0}\".format(request.args))\n level=request.args.getlist('level')\n weight=request.args.getlist('weight')\n group=request.args.getlist('group')\n return render_elliptic_modular_form_navigation_wp(level=level,weight=weight,group=group)\n\n\[email protected](\"/\", methods=met)\[email protected](\"/<level>/\", methods=met)\[email protected](\"/<level>/<weight>/\", methods=met)\[email protected](\"/<level>/<weight>/<character>/\", methods=met)\[email protected](\"/<level>/<weight>/<character>/<label>\", methods=met)\[email protected](\"/<level>/<weight>/<character>/<label>/\", methods=met)\ndef render_elliptic_modular_forms(level=None, weight=None, character=None, label=None,group=None, **kwds):\n r\"\"\"\n Default input of same type as required. 
Note that for holomorphic modular forms: level=0 or weight=0 are non-existent.\n \"\"\"\n emf_logger.debug(\n \"In render: level={0},weight={1},character={2},group={3},label={4}\".format(level, weight, character, group, label))\n emf_logger.debug(\"args={0}\".format(request.args))\n emf_logger.debug(\"args={0}\".format(request.form))\n emf_logger.debug(\"met={0}\".format(request.method))\n keys = ['download', 'jump_to']\n info = get_args(request, level, weight, character, group, label, keys=keys)\n valid = validate_parameters(level,weight,character,label,info)\n if isinstance(valid,basestring):\n return redirect(valid,code=301)\n level = info['level']; weight = info['weight']; character = info['character']\n #if info.has_key('error'):\n # return render_elliptic_modular_form_navigation_wp(error=info['error'])\n emf_logger.debug(\"info={0}\".format(info))\n emf_logger.debug(\"level=%s, %s\" % (level, type(level)))\n emf_logger.debug(\"label=%s, %s\" % (label, type(label)))\n emf_logger.debug(\"wt=%s, %s\" % (weight, type(weight)))\n group = info.get('group',None)\n emf_logger.debug(\"group=%s, %s\" % (group, type(group)))\n if group == 0:\n info['character'] = character = 1 # only trivial character for Gamma_0(N)\n try:\n if 'download' in info:\n return get_downloads(**info)\n emf_logger.debug(\"info=%s\" % info)\n ## Consistency of arguments>\n # if level<=0: level=None\n # if weight<=0: weight=None\n if 'jump_to' in info: # try to find out which form we want to jump\n s = my_get(info, 'jump_to', '', str)\n emf_logger.info(\"info.keys1={0}\".format(info.keys()))\n info.pop('jump_to')\n emf_logger.info(\"info.keys2={0}\".format(info.keys()))\n args = extract_data_from_jump_to(s)\n emf_logger.debug(\"args=%s\" % args)\n return redirect(url_for(\"emf.render_elliptic_modular_forms\", **args), code=301)\n # return render_elliptic_modular_forms(**args)\n emf_logger.debug(\"HERE! weight={0} level={1} char={2}\".format(weight,level,character))\n if level > 0 and weight > 0 and character > 0:\n if label != '' and not label is None:\n return render_web_newform(**info)\n else:\n return render_web_modform_space(**info)\n if level > 0 and weight > 0 and (group != 0 or character == None):\n return render_web_modform_space_gamma1(**info)\n return render_elliptic_modular_form_navigation_wp(**info)\n # Otherwise we go to the main navigation page\n except IndexError as e: # catch everything here except KeyError below...\n emf_logger.debug(\"catching exceptions. info={0} e={1}\".format(info,e))\n errst = str(e)\n ## Try to customise some of the error messages:\n if 'Character' and 'not exist' in errst:\n errst += \" Please choose a character from the table below!\"\n flash(errst,'error')\n return render_elliptic_modular_form_navigation_wp(**info)\n if 'WebNewForm_computing' in errst:\n errst = \"The space {0}.{1}.{2} is not in the database!\".format(level,weight,character)\n flash(errst)\n return render_elliptic_modular_form_navigation_wp()\n except KeyError as e:\n emf_logger.debug(\"catching exceptions. 
info={0} e={1}\".format(info,e))\n errst = \"The orbit {0} is not in the database!\".format(newform_label(level,weight,character,label))\n flash(errst)\n return render_elliptic_modular_form_navigation_wp()\n\n\nfrom lmfdb.modular_forms.elliptic_modular_forms.backend.emf_download_utils import get_coefficients\n\[email protected](\"/Download/<int:level>/<int:weight>/<int:character>/<label>\", methods=['GET', 'POST'])\ndef get_downloads(level=None, weight=None, character=None, label=None, **kwds):\n keys = ['download', 'download_file', 'tempfile', 'format', 'number','bitprec']\n info = get_args(request, level=level, weight=weight, character=character, label=label, keys=keys)\n if 'download' not in info:\n emf_logger.critical(\"Download called without specifying what to download! info={0}\".format(info))\n return \"\"\n emf_logger.debug(\"in get_downloads: info={0}\".format(info))\n if info['download'] == 'coefficients':\n info['tempfile'] = \"/tmp/tmp_web_mod_form.txt\"\n return get_coefficients(info)\n if info['download'] == 'file':\n # there are only a certain number of fixed files that we want people to download\n filename = info['download_file']\n if filename == \"web_modforms.py\":\n dirname = emf.app.root_static_folder\n try:\n emf_logger.debug(\"Dirname:{0}, Filename:{1}\".format(dirname, filename))\n return send_from_directory(dirname, filename, as_attachment=True, attachment_filename=filename)\n except IOError:\n info['error'] = \"Could not find file! \"\n\[email protected](\"/random\")\ndef random_form():\n label = random_object_from_collection( db_emf() )['hecke_orbit_label']\n level, weight, character, label = parse_newform_label(label)\n args={}\n args['level'] = level\n args['weight'] = weight\n args['character'] = character\n args['label'] = label\n return redirect(url_for(\".render_elliptic_modular_forms\", **args), 301)\n\[email protected](\"/Plots/<int:grouptype>/<int:level>/\")\ndef render_plot(grouptype=0, level=1):\n domain = render_fd_plot(level, {'grouptype': grouptype})\n if isinstance(domain, sage.plot.plot.Graphics):\n emf_logger.debug('Got a Graphics object')\n _, filename = tempfile.mkstemp('.png')\n domain.save(filename)\n data = open(filename).read()\n os.unlink(filename)\n else:\n data = domain\n response = make_response(data)\n response.headers['Content-type'] = 'image/png'\n return response\n\[email protected](\"/Qexp/<int:level>/<int:weight>/<int:character>/<label>/<int:prec>\")\ndef get_qexp(level, weight, character, label, prec, latex=False, **kwds):\n emf_logger.debug(\n \"get_qexp for: level={0},weight={1},character={2},label={3}\".format(level, weight, character, label))\n #latex = my_get(request.args, \"latex\", False, bool)\n emf_logger.debug(\n \"get_qexp latex: {0}, prec: {1}\".format(latex, prec))\n #if not arg:\n # return flask.abort(404)\n try:\n M = WebModFormSpace_cached(level=level,weight=weight,character=character)\n WNF = M.hecke_orbits[label]\n WNF.prec = prec\n if not latex:\n c = WNF.q_expansion\n else:\n c = WNF.q_expansion_latex(prec=prec, name = '\\\\alpha ')\n return c\n except Exception as e:\n return \"<span style='color:red;'>ERROR: %s</span>\" % e.message\n\[email protected](\"/qexp_latex/<int:level>/<int:weight>/<int:character>/<label>/<int:prec>\")\[email protected](\"/qexp_latex/<int:level>/<int:weight>/<int:character>/<label>/\")\ndef get_qexp_latex(level, weight, character, label, prec=10, **kwds):\n return get_qexp(level, weight, character, label, prec, latex=True, **kwds)\n\n\n###\n### Helper functions.\n###\n\ndef 
get_args(request, level=0, weight=0, character=-1, group=2, label='', keys=[]):\n r\"\"\"\n Use default input of the same type as desired output.\n \"\"\"\n if request.method == 'GET':\n dd = to_dict(request.args)\n else:\n dd = to_dict(request.form)\n emf_logger.debug(\"REQUEST:{0}\".format(dd))\n info = dict()\n info['level'] = my_get(dd, 'level', level, int)\n info['weight'] = my_get(dd, 'weight', weight, int)\n info['character'] = my_get(dd, 'character', character, int)\n emf_logger.debug(\"group={0}\".format(group))\n info['group'] = my_get(dd, 'group', group, int)\n emf_logger.debug(\"info[group]={0}\".format(info['group']))\n info['label'] = my_get(dd, 'label', label, str)\n for key in keys:\n if key in dd:\n info[key] = my_get(dd, key, '', str)\n return info\n\n\nfrom markupsafe import Markup\nfrom ..backend.emf_utils import is_range\n\ndef validate_character(level, character):\n \"\"\"Assumes level is a positive integer N, checks that 0<character<=N\n and gcd(character,N)=1. Returns None if OK, else a suitable error\n message.\n \"\"\"\n #print \"validate_character(%s,%s)\" % (level, character)\n if not isinstance(character,int):\n return \"The character number should be an integer. You gave: %s\" % character\n from sage.all import GCD\n if character <= 0 or character > level or GCD(level,character)!=1:\n return \"The character number should be a positive integer less than or equal to and coprime to the level %s. You gave: %s\" % (level, character)\n return 0\n\ndef validate_parameters(level=0,weight=0,character=None,label='',info={}):\n #print app.url_map\n emf_logger.debug(\"validating info={0}\".format(info))\n level= info['level']; weight=info['weight']\n character = info['character']; label = info['label']\n t = True\n m = []\n if not info.get('jump_to',None) is None:\n return t\n if is_range(level) or is_range(weight):\n new_url = url_for(\"emf.browse_web_modform_spaces_in_ranges\",**info)\n emf_logger.debug(\"level or weight is a range so we redirect! url={0}\".format(new_url))\n return new_url\n\n if not level is None and (not isinstance(level,int) or level <= 0):\n m.append(\"Please provide a positive integer level! You gave: {0}\".format(level)); t = False\n if level is None:\n info['level'] = None\n else:\n info['level'] = 0\n if not weight is None and (not isinstance(weight,int) or weight <=0):\n m.append(\"Please provide a positive integer weight! You gave: {0}\".format(weight)); t = False\n if weight is None:\n info['weight']=None\n info['weight'] = 0\n if not character is None:\n res = validate_character(level, character)\n if res:\n m.append(res); t = False\n info['character'] = None\n if not label is None and (not isinstance(label,basestring)):\n m.append('Please provide a label in string format! 
You gave: {0}'.format(label)); t=False\n info['label']=''\n if label is None:\n info['label'] = None\n if not t:\n msg = \"<br>\".join(m)\n flash(Markup(msg),'error')\n emf_logger.debug(\"validate: {0}\".format(msg))\n \n# If we don't match any arglist above we see if we have only a label\n# or else catch malformed urls\[email protected](\"/<level>\")\[email protected](\"/<level>/\")\[email protected](\"/<level>/<weight>\")\[email protected](\"/<level>/<weight>/\")\[email protected](\"/<level>/<weight>/<character>\")\[email protected](\"/<level>/<weight>/<character>/\")\[email protected](\"/<level>/<weight>/<character>/<label>\")\[email protected](\"/<level>/<weight>/<character>/<label>/\")\[email protected](\"/<level>/<weight>/<character>/<label>/<emb>\")\[email protected](\"/<level>/<weight>/<character>/<label>/<emb>/\")\ndef redirect_false_route(level=None,weight=None,character=None,label='',emb=None):\n ## jumps only have one field (here level)\n if weight is None:\n args = extract_data_from_jump_to(level)\n emf_logger.debug(\"args={0}\".format(args))\n else:\n args = {'level':level,'weight':weight,'character':character,'label':label}\n #validate_parameters(level,weight,character,label,args)\n\n return redirect(url_for(\"emf.render_elliptic_modular_forms\",**args), code=301)\n # return render_elliptic_modular_form_navigation_wp(**info)\n", "path": "lmfdb/modular_forms/elliptic_modular_forms/views/emf_main.py"}]} |
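The one-line fix in the diff above only changes the display name handed to the LaTeX printer, from `'a'` to `'\alpha '`. A small standalone illustration of that effect (toy code written for this note, not LMFDB or Sage code) shows why the symbol matters when the coefficients live in Q(alpha):

```python
def latex_qexp(coeffs, name="a", prec=None):
    # Render a toy q-expansion whose coefficients are multiples of the
    # number-field generator, printed with a configurable symbol.
    prec = prec or len(coeffs)
    terms = []
    for n, c in enumerate(coeffs[:prec], start=1):
        coeff = name if c == 1 else "{}{}".format(c, name)
        terms.append("{}q^{{{}}}".format(coeff, n))
    return " + ".join(terms) + " + O(q^{{{}}})".format(prec + 1)

print(latex_qexp([1, 2, 3], name="a"))         # aq^{1} + 2aq^{2} + 3aq^{3} + O(q^{4})
print(latex_qexp([1, 2, 3], name="\\alpha "))  # \alpha q^{1} + 2\alpha q^{2} + ...
```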
gh_patches_debug_1381 | rasdani/github-patches | git_diff | getsentry__sentry-17425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Event migration 9.1.2 -> 10
<!--
Do you want to ask a question? Are you looking for support? The Sentry message
board is the best place for getting support: https://forum.sentry.io
-->
## Important Details
How are you running Sentry?
* [X] On-Premise docker [Version 9.1.2]
* [ ] Saas (sentry.io)
* [ ] Other [briefly describe your environment]
## Description
I followed the migration guide, alongside all fixes and workaround and managed to get to the actual migration routine. Sentry tries to process all existing postgres events but fails to (for every event):
```
An error occured while trying to instert the following event: <sentry.eventstore.models.Event object at 0x7f2f08e552d0>
.----
insert() takes at least 8 arguments (8 given)
[...]
Event migration done. Migrated 0 of 197988 events.
```
## Steps to Reproduce
1. Have a 9.1.2 onpremise setup and have event data
2. Upgrade to 10 (dev-master), run `install.sh` etc.
### What you expected to happen
Migration scripts succeeds and I have all event data in the new version.
### Possible Solution
Error message suggests a syntax error?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/migrations/0024_auto_20191230_2052.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.9.13 on 2019-12-30 20:52
3 from __future__ import unicode_literals, print_function
4
5 import os
6 import types
7 from datetime import timedelta, datetime
8
9 from django.db import migrations
10 from django.utils import timezone
11
12 from sentry import options
13 from sentry.eventstore.models import Event as NewEvent
14
15
16 def backfill_eventstream(apps, schema_editor):
17 """
18 Inserts Postgres events into the eventstream if there are recent events in Postgres.
19
20 This is for open source users migrating from 9.x who want to keep their events.
21 If there are no recent events in Postgres, skip the backfill.
22 """
23 from sentry import eventstore, eventstream
24 from sentry.utils.query import RangeQuerySetWrapper
25
26 Event = apps.get_model("sentry", "Event")
27 Group = apps.get_model("sentry", "Group")
28 Project = apps.get_model("sentry", "Project")
29
30 # Kill switch to skip this migration
31 skip_backfill = os.environ.get("SENTRY_SKIP_EVENTS_BACKFILL_FOR_10", False)
32
33 # Use 90 day retention if the option has not been set or set to 0
34 DEFAULT_RETENTION = 90
35 retention_days = options.get("system.event-retention-days") or DEFAULT_RETENTION
36
37 def get_events(last_days):
38 to_date = timezone.now()
39 from_date = to_date - timedelta(days=last_days)
40 return Event.objects.filter(
41 datetime__gte=from_date, datetime__lte=to_date, group_id__isnull=False
42 )
43
44 def _attach_related(_events):
45 project_ids = set()
46 group_ids = set()
47 for event in _events:
48 project_ids.add(event.project_id)
49 group_ids.add(event.group_id)
50 projects = {p.id: p for p in Project.objects.filter(id__in=project_ids)}
51 groups = {g.id: g for g in Group.objects.filter(id__in=group_ids)}
52
53 for event in _events:
54 event.project = projects.get(event.project_id)
55 event.group = groups.get(event.group_id)
56 eventstore.bind_nodes(_events, "data")
57
58 if skip_backfill:
59 print("Skipping backfill.\n")
60 return
61
62 events = get_events(retention_days)
63 count = events.count()
64
65 if count == 0:
66 print("Nothing to do, skipping migration.\n")
67 return
68
69 print("Events to process: {}\n".format(count))
70
71 processed = 0
72 for e in RangeQuerySetWrapper(events, step=100, callbacks=(_attach_related,)):
73 event = NewEvent(
74 project_id=e.project_id, event_id=e.event_id, group_id=e.group_id, data=e.data.data
75 )
76 primary_hash = event.get_primary_hash()
77 if event.project is None or event.group is None:
78 print("Skipped {} as group or project information is invalid.\n".format(event))
79 continue
80
81 try:
82 eventstream.insert(
83 group=event.group,
84 event=event,
85 is_new=False,
86 is_regression=False,
87 is_new_group_environment=False,
88 primary_hash=primary_hash,
89 skip_consume=True,
90 )
91 processed += 1
92 except Exception as error:
93 print(
94 "An error occured while trying to instert the following event: {}\n.----\n{}".format(
95 event, error
96 )
97 )
98
99 print("Event migration done. Migrated {} of {} events.\n".format(processed, count))
100
101
102 class Migration(migrations.Migration):
103 # This flag is used to mark that a migration shouldn't be automatically run in
104 # production. We set this to True for operations that we think are risky and want
105 # someone from ops to run manually and monitor.
106 # General advice is that if in doubt, mark your migration as `is_dangerous`.
107 # Some things you should always mark as dangerous:
108 # - Adding indexes to large tables. These indexes should be created concurrently,
109 # unfortunately we can't run migrations outside of a transaction until Django
110 # 1.10. So until then these should be run manually.
111 # - Large data migrations. Typically we want these to be run manually by ops so that
112 # they can be monitored. Since data migrations will now hold a transaction open
113 # this is even more important.
114 # - Adding columns to highly active tables, even ones that are NULL.
115 is_dangerous = True
116
117 dependencies = [
118 ("sentry", "0023_hide_environment_none_20191126"),
119 ]
120
121 operations = [
122 migrations.RunPython(backfill_eventstream, reverse_code=migrations.RunPython.noop),
123 ]
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/migrations/0024_auto_20191230_2052.py b/src/sentry/migrations/0024_auto_20191230_2052.py
--- a/src/sentry/migrations/0024_auto_20191230_2052.py
+++ b/src/sentry/migrations/0024_auto_20191230_2052.py
@@ -86,6 +86,8 @@
is_regression=False,
is_new_group_environment=False,
primary_hash=primary_hash,
+ received_timestamp=event.data.get("received")
+ or float(event.datetime.strftime("%s")),
skip_consume=True,
)
processed += 1
| {"golden_diff": "diff --git a/src/sentry/migrations/0024_auto_20191230_2052.py b/src/sentry/migrations/0024_auto_20191230_2052.py\n--- a/src/sentry/migrations/0024_auto_20191230_2052.py\n+++ b/src/sentry/migrations/0024_auto_20191230_2052.py\n@@ -86,6 +86,8 @@\n is_regression=False,\n is_new_group_environment=False,\n primary_hash=primary_hash,\n+ received_timestamp=event.data.get(\"received\")\n+ or float(event.datetime.strftime(\"%s\")),\n skip_consume=True,\n )\n processed += 1\n", "issue": "Event migration 9.1.2 -> 10\n<!--\r\n\r\nDo you want to ask a question? Are you looking for support? The Sentry message\r\nboard is the best place for getting support: https://forum.sentry.io\r\n-->\r\n\r\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [X] On-Premise docker [Version 9.1.2]\r\n* [ ] Saas (sentry.io)\r\n* [ ] Other [briefly describe your environment]\r\n\r\n## Description\r\n\r\nI followed the migration guide, alongside all fixes and workaround and managed to get to the actual migration routine. Sentry tries to process all existing postgres events but fails to (for every event):\r\n\r\n```\r\nAn error occured while trying to instert the following event: <sentry.eventstore.models.Event object at 0x7f2f08e552d0>\r\n.----\r\ninsert() takes at least 8 arguments (8 given)\r\n[...]\r\nEvent migration done. Migrated 0 of 197988 events.\r\n```\r\n\r\n## Steps to Reproduce\r\n\r\n1. Have a 9.1.2 onpremise setup and have event data\r\n2. Upgrade to 10 (dev-master), run `install.sh` etc.\r\n\r\n### What you expected to happen\r\n\r\nMigration scripts succeeds and I have all event data in the new version.\r\n\r\n### Possible Solution\r\n\r\nError message suggests a syntax error?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.13 on 2019-12-30 20:52\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport types\nfrom datetime import timedelta, datetime\n\nfrom django.db import migrations\nfrom django.utils import timezone\n\nfrom sentry import options\nfrom sentry.eventstore.models import Event as NewEvent\n\n\ndef backfill_eventstream(apps, schema_editor):\n \"\"\"\n Inserts Postgres events into the eventstream if there are recent events in Postgres.\n\n This is for open source users migrating from 9.x who want to keep their events.\n If there are no recent events in Postgres, skip the backfill.\n \"\"\"\n from sentry import eventstore, eventstream\n from sentry.utils.query import RangeQuerySetWrapper\n\n Event = apps.get_model(\"sentry\", \"Event\")\n Group = apps.get_model(\"sentry\", \"Group\")\n Project = apps.get_model(\"sentry\", \"Project\")\n\n # Kill switch to skip this migration\n skip_backfill = os.environ.get(\"SENTRY_SKIP_EVENTS_BACKFILL_FOR_10\", False)\n\n # Use 90 day retention if the option has not been set or set to 0\n DEFAULT_RETENTION = 90\n retention_days = options.get(\"system.event-retention-days\") or DEFAULT_RETENTION\n\n def get_events(last_days):\n to_date = timezone.now()\n from_date = to_date - timedelta(days=last_days)\n return Event.objects.filter(\n datetime__gte=from_date, datetime__lte=to_date, group_id__isnull=False\n )\n\n def _attach_related(_events):\n project_ids = set()\n group_ids = set()\n for event in _events:\n project_ids.add(event.project_id)\n group_ids.add(event.group_id)\n projects = {p.id: p for p in Project.objects.filter(id__in=project_ids)}\n groups = {g.id: g for g in Group.objects.filter(id__in=group_ids)}\n\n for event in _events:\n event.project = 
projects.get(event.project_id)\n event.group = groups.get(event.group_id)\n eventstore.bind_nodes(_events, \"data\")\n\n if skip_backfill:\n print(\"Skipping backfill.\\n\")\n return\n\n events = get_events(retention_days)\n count = events.count()\n\n if count == 0:\n print(\"Nothing to do, skipping migration.\\n\")\n return\n\n print(\"Events to process: {}\\n\".format(count))\n\n processed = 0\n for e in RangeQuerySetWrapper(events, step=100, callbacks=(_attach_related,)):\n event = NewEvent(\n project_id=e.project_id, event_id=e.event_id, group_id=e.group_id, data=e.data.data\n )\n primary_hash = event.get_primary_hash()\n if event.project is None or event.group is None:\n print(\"Skipped {} as group or project information is invalid.\\n\".format(event))\n continue\n\n try:\n eventstream.insert(\n group=event.group,\n event=event,\n is_new=False,\n is_regression=False,\n is_new_group_environment=False,\n primary_hash=primary_hash,\n skip_consume=True,\n )\n processed += 1\n except Exception as error:\n print(\n \"An error occured while trying to instert the following event: {}\\n.----\\n{}\".format(\n event, error\n )\n )\n\n print(\"Event migration done. Migrated {} of {} events.\\n\".format(processed, count))\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Adding indexes to large tables. These indexes should be created concurrently,\n # unfortunately we can't run migrations outside of a transaction until Django\n # 1.10. So until then these should be run manually.\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. 
Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n dependencies = [\n (\"sentry\", \"0023_hide_environment_none_20191126\"),\n ]\n\n operations = [\n migrations.RunPython(backfill_eventstream, reverse_code=migrations.RunPython.noop),\n ]\n", "path": "src/sentry/migrations/0024_auto_20191230_2052.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.13 on 2019-12-30 20:52\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport types\nfrom datetime import timedelta, datetime\n\nfrom django.db import migrations\nfrom django.utils import timezone\n\nfrom sentry import options\nfrom sentry.eventstore.models import Event as NewEvent\n\n\ndef backfill_eventstream(apps, schema_editor):\n \"\"\"\n Inserts Postgres events into the eventstream if there are recent events in Postgres.\n\n This is for open source users migrating from 9.x who want to keep their events.\n If there are no recent events in Postgres, skip the backfill.\n \"\"\"\n from sentry import eventstore, eventstream\n from sentry.utils.query import RangeQuerySetWrapper\n\n Event = apps.get_model(\"sentry\", \"Event\")\n Group = apps.get_model(\"sentry\", \"Group\")\n Project = apps.get_model(\"sentry\", \"Project\")\n\n # Kill switch to skip this migration\n skip_backfill = os.environ.get(\"SENTRY_SKIP_EVENTS_BACKFILL_FOR_10\", False)\n\n # Use 90 day retention if the option has not been set or set to 0\n DEFAULT_RETENTION = 90\n retention_days = options.get(\"system.event-retention-days\") or DEFAULT_RETENTION\n\n def get_events(last_days):\n to_date = timezone.now()\n from_date = to_date - timedelta(days=last_days)\n return Event.objects.filter(\n datetime__gte=from_date, datetime__lte=to_date, group_id__isnull=False\n )\n\n def _attach_related(_events):\n project_ids = set()\n group_ids = set()\n for event in _events:\n project_ids.add(event.project_id)\n group_ids.add(event.group_id)\n projects = {p.id: p for p in Project.objects.filter(id__in=project_ids)}\n groups = {g.id: g for g in Group.objects.filter(id__in=group_ids)}\n\n for event in _events:\n event.project = projects.get(event.project_id)\n event.group = groups.get(event.group_id)\n eventstore.bind_nodes(_events, \"data\")\n\n if skip_backfill:\n print(\"Skipping backfill.\\n\")\n return\n\n events = get_events(retention_days)\n count = events.count()\n\n if count == 0:\n print(\"Nothing to do, skipping migration.\\n\")\n return\n\n print(\"Events to process: {}\\n\".format(count))\n\n processed = 0\n for e in RangeQuerySetWrapper(events, step=100, callbacks=(_attach_related,)):\n event = NewEvent(\n project_id=e.project_id, event_id=e.event_id, group_id=e.group_id, data=e.data.data\n )\n primary_hash = event.get_primary_hash()\n if event.project is None or event.group is None:\n print(\"Skipped {} as group or project information is invalid.\\n\".format(event))\n continue\n\n try:\n eventstream.insert(\n group=event.group,\n event=event,\n is_new=False,\n is_regression=False,\n is_new_group_environment=False,\n primary_hash=primary_hash,\n received_timestamp=event.data.get(\"received\")\n or float(event.datetime.strftime(\"%s\")),\n skip_consume=True,\n )\n processed += 1\n except Exception as error:\n print(\n \"An error occured while trying to instert the following event: {}\\n.----\\n{}\".format(\n event, error\n )\n )\n\n print(\"Event migration done. 
Migrated {} of {} events.\\n\".format(processed, count))\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Adding indexes to large tables. These indexes should be created concurrently,\n # unfortunately we can't run migrations outside of a transaction until Django\n # 1.10. So until then these should be run manually.\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n dependencies = [\n (\"sentry\", \"0023_hide_environment_none_20191126\"),\n ]\n\n operations = [\n migrations.RunPython(backfill_eventstream, reverse_code=migrations.RunPython.noop),\n ]\n", "path": "src/sentry/migrations/0024_auto_20191230_2052.py"}]} |
gh_patches_debug_1382 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Semantic Segmentation target masks broken >0.7.5
## 🐛 Bug
The switch to Albumentations in newer releases of lightning-flash seems to have broken the transformation of segmentation targets.
This is what I expect the masks to look like (screenshot of the code sample below running on 0.7.5):

This is what it looks like on the latest release (0.8.1):

### To Reproduce
Run the below sample with lightning-flash=0.7.5 and lightning-flash=0.8.1 and compare behavior.
#### Code sample
```py
import torch
import flash
from flash.core.data.utils import download_data
from flash.image import SemanticSegmentation, SemanticSegmentationData
import matplotlib.pyplot as plt
import numpy as np
# 1. Create the DataModule
# The data was generated with the CARLA self-driving simulator as part of the Kaggle Lyft Udacity Challenge.
# More info here: https://www.kaggle.com/kumaresanmanickavelu/lyft-udacity-challenge
# download_data(
# "https://github.com/ongchinkiat/LyftPerceptionChallenge/releases/download/v0.1/carla-capture-20180513A.zip",
# "./data",
# )
datamodule = SemanticSegmentationData.from_folders(
train_folder="data/CameraRGB",
train_target_folder="data/CameraSeg",
val_split=0.1,
transform_kwargs=dict(image_size=(256, 256)),
num_classes=21,
batch_size=4,
)
# 2. Build the task
model = SemanticSegmentation(
backbone="mobilenetv3_large_100",
head="fpn",
num_classes=datamodule.num_classes,
)
n = 3
fig, axarr = plt.subplots(ncols=2, nrows=n, figsize=(8, 4*n))
for batch in datamodule.train_dataloader():
print(batch.keys())
for i in range(n):
segm = batch['target'][i]
print(segm.shape)
img = np.rollaxis(batch['input'][i].numpy(), 0, 3)
axarr[i, 0].imshow(img)
axarr[i, 1].imshow(segm)
break
```
### Environment
- OS: Ubuntu WSL2
- Python version: 3.10.8
- GPU model: RTX 3080
- CUDA Version: 11.6
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/flash/image/segmentation/input.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Any, Dict, List, Optional, Tuple, Union
16
17 import numpy as np
18
19 from flash.core.data.io.input import DataKeys, Input
20 from flash.core.data.utilities.loading import IMG_EXTENSIONS, NP_EXTENSIONS, load_image
21 from flash.core.data.utilities.paths import PATH_TYPE, filter_valid_files
22 from flash.core.data.utilities.samples import to_samples
23 from flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities
24 from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, lazy_import
25 from flash.image.data import ImageFilesInput, ImageNumpyInput, ImageTensorInput
26 from flash.image.segmentation.output import SegmentationLabelsOutput
27
28 if _FIFTYONE_AVAILABLE:
29 fo = lazy_import("fiftyone")
30 SampleCollection = "fiftyone.core.collections.SampleCollection"
31 else:
32 fo = None
33 SampleCollection = None
34
35
36 class SemanticSegmentationInput(Input):
37 num_classes: int
38 labels_map: Dict[int, Tuple[int, int, int]]
39
40 def load_labels_map(
41 self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None
42 ) -> None:
43 if num_classes is not None:
44 self.num_classes = num_classes
45 labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes)
46
47 if labels_map is not None:
48 self.labels_map = labels_map
49
50
51 class SemanticSegmentationTensorInput(SemanticSegmentationInput, ImageTensorInput):
52 def load_data(
53 self,
54 tensor: Any,
55 masks: Any = None,
56 num_classes: Optional[int] = None,
57 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
58 ) -> List[Dict[str, Any]]:
59 self.load_labels_map(num_classes, labels_map)
60 return to_samples(tensor, masks)
61
62 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
63 if DataKeys.TARGET in sample:
64 sample[DataKeys.TARGET] = sample[DataKeys.TARGET].numpy()
65 return super().load_sample(sample)
66
67
68 class SemanticSegmentationNumpyInput(SemanticSegmentationInput, ImageNumpyInput):
69 def load_data(
70 self,
71 array: Any,
72 masks: Any = None,
73 num_classes: Optional[int] = None,
74 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
75 ) -> List[Dict[str, Any]]:
76 self.load_labels_map(num_classes, labels_map)
77 return to_samples(array, masks)
78
79
80 class SemanticSegmentationFilesInput(SemanticSegmentationInput, ImageFilesInput):
81 def load_data(
82 self,
83 files: Union[PATH_TYPE, List[PATH_TYPE]],
84 mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None,
85 num_classes: Optional[int] = None,
86 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
87 ) -> List[Dict[str, Any]]:
88 self.load_labels_map(num_classes, labels_map)
89 if mask_files is None:
90 files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
91 else:
92 files, mask_files = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
93 return to_samples(files, mask_files)
94
95 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
96 if DataKeys.TARGET in sample:
97 sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[:, :, 0]
98 return super().load_sample(sample)
99
100
101 class SemanticSegmentationFolderInput(SemanticSegmentationFilesInput):
102 def load_data(
103 self,
104 folder: PATH_TYPE,
105 mask_folder: Optional[PATH_TYPE] = None,
106 num_classes: Optional[int] = None,
107 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
108 ) -> List[Dict[str, Any]]:
109 self.load_labels_map(num_classes, labels_map)
110 files = os.listdir(folder)
111 files.sort()
112 if mask_folder is not None:
113 mask_files = {os.path.splitext(file)[0]: file for file in os.listdir(mask_folder)}
114 file_names = [os.path.splitext(file)[0] for file in files]
115
116 if len(set(file_names) - mask_files.keys()) != 0:
117 raise ValueError(
118 f"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. All input "
119 f"files must have a corresponding mask file with the same name."
120 )
121
122 files = [os.path.join(folder, file) for file in files]
123 mask_files = [os.path.join(mask_folder, mask_files[file_name]) for file_name in file_names]
124 return super().load_data(files, mask_files)
125 return super().load_data([os.path.join(folder, file) for file in files])
126
127
128 class SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput):
129 label_field: str
130
131 def load_data(
132 self,
133 sample_collection: SampleCollection,
134 label_field: str = "ground_truth",
135 num_classes: Optional[int] = None,
136 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
137 ) -> List[Dict[str, Any]]:
138 self.load_labels_map(num_classes, labels_map)
139
140 self.label_field = label_field
141 label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation)
142 label_utilities.validate(sample_collection)
143
144 self._fo_dataset_name = sample_collection.name
145 return to_samples(sample_collection.values("filepath"))
146
147 def predict_load_data(
148 self,
149 sample_collection: SampleCollection,
150 ) -> List[Dict[str, Any]]:
151 return to_samples(sample_collection.values("filepath"))
152
153 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
154 filepath = sample[DataKeys.INPUT]
155 sample = super().load_sample(sample)
156 if not self.predicting:
157 fo_dataset = fo.load_dataset(self._fo_dataset_name)
158 fo_sample = fo_dataset[filepath]
159 sample[DataKeys.TARGET] = fo_sample[self.label_field].mask
160 return sample
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/flash/image/segmentation/input.py b/src/flash/image/segmentation/input.py
--- a/src/flash/image/segmentation/input.py
+++ b/src/flash/image/segmentation/input.py
@@ -94,7 +94,7 @@
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
if DataKeys.TARGET in sample:
- sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[:, :, 0]
+ sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[0, :, :]
return super().load_sample(sample)
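
For context on why this one-index change matters: after `transpose((2, 0, 1))` the loaded mask is channel-first, so the class-id plane must be taken from axis 0; indexing the last axis instead returns a slice of spatial columns with the wrong shape, which is consistent with the broken masks reported above. A minimal NumPy sketch (array sizes invented purely for illustration):

```py
import numpy as np

# Pretend 4x5 RGB mask image loaded as HWC (height, width, channels).
mask_hwc = np.zeros((4, 5, 3), dtype=np.uint8)

chw = mask_hwc.transpose((2, 0, 1))   # (channels, height, width) == (3, 4, 5)
wrong = chw[:, :, 0]                  # a spatial column slice, shape (3, 4)
right = chw[0, :, :]                  # the class-id plane, shape (4, 5)
print(wrong.shape, right.shape)       # (3, 4) (4, 5)
```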
| {"golden_diff": "diff --git a/src/flash/image/segmentation/input.py b/src/flash/image/segmentation/input.py\n--- a/src/flash/image/segmentation/input.py\n+++ b/src/flash/image/segmentation/input.py\n@@ -94,7 +94,7 @@\n \n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n if DataKeys.TARGET in sample:\n- sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[:, :, 0]\n+ sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[0, :, :]\n return super().load_sample(sample)\n", "issue": "Semantic Segmentation target masks broken >0.7.5\n## \ud83d\udc1b Bug\r\n\r\nThe switch to albumentation in newer releases of lightning-flash seem to have broken transformation of segmentation targets.\r\n\r\nThis is what I expect masks to look like (screenshot showing below code sample running on 0.7.5):\r\n\r\n\r\n\r\nThis is what it looks like on the latest release (0.8.1):\r\n\r\n\r\n\r\n### To Reproduce\r\n\r\nRun the below sample with lightning-flash=0.7.5 and lightning-flash=0.8.1 and compare behavior. \r\n\r\n#### Code sample\r\n\r\n```py\r\nimport torch\r\n\r\nimport flash\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.image import SemanticSegmentation, SemanticSegmentationData\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# 1. Create the DataModule\r\n# The data was generated with the CARLA self-driving simulator as part of the Kaggle Lyft Udacity Challenge.\r\n# More info here: https://www.kaggle.com/kumaresanmanickavelu/lyft-udacity-challenge\r\n# download_data(\r\n# \"https://github.com/ongchinkiat/LyftPerceptionChallenge/releases/download/v0.1/carla-capture-20180513A.zip\",\r\n# \"./data\",\r\n# )\r\n\r\ndatamodule = SemanticSegmentationData.from_folders(\r\n train_folder=\"data/CameraRGB\",\r\n train_target_folder=\"data/CameraSeg\",\r\n val_split=0.1,\r\n transform_kwargs=dict(image_size=(256, 256)),\r\n num_classes=21,\r\n batch_size=4,\r\n)\r\n\r\n# 2. 
Build the task\r\nmodel = SemanticSegmentation(\r\n backbone=\"mobilenetv3_large_100\",\r\n head=\"fpn\",\r\n num_classes=datamodule.num_classes,\r\n)\r\n\r\nn = 3\r\nfig, axarr = plt.subplots(ncols=2, nrows=n, figsize=(8, 4*n))\r\n\r\nfor batch in datamodule.train_dataloader():\r\n print(batch.keys())\r\n for i in range(n):\r\n segm = batch['target'][i]\r\n print(segm.shape)\r\n img = np.rollaxis(batch['input'][i].numpy(), 0, 3)\r\n axarr[i, 0].imshow(img)\r\n axarr[i, 1].imshow(segm)\r\n break\r\n```\r\n\r\n### Environment\r\n\r\n - OS: Ubuntu WSL2\r\n - Python version: 3.10.8\r\n - GPU model: RTX 3080\r\n - CUDA Version: 11.6\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom flash.core.data.io.input import DataKeys, Input\nfrom flash.core.data.utilities.loading import IMG_EXTENSIONS, NP_EXTENSIONS, load_image\nfrom flash.core.data.utilities.paths import PATH_TYPE, filter_valid_files\nfrom flash.core.data.utilities.samples import to_samples\nfrom flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, lazy_import\nfrom flash.image.data import ImageFilesInput, ImageNumpyInput, ImageTensorInput\nfrom flash.image.segmentation.output import SegmentationLabelsOutput\n\nif _FIFTYONE_AVAILABLE:\n fo = lazy_import(\"fiftyone\")\n SampleCollection = \"fiftyone.core.collections.SampleCollection\"\nelse:\n fo = None\n SampleCollection = None\n\n\nclass SemanticSegmentationInput(Input):\n num_classes: int\n labels_map: Dict[int, Tuple[int, int, int]]\n\n def load_labels_map(\n self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None\n ) -> None:\n if num_classes is not None:\n self.num_classes = num_classes\n labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes)\n\n if labels_map is not None:\n self.labels_map = labels_map\n\n\nclass SemanticSegmentationTensorInput(SemanticSegmentationInput, ImageTensorInput):\n def load_data(\n self,\n tensor: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(tensor, masks)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = sample[DataKeys.TARGET].numpy()\n return super().load_sample(sample)\n\n\nclass SemanticSegmentationNumpyInput(SemanticSegmentationInput, ImageNumpyInput):\n def load_data(\n self,\n array: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(array, masks)\n\n\nclass 
SemanticSegmentationFilesInput(SemanticSegmentationInput, ImageFilesInput):\n def load_data(\n self,\n files: Union[PATH_TYPE, List[PATH_TYPE]],\n mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n if mask_files is None:\n files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)\n else:\n files, mask_files = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)\n return to_samples(files, mask_files)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[:, :, 0]\n return super().load_sample(sample)\n\n\nclass SemanticSegmentationFolderInput(SemanticSegmentationFilesInput):\n def load_data(\n self,\n folder: PATH_TYPE,\n mask_folder: Optional[PATH_TYPE] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n files = os.listdir(folder)\n files.sort()\n if mask_folder is not None:\n mask_files = {os.path.splitext(file)[0]: file for file in os.listdir(mask_folder)}\n file_names = [os.path.splitext(file)[0] for file in files]\n\n if len(set(file_names) - mask_files.keys()) != 0:\n raise ValueError(\n f\"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. All input \"\n f\"files must have a corresponding mask file with the same name.\"\n )\n\n files = [os.path.join(folder, file) for file in files]\n mask_files = [os.path.join(mask_folder, mask_files[file_name]) for file_name in file_names]\n return super().load_data(files, mask_files)\n return super().load_data([os.path.join(folder, file) for file in files])\n\n\nclass SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput):\n label_field: str\n\n def load_data(\n self,\n sample_collection: SampleCollection,\n label_field: str = \"ground_truth\",\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n\n self.label_field = label_field\n label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation)\n label_utilities.validate(sample_collection)\n\n self._fo_dataset_name = sample_collection.name\n return to_samples(sample_collection.values(\"filepath\"))\n\n def predict_load_data(\n self,\n sample_collection: SampleCollection,\n ) -> List[Dict[str, Any]]:\n return to_samples(sample_collection.values(\"filepath\"))\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample = super().load_sample(sample)\n if not self.predicting:\n fo_dataset = fo.load_dataset(self._fo_dataset_name)\n fo_sample = fo_dataset[filepath]\n sample[DataKeys.TARGET] = fo_sample[self.label_field].mask\n return sample\n", "path": "src/flash/image/segmentation/input.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom flash.core.data.io.input import DataKeys, Input\nfrom flash.core.data.utilities.loading import IMG_EXTENSIONS, NP_EXTENSIONS, load_image\nfrom flash.core.data.utilities.paths import PATH_TYPE, filter_valid_files\nfrom flash.core.data.utilities.samples import to_samples\nfrom flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, lazy_import\nfrom flash.image.data import ImageFilesInput, ImageNumpyInput, ImageTensorInput\nfrom flash.image.segmentation.output import SegmentationLabelsOutput\n\nif _FIFTYONE_AVAILABLE:\n fo = lazy_import(\"fiftyone\")\n SampleCollection = \"fiftyone.core.collections.SampleCollection\"\nelse:\n fo = None\n SampleCollection = None\n\n\nclass SemanticSegmentationInput(Input):\n num_classes: int\n labels_map: Dict[int, Tuple[int, int, int]]\n\n def load_labels_map(\n self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None\n ) -> None:\n if num_classes is not None:\n self.num_classes = num_classes\n labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes)\n\n if labels_map is not None:\n self.labels_map = labels_map\n\n\nclass SemanticSegmentationTensorInput(SemanticSegmentationInput, ImageTensorInput):\n def load_data(\n self,\n tensor: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(tensor, masks)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = sample[DataKeys.TARGET].numpy()\n return super().load_sample(sample)\n\n\nclass SemanticSegmentationNumpyInput(SemanticSegmentationInput, ImageNumpyInput):\n def load_data(\n self,\n array: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(array, masks)\n\n\nclass SemanticSegmentationFilesInput(SemanticSegmentationInput, ImageFilesInput):\n def load_data(\n self,\n files: Union[PATH_TYPE, List[PATH_TYPE]],\n mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n if mask_files is None:\n files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)\n else:\n files, mask_files = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)\n return to_samples(files, mask_files)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[0, :, :]\n return super().load_sample(sample)\n\n\nclass SemanticSegmentationFolderInput(SemanticSegmentationFilesInput):\n def load_data(\n self,\n folder: PATH_TYPE,\n mask_folder: 
Optional[PATH_TYPE] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n files = os.listdir(folder)\n files.sort()\n if mask_folder is not None:\n mask_files = {os.path.splitext(file)[0]: file for file in os.listdir(mask_folder)}\n file_names = [os.path.splitext(file)[0] for file in files]\n\n if len(set(file_names) - mask_files.keys()) != 0:\n raise ValueError(\n f\"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. All input \"\n f\"files must have a corresponding mask file with the same name.\"\n )\n\n files = [os.path.join(folder, file) for file in files]\n mask_files = [os.path.join(mask_folder, mask_files[file_name]) for file_name in file_names]\n return super().load_data(files, mask_files)\n return super().load_data([os.path.join(folder, file) for file in files])\n\n\nclass SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput):\n label_field: str\n\n def load_data(\n self,\n sample_collection: SampleCollection,\n label_field: str = \"ground_truth\",\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n\n self.label_field = label_field\n label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation)\n label_utilities.validate(sample_collection)\n\n self._fo_dataset_name = sample_collection.name\n return to_samples(sample_collection.values(\"filepath\"))\n\n def predict_load_data(\n self,\n sample_collection: SampleCollection,\n ) -> List[Dict[str, Any]]:\n return to_samples(sample_collection.values(\"filepath\"))\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample = super().load_sample(sample)\n if not self.predicting:\n fo_dataset = fo.load_dataset(self._fo_dataset_name)\n fo_sample = fo_dataset[filepath]\n sample[DataKeys.TARGET] = fo_sample[self.label_field].mask\n return sample\n", "path": "src/flash/image/segmentation/input.py"}]} |
gh_patches_debug_1383 | rasdani/github-patches | git_diff | conan-io__conan-center-index-5412 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] all: "Access is denied" in os.rename() on Windows
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **almost all packages affected**
* Operating System+version: **Windows 10**
* Compiler+version: **MSVC 16**
* Conan version: **conan 1.35.2**
* Python version: **Python 3.8.7**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os_build=Windows
os=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
compiler.runtime=MD
build_type=Release
```
### Steps to reproduce (Include if Applicable)
This is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774
However, most recipes still use `os.rename()` instead of `tools.rename()`.
### Log
```
b2/4.2.0: Configuring sources in C:\Users\xxx\.conan\data\b2\4.2.0\_\_\source
ERROR: b2/4.2.0: Error in source() method, line 58
os.rename(extracted_dir, "source")
PermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/bzip2/all/conanfile.py`
Content:
```
1 import os
2 import textwrap
3 from conans import ConanFile, CMake, tools
4
5 required_conan_version = ">=1.33.0"
6
7
8 class Bzip2Conan(ConanFile):
9 name = "bzip2"
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "http://www.bzip.org"
12 license = "bzip2-1.0.8"
13 description = "bzip2 is a free and open-source file compression program that uses the Burrows Wheeler algorithm."
14 topics = ("conan", "bzip2", "data-compressor", "file-compression")
15
16 settings = "os", "compiler", "arch", "build_type"
17 options = {
18 "shared": [True, False],
19 "fPIC": [True, False],
20 "build_executable": [True, False]
21 }
22 default_options = {
23 "shared": False,
24 "fPIC": True,
25 "build_executable": True
26 }
27
28 exports_sources = ["CMakeLists.txt", "patches/**"]
29 generators = "cmake"
30 _cmake = None
31
32 @property
33 def _source_subfolder(self):
34 return "source_subfolder"
35
36 def config_options(self):
37 if self.settings.os == "Windows":
38 del self.options.fPIC
39 self.license = "bzip2-{}".format(self.version)
40
41 def configure(self):
42 if self.options.shared:
43 del self.options.fPIC
44 del self.settings.compiler.libcxx
45 del self.settings.compiler.cppstd
46
47 def source(self):
48 tools.get(**self.conan_data["sources"][self.version])
49 folder_name = "%s-%s" % (self.name, self.version)
50 os.rename(folder_name, self._source_subfolder)
51
52 def _configure_cmake(self):
53 if self._cmake:
54 return self._cmake
55 self._cmake = CMake(self)
56 self._cmake.definitions["BZ2_VERSION_STRING"] = self.version
57 self._cmake.definitions["BZ2_VERSION_MAJOR"] = tools.Version(self.version).major
58 self._cmake.definitions["BZ2_BUILD_EXE"] = self.options.build_executable
59 self._cmake.configure()
60 return self._cmake
61
62 def build(self):
63 for patch in self.conan_data.get("patches", {}).get(self.version, []):
64 tools.patch(**patch)
65 cmake = self._configure_cmake()
66 cmake.build()
67
68 def package(self):
69 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
70 cmake = self._configure_cmake()
71 cmake.install()
72 self._create_cmake_module_variables(
73 os.path.join(self.package_folder, self._module_subfolder, self._module_file)
74 )
75
76 @staticmethod
77 def _create_cmake_module_variables(module_file):
78 content = textwrap.dedent("""\
79 if(DEFINED BZip2_FOUND)
80 set(BZIP2_FOUND ${BZip2_FOUND})
81 set(BZIP2_NEED_PREFIX TRUE)
82 endif()
83 if(DEFINED BZip2_INCLUDE_DIR)
84 set(BZIP2_INCLUDE_DIRS ${BZip2_INCLUDE_DIR})
85 set(BZIP2_INCLUDE_DIR ${BZip2_INCLUDE_DIR})
86 endif()
87 if(DEFINED BZip2_LIBRARIES)
88 set(BZIP2_LIBRARIES ${BZip2_LIBRARIES})
89 endif()
90 if(DEFINED BZip2_VERSION)
91 set(BZIP2_VERSION_STRING ${BZip2_VERSION})
92 endif()
93 """)
94 tools.save(module_file, content)
95
96 @property
97 def _module_subfolder(self):
98 return os.path.join("lib", "cmake")
99
100 @property
101 def _module_file(self):
102 return "conan-official-{}-variables.cmake".format(self.name)
103
104 def package_info(self):
105 self.cpp_info.names["cmake_find_package"] = "BZip2"
106 self.cpp_info.names["cmake_find_package_multi"] = "BZip2"
107 self.cpp_info.builddirs.append(self._module_subfolder)
108 self.cpp_info.build_modules["cmake_find_package"] = [os.path.join(self._module_subfolder, self._module_file)]
109 self.cpp_info.libs = ["bz2"]
110
111 if self.options.build_executable:
112 bin_path = os.path.join(self.package_folder, "bin")
113 self.output.info("Appending PATH environment variable: {}".format(bin_path))
114 self.env_info.PATH.append(bin_path)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/bzip2/all/conanfile.py b/recipes/bzip2/all/conanfile.py
--- a/recipes/bzip2/all/conanfile.py
+++ b/recipes/bzip2/all/conanfile.py
@@ -45,9 +45,7 @@
del self.settings.compiler.cppstd
def source(self):
- tools.get(**self.conan_data["sources"][self.version])
- folder_name = "%s-%s" % (self.name, self.version)
- os.rename(folder_name, self._source_subfolder)
+ tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
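
The patch sidesteps `os.rename()` entirely by letting `tools.get` unpack straight into the destination folder with `strip_root=True` (available since Conan 1.33, which this recipe already requires). Below is a hedged sketch of the two patterns, with a made-up recipe name; where a rename really is unavoidable, `tools.rename` from conan-io/conan#6774 retries on Windows instead of failing with "Access is denied":

```py
from conans import ConanFile, tools

class ExampleConan(ConanFile):   # hypothetical recipe, for illustration only
    name = "example"
    _source_subfolder = "source_subfolder"

    def source(self):
        # Preferred: extract directly into the target folder, no rename step at all.
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)
        # If a rename cannot be avoided, prefer the retrying helper over os.rename:
        # tools.rename("extracted-dir", self._source_subfolder)
```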
| {"golden_diff": "diff --git a/recipes/bzip2/all/conanfile.py b/recipes/bzip2/all/conanfile.py\n--- a/recipes/bzip2/all/conanfile.py\n+++ b/recipes/bzip2/all/conanfile.py\n@@ -45,9 +45,7 @@\n del self.settings.compiler.cppstd\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version])\n- folder_name = \"%s-%s\" % (self.name, self.version)\n- os.rename(folder_name, self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n \n def _configure_cmake(self):\n if self._cmake:\n", "issue": "[package] all: \"Access is denied\" in os.rename() on Windows\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **almost all packages affected**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **MSVC 16**\r\n * Conan version: **conan 1.35.2**\r\n * Python version: **Python 3.8.7**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos_build=Windows\r\nos=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\ncompiler.runtime=MD\r\nbuild_type=Release\r\n```\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nThis is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774\r\nHowever most recipes still use `os.rename()` and not `tools.rename()`. \r\n\r\n### Log\r\n```\r\nb2/4.2.0: Configuring sources in C:\\Users\\xxx\\.conan\\data\\b2\\4.2.0\\_\\_\\source\r\nERROR: b2/4.2.0: Error in source() method, line 58\r\nos.rename(extracted_dir, \"source\")\r\nPermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'\r\n```\r\n\n", "before_files": [{"content": "import os\nimport textwrap\nfrom conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Bzip2Conan(ConanFile):\n name = \"bzip2\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.bzip.org\"\n license = \"bzip2-1.0.8\"\n description = \"bzip2 is a free and open-source file compression program that uses the Burrows Wheeler algorithm.\"\n topics = (\"conan\", \"bzip2\", \"data-compressor\", \"file-compression\")\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_executable\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"build_executable\": True\n }\n\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n self.license = \"bzip2-{}\".format(self.version)\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n folder_name = \"%s-%s\" % (self.name, self.version)\n os.rename(folder_name, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BZ2_VERSION_STRING\"] = self.version\n self._cmake.definitions[\"BZ2_VERSION_MAJOR\"] = tools.Version(self.version).major\n self._cmake.definitions[\"BZ2_BUILD_EXE\"] = self.options.build_executable\n 
self._cmake.configure()\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n self._create_cmake_module_variables(\n os.path.join(self.package_folder, self._module_subfolder, self._module_file)\n )\n\n @staticmethod\n def _create_cmake_module_variables(module_file):\n content = textwrap.dedent(\"\"\"\\\n if(DEFINED BZip2_FOUND)\n set(BZIP2_FOUND ${BZip2_FOUND})\n set(BZIP2_NEED_PREFIX TRUE)\n endif()\n if(DEFINED BZip2_INCLUDE_DIR)\n set(BZIP2_INCLUDE_DIRS ${BZip2_INCLUDE_DIR})\n set(BZIP2_INCLUDE_DIR ${BZip2_INCLUDE_DIR})\n endif()\n if(DEFINED BZip2_LIBRARIES)\n set(BZIP2_LIBRARIES ${BZip2_LIBRARIES})\n endif()\n if(DEFINED BZip2_VERSION)\n set(BZIP2_VERSION_STRING ${BZip2_VERSION})\n endif()\n \"\"\")\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file(self):\n return \"conan-official-{}-variables.cmake\".format(self.name)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"BZip2\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"BZip2\"\n self.cpp_info.builddirs.append(self._module_subfolder)\n self.cpp_info.build_modules[\"cmake_find_package\"] = [os.path.join(self._module_subfolder, self._module_file)]\n self.cpp_info.libs = [\"bz2\"]\n\n if self.options.build_executable:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/bzip2/all/conanfile.py"}], "after_files": [{"content": "import os\nimport textwrap\nfrom conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Bzip2Conan(ConanFile):\n name = \"bzip2\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.bzip.org\"\n license = \"bzip2-1.0.8\"\n description = \"bzip2 is a free and open-source file compression program that uses the Burrows Wheeler algorithm.\"\n topics = (\"conan\", \"bzip2\", \"data-compressor\", \"file-compression\")\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_executable\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"build_executable\": True\n }\n\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n self.license = \"bzip2-{}\".format(self.version)\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BZ2_VERSION_STRING\"] = self.version\n self._cmake.definitions[\"BZ2_VERSION_MAJOR\"] = tools.Version(self.version).major\n self._cmake.definitions[\"BZ2_BUILD_EXE\"] = self.options.build_executable\n 
self._cmake.configure()\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n self._create_cmake_module_variables(\n os.path.join(self.package_folder, self._module_subfolder, self._module_file)\n )\n\n @staticmethod\n def _create_cmake_module_variables(module_file):\n content = textwrap.dedent(\"\"\"\\\n if(DEFINED BZip2_FOUND)\n set(BZIP2_FOUND ${BZip2_FOUND})\n set(BZIP2_NEED_PREFIX TRUE)\n endif()\n if(DEFINED BZip2_INCLUDE_DIR)\n set(BZIP2_INCLUDE_DIRS ${BZip2_INCLUDE_DIR})\n set(BZIP2_INCLUDE_DIR ${BZip2_INCLUDE_DIR})\n endif()\n if(DEFINED BZip2_LIBRARIES)\n set(BZIP2_LIBRARIES ${BZip2_LIBRARIES})\n endif()\n if(DEFINED BZip2_VERSION)\n set(BZIP2_VERSION_STRING ${BZip2_VERSION})\n endif()\n \"\"\")\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file(self):\n return \"conan-official-{}-variables.cmake\".format(self.name)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"BZip2\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"BZip2\"\n self.cpp_info.builddirs.append(self._module_subfolder)\n self.cpp_info.build_modules[\"cmake_find_package\"] = [os.path.join(self._module_subfolder, self._module_file)]\n self.cpp_info.libs = [\"bz2\"]\n\n if self.options.build_executable:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/bzip2/all/conanfile.py"}]} |
gh_patches_debug_1384 | rasdani/github-patches | git_diff | gammapy__gammapy-3911 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
string representation of various Maker classes may cause a TypeError
This was tested against Gammapy 0.19 and the development version.
In some cases, printing an instance of a `gammapy.makers.Maker` subclass (or otherwise using its string representation) may cause a TypeError.
An example directly from the tutorials (introduction, low-level analysis):
```
from astropy import units
from regions import CircleSkyRegion
from gammapy.makers import FoVBackgroundMaker
from astropy.coordinates import SkyCoord
from gammapy.maps import WcsGeom, MapAxis
energy_axis = MapAxis.from_energy_bounds(1.0, 10.0, 4, unit="TeV")
geom = WcsGeom.create(
skydir=(83.633, 22.014),
binsz=0.02,
width=(2, 2),
frame="icrs",
proj="CAR",
axes=[],
)
circle = CircleSkyRegion(
center=SkyCoord("83.63 deg", "22.14 deg"), radius=0.2 * units.deg
)
exclusion_mask = ~geom.region_mask(regions=[circle])
maker_fov = FoVBackgroundMaker(method="fit", exclusion_mask=exclusion_mask)
str(maker_fov)
```
will cause a
```
TypeError: Cannot parse "not available" as a Quantity. It does not start with a number.
```
(full traceback at the bottom).
The reason is in the `__str__` implementation of the `gammapy.makers.Maker` abstract class:
```
def __str__(self):
# <snip>
for name in names:
value = getattr(self, name, "not available")
if value == "not available":
continue
else:
s += f"\t{name:{max_len}s}: {value}\n"
return s.expandtabs(tabsize=2)
```
When an attribute is not found, it is set to the string "not available". Otherwise, the attribute's value is retrieved.
The resulting value is then compared to the string "not available" to determine whether the attribute exists. But some classes cannot be compared to a string. In this particular case, comparing an instance of `WcsNDMap` fails this comparison, resulting (indirectly) in the `TypeError`.
Perhaps the most Pythonic solution is to have `WcsNDMap` handle comparisons with any arbitrary type, and return `False`. This is what Python does: `1 == "abc"` is valid and returns `False`; similarly for e.g. `1 == ["abc", 5.5]`.
Perhaps easier, and in my opinion semantically better, is to use
```
value = getattr(self, name, None)
if value is None:
continue
s += f"\t{name:{max_len}s}: {value}\n"
```
since `None` signifies the optional type in Python.
Though even better, in my opinion, is to simply use
```
for name in names:
try:
value = getattr(self, name)
except AttributeError:
continue
s += f"\t{name:{max_len}s}: {value}\n"
```
After all, this is what exceptions are for. (People sometimes mention speed reasons if the lookup fails a lot of times, but I don't think that's relevant here for a `__str__` implementation.)
I would even simply use `self.name`, but that'll fail because `Maker` is an abstract class, so more dynamic retrieval of attributes is required. I assume this is why it's implemented in its current way.
-----
Full traceback:
```
Traceback (most recent call last):
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py", line 333, in __new__
value = float(v.group())
AttributeError: 'NoneType' object has no attribute 'group'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "gammapy_maker_str.py", line 22, in <module>
str(maker_fov)
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/makers/core.py", line 31, in __str__
if value == "not available":
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py", line 1612, in __eq__
return self._arithmetics(np.equal, other, copy=True)
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py", line 1552, in _arithmetics
q = u.Quantity(other, copy=False)
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py", line 338, in __new__
.format(value, cls.__name__))
TypeError: Cannot parse "not available" as a Quantity. It does not start with a number.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gammapy/makers/core.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import abc
3 import numpy as np
4
5 __all__ = ["Maker"]
6
7
8 class Maker(abc.ABC):
9 """Abstract maker base class."""
10
11 @property
12 @abc.abstractmethod
13 def tag(self):
14 pass
15
16 @abc.abstractmethod
17 def run(self):
18 pass
19
20 def __str__(self):
21 s = f"{self.__class__.__name__}\n"
22 s += "-" * (len(s) - 1) + "\n\n"
23
24 names = self.__init__.__code__.co_varnames
25
26 max_len = np.max([len(_) for _ in names]) + 1
27
28 for name in names:
29 value = getattr(self, name, "not available")
30
31 if value == "not available":
32 continue
33 else:
34 s += f"\t{name:{max_len}s}: {value}\n"
35
36 return s.expandtabs(tabsize=2)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gammapy/makers/core.py b/gammapy/makers/core.py
--- a/gammapy/makers/core.py
+++ b/gammapy/makers/core.py
@@ -26,9 +26,9 @@
max_len = np.max([len(_) for _ in names]) + 1
for name in names:
- value = getattr(self, name, "not available")
+ value = getattr(self, name, None)
- if value == "not available":
+ if value is None:
continue
else:
s += f"\t{name:{max_len}s}: {value}\n"
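
The chosen fix is the `None`-sentinel variant proposed in the issue: `getattr(self, name, None)` combined with an `is None` identity test never invokes the attribute's own `__eq__`, so array-like values such as the exclusion mask can no longer derail `__str__`. A stripped-down sketch of the pattern (class and attribute names are invented for illustration):

```py
class Demo:
    def __init__(self, exclusion_mask):
        self.exclusion_mask = exclusion_mask   # could be any object, even one with a broadcasting __eq__

d = Demo(exclusion_mask=object())
for name in ("exclusion_mask", "missing_attribute"):
    value = getattr(d, name, None)
    if value is None:        # identity check: never calls value.__eq__
        continue
    print(f"{name}: {value}")
```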
| {"golden_diff": "diff --git a/gammapy/makers/core.py b/gammapy/makers/core.py\n--- a/gammapy/makers/core.py\n+++ b/gammapy/makers/core.py\n@@ -26,9 +26,9 @@\n max_len = np.max([len(_) for _ in names]) + 1\n \n for name in names:\n- value = getattr(self, name, \"not available\")\n+ value = getattr(self, name, None)\n \n- if value == \"not available\":\n+ if value is None:\n continue\n else:\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\n", "issue": "string representation of various Maker classes may cause a TypeError\nThis was tested against Gammapy 0.19 and the development version.\r\n\r\nIn some cases, printing (or using the string representation in another way) of an instance of a `gammapy.makers.Maker` subclass may cause a TypeError.\r\n\r\nAn example directly from the tutorials (introduction, low-level analysis):\r\n```\r\nfrom astropy import units\r\nfrom regions import CircleSkyRegion\r\nfrom gammapy.makers import FoVBackgroundMaker\r\nfrom astropy.coordinates import SkyCoord\r\nfrom gammapy.maps import WcsGeom, MapAxis\r\n\r\nenergy_axis = MapAxis.from_energy_bounds(1.0, 10.0, 4, unit=\"TeV\")\r\ngeom = WcsGeom.create(\r\n skydir=(83.633, 22.014),\r\n binsz=0.02,\r\n width=(2, 2),\r\n frame=\"icrs\",\r\n proj=\"CAR\",\r\n axes=[],\r\n)\r\ncircle = CircleSkyRegion(\r\n center=SkyCoord(\"83.63 deg\", \"22.14 deg\"), radius=0.2 * units.deg\r\n)\r\nexclusion_mask = ~geom.region_mask(regions=[circle])\r\nmaker_fov = FoVBackgroundMaker(method=\"fit\", exclusion_mask=exclusion_mask)\r\nstr(maker_fov)\r\n```\r\n\r\nwill cause a \r\n```\r\nTypeError: Cannot parse \"not available\" as a Quantity. It does not start with a number.\r\n```\r\n(full traceback at the bottom).\r\n\r\nThe reason is in the `__str__` implementation of the `gammapy.makers.Maker` abstract class:\r\n```\r\n def __str__(self):\r\n # <snip>\r\n for name in names:\r\n value = getattr(self, name, \"not available\")\r\n\r\n if value == \"not available\":\r\n continue\r\n else:\r\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\r\n\r\n return s.expandtabs(tabsize=2)\r\n```\r\n\r\nWhen an attribute is not found, it is set to the string \"not available\". Otherwise, the attribute's value is retrieved.\r\nThe resulting value is then compared to the string \"not available\" to determine whether it is an existing attribute. But some classes can't compare to string. In this particular case, comparing an instance of `WcsNDMap` fails this comparison, resulting (indirectly) in the `TypeError`.\r\n\r\nPerhaps the most Pythonic solution is to have `WcsNDMap` handle comparisons with any arbirtrary type, and return `False`. This is what Python does: `1 == \"abc\"` is valid and returns `False`; similar for e.g. `1 = [\"abc\", 5.5]`.\r\n\r\nPerhaps easier, and in my opinion semantically better, is to use\r\n```\r\nvalue = getattr(self, name, None)\r\nif value is None:\r\n continue\r\ns += f\"\\t{name:{max_len}s}: {value}\\n\"\r\n```\r\nsince `None` signifies the optional type in Python.\r\n\r\nThough even better, in my opinion, is to simply use\r\n```\r\nfor name in names:\r\n try:\r\n value = getattr(self, name)\r\n except AttributeError:\r\n continue\r\n s += f\"\\t{name:{max_len}s}: {value}\\n\" \r\n```\r\nAfter all, this is what exceptions are for. 
(People sometimes mention speed reasons if the lookup fails a lot of times, but I don't think that's relevant here for a `__str__` implementation.)\r\n\r\nI would even simply use `self.name`, but that'll fail because `Maker` is an abstract class, so more dynamic retrieval of attributes is required. I assume this is why it's implemented in its current way.\r\n\r\n\r\n-----\r\n\r\nFull traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py\", line 333, in __new__\r\n value = float(v.group())\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"gammapy_maker_str.py\", line 22, in <module>\r\n str(maker_fov)\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/makers/core.py\", line 31, in __str__\r\n if value == \"not available\":\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py\", line 1612, in __eq__\r\n return self._arithmetics(np.equal, other, copy=True)\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py\", line 1552, in _arithmetics\r\n q = u.Quantity(other, copy=False)\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py\", line 338, in __new__\r\n .format(value, cls.__name__))\r\nTypeError: Cannot parse \"not available\" as a Quantity. It does not start with a number.\r\n```\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport numpy as np\n\n__all__ = [\"Maker\"]\n\n\nclass Maker(abc.ABC):\n \"\"\"Abstract maker base class.\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def run(self):\n pass\n\n def __str__(self):\n s = f\"{self.__class__.__name__}\\n\"\n s += \"-\" * (len(s) - 1) + \"\\n\\n\"\n\n names = self.__init__.__code__.co_varnames\n\n max_len = np.max([len(_) for _ in names]) + 1\n\n for name in names:\n value = getattr(self, name, \"not available\")\n\n if value == \"not available\":\n continue\n else:\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\n\n return s.expandtabs(tabsize=2)\n", "path": "gammapy/makers/core.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport numpy as np\n\n__all__ = [\"Maker\"]\n\n\nclass Maker(abc.ABC):\n \"\"\"Abstract maker base class.\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def run(self):\n pass\n\n def __str__(self):\n s = f\"{self.__class__.__name__}\\n\"\n s += \"-\" * (len(s) - 1) + \"\\n\\n\"\n\n names = self.__init__.__code__.co_varnames\n\n max_len = np.max([len(_) for _ in names]) + 1\n\n for name in names:\n value = getattr(self, name, None)\n\n if value is None:\n continue\n else:\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\n\n return s.expandtabs(tabsize=2)\n", "path": "gammapy/makers/core.py"}]} |
gh_patches_debug_1385 | rasdani/github-patches | git_diff | zulip__zulip-8805 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors when running `manage.py makemessages`
I get these errors in my local development environment:
```
(zulip-py3-venv) tabbott@zaset:~/zulip$ ./manage.py makemessages
UnicodeDecodeError: skipped file brainstorm_notes.txt in ./var/uploads/files/15/3d/47qkB-BgaArZ7wrTMTr-nsTK (reason: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte)
UnicodeDecodeError: skipped file -.txt in ./var/uploads/files/15/9e/fqVojOZvoTZuGZ39r2_37NBn (reason: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte)
UnicodeDecodeError: skipped file -.txt in ./var/uploads/files/2/fc/IfxNDeGaie57gWdOOok1Pyb5 (reason: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte)
processing locale ca
processing locale es
```
I'm not sure why `manage.py makemessages` is parsing these uploaded files at all.
@umairwaheed can you try to track this down? We don't have a clear reproducer, but it seems like this sort of thing should be findable when reading code.
--- END ISSUE ---
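
The errors above come from `makemessages` recursing into everything under the project root, including `var/uploads`, while collecting files for extraction. Django's `makemessages` honours `ignore_patterns`, and the command below already appends `docs/*` to that list, so one possible direction (a sketch only, not necessarily the patch that was eventually merged) is to extend the same list so uploaded files under `var/` are skipped:

```py
# Hypothetical helper mirroring the ignore_patterns handling in handle_django_locales().
def build_ignore_patterns(options):
    ignore_patterns = options.get('ignore_patterns', [])
    ignore_patterns.extend(['docs/*', 'var/*'])   # 'var/*' keeps var/uploads out of extraction
    options['ignore_patterns'] = ignore_patterns
    return options

print(build_ignore_patterns({}))   # {'ignore_patterns': ['docs/*', 'var/*']}
```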
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/management/commands/makemessages.py`
Content:
```
1 """
2 The contents of this file are taken from
3 https://github.com/niwinz/django-jinja/blob/master/django_jinja/management/commands/makemessages.py
4
5 Jinja2's i18n functionality is not exactly the same as Django's.
6 In particular, the tags names and their syntax are different:
7
8 1. The Django ``trans`` tag is replaced by a _() global.
9 2. The Django ``blocktrans`` tag is called ``trans``.
10
11 (1) isn't an issue, since the whole ``makemessages`` process is based on
12 converting the template tags to ``_()`` calls. However, (2) means that
13 those Jinja2 ``trans`` tags will not be picked up by Django's
14 ``makemessages`` command.
15
16 There aren't any nice solutions here. While Jinja2's i18n extension does
17 come with extraction capabilities built in, the code behind ``makemessages``
18 unfortunately isn't extensible, so we can:
19
20 * Duplicate the command + code behind it.
21 * Offer a separate command for Jinja2 extraction.
22 * Try to get Django to offer hooks into makemessages().
23 * Monkey-patch.
24
25 We are currently doing that last thing. It turns out there we are lucky
26 for once: It's simply a matter of extending two regular expressions.
27 Credit for the approach goes to:
28 http://stackoverflow.com/questions/2090717
29
30 """
31
32 import glob
33 import json
34 import os
35 import re
36 from argparse import ArgumentParser
37 from typing import Any, Dict, Iterable, List, Mapping, Text
38
39 from django.conf import settings
40 from django.core.management.commands import makemessages
41 from django.template.base import BLOCK_TAG_END, BLOCK_TAG_START
42 from django.utils.translation import template
43
44 from zerver.lib.str_utils import force_text
45
46 strip_whitespace_right = re.compile("(%s-?\\s*(trans|pluralize).*?-%s)\\s+" % (
47 BLOCK_TAG_START, BLOCK_TAG_END), re.U)
48 strip_whitespace_left = re.compile("\\s+(%s-\\s*(endtrans|pluralize).*?-?%s)" % (
49 BLOCK_TAG_START, BLOCK_TAG_END), re.U)
50
51 regexes = ['{{#tr .*?}}([\s\S]*?){{/tr}}', # '.' doesn't match '\n' by default
52 '{{\s*t "(.*?)"\W*}}',
53 "{{\s*t '(.*?)'\W*}}",
54 "i18n\.t\('([^\']*?)'\)",
55 "i18n\.t\('(.*?)',\s*.*?[^,]\)",
56 'i18n\.t\("([^\"]*?)"\)',
57 'i18n\.t\("(.*?)",\s*.*?[^,]\)',
58 ]
59 tags = [('err_', "error"),
60 ]
61
62 frontend_compiled_regexes = [re.compile(regex) for regex in regexes]
63 multiline_js_comment = re.compile("/\*.*?\*/", re.DOTALL)
64 singleline_js_comment = re.compile("//.*?\n")
65
66 def strip_whitespaces(src: Text) -> Text:
67 src = strip_whitespace_left.sub('\\1', src)
68 src = strip_whitespace_right.sub('\\1', src)
69 return src
70
71 class Command(makemessages.Command):
72
73 xgettext_options = makemessages.Command.xgettext_options
74 for func, tag in tags:
75 xgettext_options += ['--keyword={}:1,"{}"'.format(func, tag)]
76
77 def add_arguments(self, parser: ArgumentParser) -> None:
78 super(Command, self).add_arguments(parser)
79 parser.add_argument('--frontend-source', type=str,
80 default='static/templates',
81 help='Name of the Handlebars template directory')
82 parser.add_argument('--frontend-output', type=str,
83 default='static/locale',
84 help='Name of the frontend messages output directory')
85 parser.add_argument('--frontend-namespace', type=str,
86 default='translations.json',
87 help='Namespace of the frontend locale file')
88
89 def handle(self, *args: Any, **options: Any) -> None:
90 self.handle_django_locales(*args, **options)
91 self.handle_frontend_locales(**options)
92
93 def handle_frontend_locales(self, *,
94 frontend_source: str,
95 frontend_output: str,
96 frontend_namespace: str,
97 locale: List[str],
98 exclude: List[str],
99 all: bool,
100 **options: Any) -> None:
101 self.frontend_source = frontend_source
102 self.frontend_output = frontend_output
103 self.frontend_namespace = frontend_namespace
104 self.frontend_locale = locale
105 self.frontend_exclude = exclude
106 self.frontend_all = all
107
108 translation_strings = self.get_translation_strings()
109 self.write_translation_strings(translation_strings)
110
111 def handle_django_locales(self, *args: Any, **options: Any) -> None:
112 old_endblock_re = template.endblock_re
113 old_block_re = template.block_re
114 old_constant_re = template.constant_re
115
116 old_templatize = template.templatize
117 # Extend the regular expressions that are used to detect
118 # translation blocks with an "OR jinja-syntax" clause.
119 template.endblock_re = re.compile(
120 template.endblock_re.pattern + '|' + r"""^-?\s*endtrans\s*-?$""")
121 template.block_re = re.compile(
122 template.block_re.pattern + '|' + r"""^-?\s*trans(?:\s+(?!'|")(?=.*?=.*?)|\s*-?$)""")
123 template.plural_re = re.compile(
124 template.plural_re.pattern + '|' + r"""^-?\s*pluralize(?:\s+.+|-?$)""")
125 template.constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?')).*\)""")
126
127 def my_templatize(src: Text, *args: Any, **kwargs: Any) -> Text:
128 new_src = strip_whitespaces(src)
129 return old_templatize(new_src, *args, **kwargs)
130
131 template.templatize = my_templatize
132
133 try:
134 ignore_patterns = options.get('ignore_patterns', [])
135 ignore_patterns.append('docs/*')
136 options['ignore_patterns'] = ignore_patterns
137 super().handle(*args, **options)
138 finally:
139 template.endblock_re = old_endblock_re
140 template.block_re = old_block_re
141 template.templatize = old_templatize
142 template.constant_re = old_constant_re
143
144 def extract_strings(self, data: str) -> List[str]:
145 translation_strings = [] # type: List[str]
146 for regex in frontend_compiled_regexes:
147 for match in regex.findall(data):
148 match = match.strip()
149 match = ' '.join(line.strip() for line in match.splitlines())
150 match = match.replace('\n', '\\n')
151 translation_strings.append(match)
152
153 return translation_strings
154
155 def ignore_javascript_comments(self, data: str) -> str:
156 # Removes multi line comments.
157 data = multiline_js_comment.sub('', data)
158 # Removes single line (//) comments.
159 data = singleline_js_comment.sub('', data)
160 return data
161
162 def get_translation_strings(self) -> List[str]:
163 translation_strings = [] # type: List[str]
164 dirname = self.get_template_dir()
165
166 for dirpath, dirnames, filenames in os.walk(dirname):
167 for filename in [f for f in filenames if f.endswith(".handlebars")]:
168 if filename.startswith('.'):
169 continue
170 with open(os.path.join(dirpath, filename), 'r') as reader:
171 data = reader.read()
172 translation_strings.extend(self.extract_strings(data))
173
174 dirname = os.path.join(settings.DEPLOY_ROOT, 'static/js')
175 for filename in os.listdir(dirname):
176 if filename.endswith('.js') and not filename.startswith('.'):
177 with open(os.path.join(dirname, filename)) as reader:
178 data = reader.read()
179 data = self.ignore_javascript_comments(data)
180 translation_strings.extend(self.extract_strings(data))
181
182 return list(set(translation_strings))
183
184 def get_template_dir(self) -> str:
185 return self.frontend_source
186
187 def get_namespace(self) -> str:
188 return self.frontend_namespace
189
190 def get_locales(self) -> Iterable[str]:
191 locale = self.frontend_locale
192 exclude = self.frontend_exclude
193 process_all = self.frontend_all
194
195 paths = glob.glob('%s/*' % self.default_locale_path,)
196 all_locales = [os.path.basename(path) for path in paths if os.path.isdir(path)]
197
198 # Account for excluded locales
199 if process_all:
200 return all_locales
201 else:
202 locales = locale or all_locales
203 return set(locales) - set(exclude)
204
205 def get_base_path(self) -> str:
206 return self.frontend_output
207
208 def get_output_paths(self) -> Iterable[str]:
209 base_path = self.get_base_path()
210 locales = self.get_locales()
211 for path in [os.path.join(base_path, locale) for locale in locales]:
212 if not os.path.exists(path):
213 os.makedirs(path)
214
215 yield os.path.join(path, self.get_namespace())
216
217 def get_new_strings(self, old_strings: Mapping[str, str],
218 translation_strings: List[str], locale: str) -> Dict[str, str]:
219 """
220 Missing strings are removed, new strings are added and already
221 translated strings are not touched.
222 """
223 new_strings = {} # Dict[str, str]
224 for k in translation_strings:
225 k = k.replace('\\n', '\n')
226 if locale == 'en':
227 # For English language, translation is equal to the key.
228 new_strings[k] = old_strings.get(k, k)
229 else:
230 new_strings[k] = old_strings.get(k, "")
231
232 plurals = {k: v for k, v in old_strings.items() if k.endswith('_plural')}
233 for plural_key, value in plurals.items():
234 components = plural_key.split('_')
235 singular_key = '_'.join(components[:-1])
236 if singular_key in new_strings:
237 new_strings[plural_key] = value
238
239 return new_strings
240
241 def write_translation_strings(self, translation_strings: List[str]) -> None:
242 for locale, output_path in zip(self.get_locales(), self.get_output_paths()):
243 self.stdout.write("[frontend] processing locale {}".format(locale))
244 try:
245 with open(output_path, 'r') as reader:
246 old_strings = json.load(reader)
247 except (IOError, ValueError):
248 old_strings = {}
249
250 new_strings = {
251 force_text(k): v
252 for k, v in self.get_new_strings(old_strings,
253 translation_strings,
254 locale).items()
255 }
256 with open(output_path, 'w') as writer:
257 json.dump(new_strings, writer, indent=2, sort_keys=True)
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/management/commands/makemessages.py b/zerver/management/commands/makemessages.py
--- a/zerver/management/commands/makemessages.py
+++ b/zerver/management/commands/makemessages.py
@@ -133,6 +133,7 @@
try:
ignore_patterns = options.get('ignore_patterns', [])
ignore_patterns.append('docs/*')
+ ignore_patterns.append('var/*')
options['ignore_patterns'] = ignore_patterns
super().handle(*args, **options)
finally:
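With the extra `ignore_patterns.append('var/*')`, both patterns are handed to the Django pass before any files are walked, so nothing under `var/` is ever opened for string extraction. A minimal sketch of the resulting option handling (the dict values are hypothetical; only the key touched by the patch is shown):

```python
# Hypothetical options dict, mirroring the patched handle_django_locales():
options = {'ignore_patterns': []}
ignore_patterns = options.get('ignore_patterns', [])
ignore_patterns.append('docs/*')
ignore_patterns.append('var/*')  # the line added by the patch
options['ignore_patterns'] = ignore_patterns
assert options['ignore_patterns'] == ['docs/*', 'var/*']
```

The same effect is available ad hoc with `./manage.py makemessages --ignore 'var/*'`, but baking the pattern into the command keeps every developer's run clean.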
| {"golden_diff": "diff --git a/zerver/management/commands/makemessages.py b/zerver/management/commands/makemessages.py\n--- a/zerver/management/commands/makemessages.py\n+++ b/zerver/management/commands/makemessages.py\n@@ -133,6 +133,7 @@\n try:\n ignore_patterns = options.get('ignore_patterns', [])\n ignore_patterns.append('docs/*')\n+ ignore_patterns.append('var/*')\n options['ignore_patterns'] = ignore_patterns\n super().handle(*args, **options)\n finally:\n", "issue": "Errors when running `manage.py makemessages`\nI get these errors in my local development environment:\r\n\r\n```\r\n(zulip-py3-venv) tabbott@zaset:~/zulip$ ./manage.py makemessages\r\nUnicodeDecodeError: skipped file brainstorm_notes.txt in ./var/uploads/files/15/3d/47qkB-BgaArZ7wrTMTr-nsTK (reason: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte)\r\nUnicodeDecodeError: skipped file -.txt in ./var/uploads/files/15/9e/fqVojOZvoTZuGZ39r2_37NBn (reason: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte)\r\nUnicodeDecodeError: skipped file -.txt in ./var/uploads/files/2/fc/IfxNDeGaie57gWdOOok1Pyb5 (reason: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte)\r\nprocessing locale ca\r\nprocessing locale es\r\n```\r\n\r\nI'm not sure why `manage.py makemessages` is parsing these uploaded files at all. \r\n\r\n@umairwaheed can you try to track this down? We don't have a clear reproducer, but it seems like this sort of thing should be findable when reading code.\n", "before_files": [{"content": "\"\"\"\nThe contents of this file are taken from\nhttps://github.com/niwinz/django-jinja/blob/master/django_jinja/management/commands/makemessages.py\n\nJinja2's i18n functionality is not exactly the same as Django's.\nIn particular, the tags names and their syntax are different:\n\n 1. The Django ``trans`` tag is replaced by a _() global.\n 2. The Django ``blocktrans`` tag is called ``trans``.\n\n(1) isn't an issue, since the whole ``makemessages`` process is based on\nconverting the template tags to ``_()`` calls. However, (2) means that\nthose Jinja2 ``trans`` tags will not be picked up by Django's\n``makemessages`` command.\n\nThere aren't any nice solutions here. While Jinja2's i18n extension does\ncome with extraction capabilities built in, the code behind ``makemessages``\nunfortunately isn't extensible, so we can:\n\n * Duplicate the command + code behind it.\n * Offer a separate command for Jinja2 extraction.\n * Try to get Django to offer hooks into makemessages().\n * Monkey-patch.\n\nWe are currently doing that last thing. It turns out there we are lucky\nfor once: It's simply a matter of extending two regular expressions.\nCredit for the approach goes to:\nhttp://stackoverflow.com/questions/2090717\n\n\"\"\"\n\nimport glob\nimport json\nimport os\nimport re\nfrom argparse import ArgumentParser\nfrom typing import Any, Dict, Iterable, List, Mapping, Text\n\nfrom django.conf import settings\nfrom django.core.management.commands import makemessages\nfrom django.template.base import BLOCK_TAG_END, BLOCK_TAG_START\nfrom django.utils.translation import template\n\nfrom zerver.lib.str_utils import force_text\n\nstrip_whitespace_right = re.compile(\"(%s-?\\\\s*(trans|pluralize).*?-%s)\\\\s+\" % (\n BLOCK_TAG_START, BLOCK_TAG_END), re.U)\nstrip_whitespace_left = re.compile(\"\\\\s+(%s-\\\\s*(endtrans|pluralize).*?-?%s)\" % (\n BLOCK_TAG_START, BLOCK_TAG_END), re.U)\n\nregexes = ['{{#tr .*?}}([\\s\\S]*?){{/tr}}', # '.' 
doesn't match '\\n' by default\n '{{\\s*t \"(.*?)\"\\W*}}',\n \"{{\\s*t '(.*?)'\\W*}}\",\n \"i18n\\.t\\('([^\\']*?)'\\)\",\n \"i18n\\.t\\('(.*?)',\\s*.*?[^,]\\)\",\n 'i18n\\.t\\(\"([^\\\"]*?)\"\\)',\n 'i18n\\.t\\(\"(.*?)\",\\s*.*?[^,]\\)',\n ]\ntags = [('err_', \"error\"),\n ]\n\nfrontend_compiled_regexes = [re.compile(regex) for regex in regexes]\nmultiline_js_comment = re.compile(\"/\\*.*?\\*/\", re.DOTALL)\nsingleline_js_comment = re.compile(\"//.*?\\n\")\n\ndef strip_whitespaces(src: Text) -> Text:\n src = strip_whitespace_left.sub('\\\\1', src)\n src = strip_whitespace_right.sub('\\\\1', src)\n return src\n\nclass Command(makemessages.Command):\n\n xgettext_options = makemessages.Command.xgettext_options\n for func, tag in tags:\n xgettext_options += ['--keyword={}:1,\"{}\"'.format(func, tag)]\n\n def add_arguments(self, parser: ArgumentParser) -> None:\n super(Command, self).add_arguments(parser)\n parser.add_argument('--frontend-source', type=str,\n default='static/templates',\n help='Name of the Handlebars template directory')\n parser.add_argument('--frontend-output', type=str,\n default='static/locale',\n help='Name of the frontend messages output directory')\n parser.add_argument('--frontend-namespace', type=str,\n default='translations.json',\n help='Namespace of the frontend locale file')\n\n def handle(self, *args: Any, **options: Any) -> None:\n self.handle_django_locales(*args, **options)\n self.handle_frontend_locales(**options)\n\n def handle_frontend_locales(self, *,\n frontend_source: str,\n frontend_output: str,\n frontend_namespace: str,\n locale: List[str],\n exclude: List[str],\n all: bool,\n **options: Any) -> None:\n self.frontend_source = frontend_source\n self.frontend_output = frontend_output\n self.frontend_namespace = frontend_namespace\n self.frontend_locale = locale\n self.frontend_exclude = exclude\n self.frontend_all = all\n\n translation_strings = self.get_translation_strings()\n self.write_translation_strings(translation_strings)\n\n def handle_django_locales(self, *args: Any, **options: Any) -> None:\n old_endblock_re = template.endblock_re\n old_block_re = template.block_re\n old_constant_re = template.constant_re\n\n old_templatize = template.templatize\n # Extend the regular expressions that are used to detect\n # translation blocks with an \"OR jinja-syntax\" clause.\n template.endblock_re = re.compile(\n template.endblock_re.pattern + '|' + r\"\"\"^-?\\s*endtrans\\s*-?$\"\"\")\n template.block_re = re.compile(\n template.block_re.pattern + '|' + r\"\"\"^-?\\s*trans(?:\\s+(?!'|\")(?=.*?=.*?)|\\s*-?$)\"\"\")\n template.plural_re = re.compile(\n template.plural_re.pattern + '|' + r\"\"\"^-?\\s*pluralize(?:\\s+.+|-?$)\"\"\")\n template.constant_re = re.compile(r\"\"\"_\\(((?:\".*?\")|(?:'.*?')).*\\)\"\"\")\n\n def my_templatize(src: Text, *args: Any, **kwargs: Any) -> Text:\n new_src = strip_whitespaces(src)\n return old_templatize(new_src, *args, **kwargs)\n\n template.templatize = my_templatize\n\n try:\n ignore_patterns = options.get('ignore_patterns', [])\n ignore_patterns.append('docs/*')\n options['ignore_patterns'] = ignore_patterns\n super().handle(*args, **options)\n finally:\n template.endblock_re = old_endblock_re\n template.block_re = old_block_re\n template.templatize = old_templatize\n template.constant_re = old_constant_re\n\n def extract_strings(self, data: str) -> List[str]:\n translation_strings = [] # type: List[str]\n for regex in frontend_compiled_regexes:\n for match in regex.findall(data):\n match = match.strip()\n match = ' 
'.join(line.strip() for line in match.splitlines())\n match = match.replace('\\n', '\\\\n')\n translation_strings.append(match)\n\n return translation_strings\n\n def ignore_javascript_comments(self, data: str) -> str:\n # Removes multi line comments.\n data = multiline_js_comment.sub('', data)\n # Removes single line (//) comments.\n data = singleline_js_comment.sub('', data)\n return data\n\n def get_translation_strings(self) -> List[str]:\n translation_strings = [] # type: List[str]\n dirname = self.get_template_dir()\n\n for dirpath, dirnames, filenames in os.walk(dirname):\n for filename in [f for f in filenames if f.endswith(\".handlebars\")]:\n if filename.startswith('.'):\n continue\n with open(os.path.join(dirpath, filename), 'r') as reader:\n data = reader.read()\n translation_strings.extend(self.extract_strings(data))\n\n dirname = os.path.join(settings.DEPLOY_ROOT, 'static/js')\n for filename in os.listdir(dirname):\n if filename.endswith('.js') and not filename.startswith('.'):\n with open(os.path.join(dirname, filename)) as reader:\n data = reader.read()\n data = self.ignore_javascript_comments(data)\n translation_strings.extend(self.extract_strings(data))\n\n return list(set(translation_strings))\n\n def get_template_dir(self) -> str:\n return self.frontend_source\n\n def get_namespace(self) -> str:\n return self.frontend_namespace\n\n def get_locales(self) -> Iterable[str]:\n locale = self.frontend_locale\n exclude = self.frontend_exclude\n process_all = self.frontend_all\n\n paths = glob.glob('%s/*' % self.default_locale_path,)\n all_locales = [os.path.basename(path) for path in paths if os.path.isdir(path)]\n\n # Account for excluded locales\n if process_all:\n return all_locales\n else:\n locales = locale or all_locales\n return set(locales) - set(exclude)\n\n def get_base_path(self) -> str:\n return self.frontend_output\n\n def get_output_paths(self) -> Iterable[str]:\n base_path = self.get_base_path()\n locales = self.get_locales()\n for path in [os.path.join(base_path, locale) for locale in locales]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n yield os.path.join(path, self.get_namespace())\n\n def get_new_strings(self, old_strings: Mapping[str, str],\n translation_strings: List[str], locale: str) -> Dict[str, str]:\n \"\"\"\n Missing strings are removed, new strings are added and already\n translated strings are not touched.\n \"\"\"\n new_strings = {} # Dict[str, str]\n for k in translation_strings:\n k = k.replace('\\\\n', '\\n')\n if locale == 'en':\n # For English language, translation is equal to the key.\n new_strings[k] = old_strings.get(k, k)\n else:\n new_strings[k] = old_strings.get(k, \"\")\n\n plurals = {k: v for k, v in old_strings.items() if k.endswith('_plural')}\n for plural_key, value in plurals.items():\n components = plural_key.split('_')\n singular_key = '_'.join(components[:-1])\n if singular_key in new_strings:\n new_strings[plural_key] = value\n\n return new_strings\n\n def write_translation_strings(self, translation_strings: List[str]) -> None:\n for locale, output_path in zip(self.get_locales(), self.get_output_paths()):\n self.stdout.write(\"[frontend] processing locale {}\".format(locale))\n try:\n with open(output_path, 'r') as reader:\n old_strings = json.load(reader)\n except (IOError, ValueError):\n old_strings = {}\n\n new_strings = {\n force_text(k): v\n for k, v in self.get_new_strings(old_strings,\n translation_strings,\n locale).items()\n }\n with open(output_path, 'w') as writer:\n json.dump(new_strings, writer, 
indent=2, sort_keys=True)\n", "path": "zerver/management/commands/makemessages.py"}], "after_files": [{"content": "\"\"\"\nThe contents of this file are taken from\nhttps://github.com/niwinz/django-jinja/blob/master/django_jinja/management/commands/makemessages.py\n\nJinja2's i18n functionality is not exactly the same as Django's.\nIn particular, the tags names and their syntax are different:\n\n 1. The Django ``trans`` tag is replaced by a _() global.\n 2. The Django ``blocktrans`` tag is called ``trans``.\n\n(1) isn't an issue, since the whole ``makemessages`` process is based on\nconverting the template tags to ``_()`` calls. However, (2) means that\nthose Jinja2 ``trans`` tags will not be picked up by Django's\n``makemessages`` command.\n\nThere aren't any nice solutions here. While Jinja2's i18n extension does\ncome with extraction capabilities built in, the code behind ``makemessages``\nunfortunately isn't extensible, so we can:\n\n * Duplicate the command + code behind it.\n * Offer a separate command for Jinja2 extraction.\n * Try to get Django to offer hooks into makemessages().\n * Monkey-patch.\n\nWe are currently doing that last thing. It turns out there we are lucky\nfor once: It's simply a matter of extending two regular expressions.\nCredit for the approach goes to:\nhttp://stackoverflow.com/questions/2090717\n\n\"\"\"\n\nimport glob\nimport json\nimport os\nimport re\nfrom argparse import ArgumentParser\nfrom typing import Any, Dict, Iterable, List, Mapping, Text\n\nfrom django.conf import settings\nfrom django.core.management.commands import makemessages\nfrom django.template.base import BLOCK_TAG_END, BLOCK_TAG_START\nfrom django.utils.translation import template\n\nfrom zerver.lib.str_utils import force_text\n\nstrip_whitespace_right = re.compile(\"(%s-?\\\\s*(trans|pluralize).*?-%s)\\\\s+\" % (\n BLOCK_TAG_START, BLOCK_TAG_END), re.U)\nstrip_whitespace_left = re.compile(\"\\\\s+(%s-\\\\s*(endtrans|pluralize).*?-?%s)\" % (\n BLOCK_TAG_START, BLOCK_TAG_END), re.U)\n\nregexes = ['{{#tr .*?}}([\\s\\S]*?){{/tr}}', # '.' 
doesn't match '\\n' by default\n '{{\\s*t \"(.*?)\"\\W*}}',\n \"{{\\s*t '(.*?)'\\W*}}\",\n \"i18n\\.t\\('([^\\']*?)'\\)\",\n \"i18n\\.t\\('(.*?)',\\s*.*?[^,]\\)\",\n 'i18n\\.t\\(\"([^\\\"]*?)\"\\)',\n 'i18n\\.t\\(\"(.*?)\",\\s*.*?[^,]\\)',\n ]\ntags = [('err_', \"error\"),\n ]\n\nfrontend_compiled_regexes = [re.compile(regex) for regex in regexes]\nmultiline_js_comment = re.compile(\"/\\*.*?\\*/\", re.DOTALL)\nsingleline_js_comment = re.compile(\"//.*?\\n\")\n\ndef strip_whitespaces(src: Text) -> Text:\n src = strip_whitespace_left.sub('\\\\1', src)\n src = strip_whitespace_right.sub('\\\\1', src)\n return src\n\nclass Command(makemessages.Command):\n\n xgettext_options = makemessages.Command.xgettext_options\n for func, tag in tags:\n xgettext_options += ['--keyword={}:1,\"{}\"'.format(func, tag)]\n\n def add_arguments(self, parser: ArgumentParser) -> None:\n super(Command, self).add_arguments(parser)\n parser.add_argument('--frontend-source', type=str,\n default='static/templates',\n help='Name of the Handlebars template directory')\n parser.add_argument('--frontend-output', type=str,\n default='static/locale',\n help='Name of the frontend messages output directory')\n parser.add_argument('--frontend-namespace', type=str,\n default='translations.json',\n help='Namespace of the frontend locale file')\n\n def handle(self, *args: Any, **options: Any) -> None:\n self.handle_django_locales(*args, **options)\n self.handle_frontend_locales(**options)\n\n def handle_frontend_locales(self, *,\n frontend_source: str,\n frontend_output: str,\n frontend_namespace: str,\n locale: List[str],\n exclude: List[str],\n all: bool,\n **options: Any) -> None:\n self.frontend_source = frontend_source\n self.frontend_output = frontend_output\n self.frontend_namespace = frontend_namespace\n self.frontend_locale = locale\n self.frontend_exclude = exclude\n self.frontend_all = all\n\n translation_strings = self.get_translation_strings()\n self.write_translation_strings(translation_strings)\n\n def handle_django_locales(self, *args: Any, **options: Any) -> None:\n old_endblock_re = template.endblock_re\n old_block_re = template.block_re\n old_constant_re = template.constant_re\n\n old_templatize = template.templatize\n # Extend the regular expressions that are used to detect\n # translation blocks with an \"OR jinja-syntax\" clause.\n template.endblock_re = re.compile(\n template.endblock_re.pattern + '|' + r\"\"\"^-?\\s*endtrans\\s*-?$\"\"\")\n template.block_re = re.compile(\n template.block_re.pattern + '|' + r\"\"\"^-?\\s*trans(?:\\s+(?!'|\")(?=.*?=.*?)|\\s*-?$)\"\"\")\n template.plural_re = re.compile(\n template.plural_re.pattern + '|' + r\"\"\"^-?\\s*pluralize(?:\\s+.+|-?$)\"\"\")\n template.constant_re = re.compile(r\"\"\"_\\(((?:\".*?\")|(?:'.*?')).*\\)\"\"\")\n\n def my_templatize(src: Text, *args: Any, **kwargs: Any) -> Text:\n new_src = strip_whitespaces(src)\n return old_templatize(new_src, *args, **kwargs)\n\n template.templatize = my_templatize\n\n try:\n ignore_patterns = options.get('ignore_patterns', [])\n ignore_patterns.append('docs/*')\n ignore_patterns.append('var/*')\n options['ignore_patterns'] = ignore_patterns\n super().handle(*args, **options)\n finally:\n template.endblock_re = old_endblock_re\n template.block_re = old_block_re\n template.templatize = old_templatize\n template.constant_re = old_constant_re\n\n def extract_strings(self, data: str) -> List[str]:\n translation_strings = [] # type: List[str]\n for regex in frontend_compiled_regexes:\n for match in regex.findall(data):\n 
match = match.strip()\n match = ' '.join(line.strip() for line in match.splitlines())\n match = match.replace('\\n', '\\\\n')\n translation_strings.append(match)\n\n return translation_strings\n\n def ignore_javascript_comments(self, data: str) -> str:\n # Removes multi line comments.\n data = multiline_js_comment.sub('', data)\n # Removes single line (//) comments.\n data = singleline_js_comment.sub('', data)\n return data\n\n def get_translation_strings(self) -> List[str]:\n translation_strings = [] # type: List[str]\n dirname = self.get_template_dir()\n\n for dirpath, dirnames, filenames in os.walk(dirname):\n for filename in [f for f in filenames if f.endswith(\".handlebars\")]:\n if filename.startswith('.'):\n continue\n with open(os.path.join(dirpath, filename), 'r') as reader:\n data = reader.read()\n translation_strings.extend(self.extract_strings(data))\n\n dirname = os.path.join(settings.DEPLOY_ROOT, 'static/js')\n for filename in os.listdir(dirname):\n if filename.endswith('.js') and not filename.startswith('.'):\n with open(os.path.join(dirname, filename)) as reader:\n data = reader.read()\n data = self.ignore_javascript_comments(data)\n translation_strings.extend(self.extract_strings(data))\n\n return list(set(translation_strings))\n\n def get_template_dir(self) -> str:\n return self.frontend_source\n\n def get_namespace(self) -> str:\n return self.frontend_namespace\n\n def get_locales(self) -> Iterable[str]:\n locale = self.frontend_locale\n exclude = self.frontend_exclude\n process_all = self.frontend_all\n\n paths = glob.glob('%s/*' % self.default_locale_path,)\n all_locales = [os.path.basename(path) for path in paths if os.path.isdir(path)]\n\n # Account for excluded locales\n if process_all:\n return all_locales\n else:\n locales = locale or all_locales\n return set(locales) - set(exclude)\n\n def get_base_path(self) -> str:\n return self.frontend_output\n\n def get_output_paths(self) -> Iterable[str]:\n base_path = self.get_base_path()\n locales = self.get_locales()\n for path in [os.path.join(base_path, locale) for locale in locales]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n yield os.path.join(path, self.get_namespace())\n\n def get_new_strings(self, old_strings: Mapping[str, str],\n translation_strings: List[str], locale: str) -> Dict[str, str]:\n \"\"\"\n Missing strings are removed, new strings are added and already\n translated strings are not touched.\n \"\"\"\n new_strings = {} # Dict[str, str]\n for k in translation_strings:\n k = k.replace('\\\\n', '\\n')\n if locale == 'en':\n # For English language, translation is equal to the key.\n new_strings[k] = old_strings.get(k, k)\n else:\n new_strings[k] = old_strings.get(k, \"\")\n\n plurals = {k: v for k, v in old_strings.items() if k.endswith('_plural')}\n for plural_key, value in plurals.items():\n components = plural_key.split('_')\n singular_key = '_'.join(components[:-1])\n if singular_key in new_strings:\n new_strings[plural_key] = value\n\n return new_strings\n\n def write_translation_strings(self, translation_strings: List[str]) -> None:\n for locale, output_path in zip(self.get_locales(), self.get_output_paths()):\n self.stdout.write(\"[frontend] processing locale {}\".format(locale))\n try:\n with open(output_path, 'r') as reader:\n old_strings = json.load(reader)\n except (IOError, ValueError):\n old_strings = {}\n\n new_strings = {\n force_text(k): v\n for k, v in self.get_new_strings(old_strings,\n translation_strings,\n locale).items()\n }\n with open(output_path, 'w') as writer:\n 
json.dump(new_strings, writer, indent=2, sort_keys=True)\n", "path": "zerver/management/commands/makemessages.py"}]} |
gh_patches_debug_1386 | rasdani/github-patches | git_diff | internetarchive__openlibrary-5752 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Open Library login issue after visiting google
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
If an HTTP referer is set (e.g. after arriving from Google), logging in erroneously redirects away from Open Library.
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...
2. Do ...
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual:
* Expected:
### Details
- **Logged in (Y/N)?**
- **Browser type/version?**
- **Operating system?**
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
--- END ISSUE ---
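The symptom maps to the referer handling in `account_login.GET` (quoted below): the login form's hidden `redirect` field is pre-filled from `HTTP_REFERER`, and only `archive.org` referers are discarded, so a visitor who arrives from Google is sent back off-site after logging in. As a hedged sketch of the general remedy (the helper name and host list are assumptions for illustration, not the repository's actual patch), the referer would be kept as the redirect target only when it points back at the same site:

```python
# Illustrative sketch only; the helper name and allowed hosts are assumptions,
# not Open Library's actual fix for this issue.
from urllib.parse import urlparse

def safe_login_redirect(referer, allowed_hosts=('openlibrary.org', 'www.openlibrary.org')):
    """Keep the referer as the post-login redirect only if it is same-site."""
    if not referer:
        return '/'
    host = urlparse(referer).hostname or ''
    if host in allowed_hosts:
        return referer
    return '/'  # external referers (e.g. google.com) fall back to the home page

assert safe_login_redirect('https://www.google.com/search?q=moby+dick') == '/'
assert safe_login_redirect('https://openlibrary.org/search?q=tolkien') != '/'
```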
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/upstream/account.py`
Content:
```
1
2 import web
3 import logging
4 import json
5 import re
6
7 from infogami.utils import delegate
8 from infogami import config
9 from infogami.utils.view import (
10 require_login, render, render_template, add_flash_message
11 )
12
13 from infogami.infobase.client import ClientException
14 from infogami.utils.context import context
15 from infogami.utils.view import safeint
16 import infogami.core.code as core
17
18 from openlibrary import accounts
19 from openlibrary.i18n import gettext as _
20 from openlibrary.core import helpers as h, lending
21 from openlibrary.core.booknotes import Booknotes
22 from openlibrary.core.bookshelves import Bookshelves
23 from openlibrary.core.observations import Observations, convert_observation_ids
24 from openlibrary.core.lending import add_availability
25 from openlibrary.plugins.recaptcha import recaptcha
26 from openlibrary.plugins import openlibrary as olib
27 from openlibrary.accounts import (
28 audit_accounts, Account, OpenLibraryAccount, InternetArchiveAccount, valid_email)
29 from openlibrary.core.sponsorships import get_sponsored_editions
30 from openlibrary.plugins.upstream import borrow, forms, utils
31
32 from six.moves import range
33 from six.moves import urllib
34
35
36 logger = logging.getLogger("openlibrary.account")
37
38 RESULTS_PER_PAGE = 25
39 USERNAME_RETRIES = 3
40
41 # XXX: These need to be cleaned up
42 send_verification_email = accounts.send_verification_email
43 create_link_doc = accounts.create_link_doc
44 sendmail = accounts.sendmail
45
46 LOGIN_ERRORS = {
47 "invalid_email": "The email address you entered is invalid",
48 "account_blocked": "This account has been blocked",
49 "account_locked": "This account has been blocked",
50 "account_not_found": "No account was found with this email. Please try again",
51 "account_incorrect_password": "The password you entered is incorrect. Please try again",
52 "account_bad_password": "Wrong password. Please try again",
53 "account_not_verified": "Please verify your Open Library account before logging in",
54 "ia_account_not_verified": "Please verify your Internet Archive account before logging in",
55 "missing_fields": "Please fill out all fields and try again",
56 "email_registered": "This email is already registered",
57 "username_registered": "This username is already registered",
58 "ia_login_only": "Sorry, you must use your Internet Archive email and password to log in",
59 "max_retries_exceeded": "A problem occurred and we were unable to log you in.",
60 "invalid_s3keys": "Login attempted with invalid Internet Archive s3 credentials.",
61 "wrong_ia_account": "An Open Library account with this email is already linked to a different Internet Archive account. Please contact [email protected]."
62 }
63
64 class availability(delegate.page):
65 path = "/internal/fake/availability"
66
67 def POST(self):
68 """Internal private API required for testing on localhost
69 """
70 return delegate.RawText(json.dumps({}),
71 content_type="application/json")
72
73 class loans(delegate.page):
74 path = "/internal/fake/loans"
75
76 def POST(self):
77 """Internal private API required for testing on localhost
78 """
79 return delegate.RawText(json.dumps({}),
80 content_type="application/json")
81
82 class xauth(delegate.page):
83 path = "/internal/fake/xauth"
84
85 def POST(self):
86 """Internal private API required for testing login on localhost
87 which normally would have to hit archive.org's xauth
88 service. This service is spoofable to return successful and
89 unsuccessful login attempts depending on the provided GET parameters
90 """
91 i = web.input(email='', op=None)
92 result = {"error": "incorrect option specified"}
93 if i.op == "authenticate":
94 result = {
95 "success": True,
96 "version": 1,
97 "values": {
98 "access": 'foo',
99 "secret": 'foo',
100 },
101 }
102 elif i.op == "info":
103 result = {
104 "success": True,
105 "values": {
106 "locked": False,
107 "email": "[email protected]",
108 "itemname":"@openlibrary",
109 "screenname":"openlibrary",
110 "verified": True
111 },
112 "version":1
113 }
114 return delegate.RawText(json.dumps(result),
115 content_type="application/json")
116
117 class internal_audit(delegate.page):
118 path = "/internal/account/audit"
119
120 def GET(self):
121 """Internal API endpoint used for authorized test cases and
122 administrators to unlink linked OL and IA accounts.
123 """
124 i = web.input(email='', username='', itemname='', key='', unlink='',
125 new_itemname='')
126 if i.key != lending.config_internal_tests_api_key:
127 result = {'error': 'Authentication failed for private API'}
128 else:
129 try:
130 result = OpenLibraryAccount.get(email=i.email, link=i.itemname,
131 username=i.username)
132 if result is None:
133 raise ValueError('Invalid Open Library account email ' \
134 'or itemname')
135 result.enc_password = 'REDACTED'
136 if i.new_itemname:
137 result.link(i.new_itemname)
138 if i.unlink:
139 result.unlink()
140 except ValueError as e:
141 result = {'error': str(e)}
142
143 return delegate.RawText(json.dumps(result),
144 content_type="application/json")
145
146 class account_migration(delegate.page):
147
148 path = "/internal/account/migration"
149
150 def GET(self):
151 i = web.input(username='', email='', key='')
152 if i.key != lending.config_internal_tests_api_key:
153 return delegate.RawText(json.dumps({
154 'error': 'Authentication failed for private API'
155 }), content_type="application/json")
156 try:
157 if i.username:
158 ol_account = OpenLibraryAccount.get(username=i.username)
159 elif i.email:
160 ol_account = OpenLibraryAccount.get(email=i.email)
161 except Exception as e:
162 return delegate.RawText(json.dumps({
163 'error': 'bad-account'
164 }), content_type="application/json")
165 if ol_account:
166 ol_account.enc_password = 'REDACTED'
167 if ol_account.itemname:
168 return delegate.RawText(json.dumps({
169 'status': 'link-exists',
170 'username': ol_account.username,
171 'itemname': ol_account.itemname,
172 'email': ol_account.email.lower()
173 }), content_type="application/json")
174 if not ol_account.itemname:
175 ia_account = InternetArchiveAccount.get(email=ol_account.email.lower())
176 if ia_account:
177 ol_account.link(ia_account.itemname)
178 return delegate.RawText(json.dumps({
179 'username': ol_account.username,
180 'status': 'link-found',
181 'itemname': ia_account.itemname,
182 'ol-itemname': ol_account.itemname,
183 'email': ol_account.email.lower(),
184 'ia': ia_account
185 }), content_type="application/json")
186
187 password = OpenLibraryAccount.generate_random_password(16)
188 ia_account = InternetArchiveAccount.create(
189 ol_account.username or ol_account.displayname,
190 ol_account.email, password, verified=True, retries=USERNAME_RETRIES)
191 return delegate.RawText(json.dumps({
192 'username': ol_account.username,
193 'email': ol_account.email,
194 'itemname': ia_account.itemname,
195 'password': password,
196 'status': 'link-created'
197 }), content_type="application/json")
198
199 class account(delegate.page):
200 """Account preferences.
201 """
202 @require_login
203 def GET(self):
204 user = accounts.get_current_user()
205 return render.account(user)
206
207 class account_create(delegate.page):
208 """New account creation.
209
210 Account remains in the pending state until the email is activated.
211 """
212 path = "/account/create"
213
214 def GET(self):
215 f = self.get_form()
216 return render['account/create'](f)
217
218 def get_form(self):
219 """
220 :rtype: forms.RegisterForm
221 """
222 f = forms.Register()
223 recap = self.get_recap()
224 f.has_recaptcha = recap is not None
225 if f.has_recaptcha:
226 f.inputs = list(f.inputs) + [recap]
227 return f
228
229 def get_recap(self):
230 if self.is_plugin_enabled('recaptcha'):
231 public_key = config.plugin_recaptcha.public_key
232 private_key = config.plugin_recaptcha.private_key
233 return recaptcha.Recaptcha(public_key, private_key)
234
235 def is_plugin_enabled(self, name):
236 return name in delegate.get_plugins() or "openlibrary.plugins." + name in delegate.get_plugins()
237
238 def POST(self):
239 f = self.get_form() # type: forms.RegisterForm
240
241 if f.validates(web.input()):
242 try:
243 # Create ia_account: require they activate via IA email
244 # and then login to OL. Logging in after activation with
245 # IA credentials will auto create and link OL account.
246
247 """NOTE: the values for the notifications must be kept in sync
248 with the values in the `MAILING_LIST_KEYS` array in
249 https://git.archive.org/ia/petabox/blob/master/www/common/MailSync/Settings.inc
250 Currently, per the fundraising/development team, the
251 "announcements checkbox" should map to BOTH `ml_best_of` and
252 `ml_updates`
253 """ # nopep8
254 mls = ['ml_best_of', 'ml_updates']
255 notifications = mls if f.ia_newsletter.checked else []
256 InternetArchiveAccount.create(
257 screenname=f.username.value, email=f.email.value, password=f.password.value,
258 notifications=notifications, verified=False, retries=USERNAME_RETRIES)
259 return render['account/verify'](username=f.username.value, email=f.email.value)
260 except ValueError:
261 f.note = LOGIN_ERRORS['max_retries_exceeded']
262
263 return render['account/create'](f)
264
265
266 del delegate.pages['/account/register']
267
268
269 class account_login_json(delegate.page):
270
271 encoding = "json"
272 path = "/account/login"
273
274 def POST(self):
275 """Overrides `account_login` and infogami.login to prevent users from
276 logging in with Open Library username and password if the
277 payload is json. Instead, if login attempted w/ json
278 credentials, requires Archive.org s3 keys.
279 """
280 from openlibrary.plugins.openlibrary.code import BadRequest
281 d = json.loads(web.data())
282 access = d.get('access', None)
283 secret = d.get('secret', None)
284 test = d.get('test', False)
285
286 # Try S3 authentication first, fallback to infogami user, pass
287 if access and secret:
288 audit = audit_accounts(None, None, require_link=True,
289 s3_access_key=access,
290 s3_secret_key=secret, test=test)
291 error = audit.get('error')
292 if error:
293 raise olib.code.BadRequest(error)
294 web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token())
295 # Fallback to infogami user/pass
296 else:
297 from infogami.plugins.api.code import login as infogami_login
298 infogami_login().POST()
299
300
301
302 class account_login(delegate.page):
303 """Account login.
304
305 Login can fail because of the following reasons:
306
307 * account_not_found: Error message is displayed.
308 * account_bad_password: Error message is displayed with a link to reset password.
309 * account_not_verified: Error page is displayed with button to "resend verification email".
310 """
311 path = "/account/login"
312
313 def render_error(self, error_key, i):
314 f = forms.Login()
315 f.fill(i)
316 f.note = LOGIN_ERRORS[error_key]
317 return render.login(f)
318
319 def GET(self):
320 referer = web.ctx.env.get('HTTP_REFERER', '/')
321 # Don't set referer on user activation
322 if 'archive.org' in referer:
323 referer = None
324 i = web.input(redirect=referer)
325 f = forms.Login()
326 f['redirect'].value = i.redirect
327 return render.login(f)
328
329 def POST(self):
330 i = web.input(username="", connect=None, password="", remember=False,
331 redirect='/', test=False, access=None, secret=None)
332 email = i.username # XXX username is now email
333 audit = audit_accounts(email, i.password, require_link=True,
334 s3_access_key=i.access,
335 s3_secret_key=i.secret, test=i.test)
336 error = audit.get('error')
337 if error:
338 return self.render_error(error, i)
339
340 expires = 3600 * 24 * 7 if i.remember else ""
341 web.setcookie('pd', int(audit.get('special_access')) or '',
342 expires=expires)
343 web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token(),
344 expires=expires)
345 blacklist = ["/account/login", "/account/password", "/account/email",
346 "/account/create"]
347 if i.redirect == "" or any([path in i.redirect for path in blacklist]):
348 i.redirect = "/"
349 raise web.seeother(i.redirect)
350
351 def POST_resend_verification_email(self, i):
352 try:
353 ol_login = OpenLibraryAccount.authenticate(i.email, i.password)
354 except ClientException as e:
355 code = e.get_data().get("code")
356 if code != "account_not_verified":
357 return self.error("account_incorrect_password", i)
358
359 account = OpenLibraryAccount.get(email=i.email)
360 account.send_verification_email()
361
362 title = _("Hi, %(user)s", user=account.displayname)
363 message = _("We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.", email=account.email)
364 return render.message(title, message)
365
366 class account_verify(delegate.page):
367 """Verify user account.
368 """
369 path = "/account/verify/([0-9a-f]*)"
370
371 def GET(self, code):
372 docs = web.ctx.site.store.values(type="account-link", name="code", value=code)
373 if docs:
374 doc = docs[0]
375
376 account = accounts.find(username = doc['username'])
377 if account:
378 if account['status'] != "pending":
379 return render['account/verify/activated'](account)
380 account.activate()
381 user = web.ctx.site.get("/people/" + doc['username']) #TBD
382 return render['account/verify/success'](account)
383 else:
384 return render['account/verify/failed']()
385
386 def POST(self, code=None):
387 """Called to regenerate account verification code.
388 """
389 i = web.input(email=None)
390 account = accounts.find(email=i.email)
391 if not account:
392 return render_template("account/verify/failed", email=i.email)
393 elif account['status'] != "pending":
394 return render['account/verify/activated'](account)
395 else:
396 account.send_verification_email()
397 title = _("Hi, %(user)s", user=account.displayname)
398 message = _("We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.", email=account.email)
399 return render.message(title, message)
400
401 class account_verify_old(account_verify):
402 """Old account verification code.
403
404 This takes username, email and code as url parameters. The new one takes just the code as part of the url.
405 """
406 path = "/account/verify"
407 def GET(self):
408 # It is too long since we switched to the new account verification links.
409 # All old links must be expired by now.
410 # Show failed message without thinking.
411 return render['account/verify/failed']()
412
413 class account_validation(delegate.page):
414 path = '/account/validate'
415
416 @staticmethod
417 def validate_username(username):
418 if not 3 <= len(username) <= 20:
419 return _('Username must be between 3-20 characters')
420 if not re.match('^[A-Za-z0-9-_]{3,20}$', username):
421 return _('Username may only contain numbers and letters')
422 ol_account = OpenLibraryAccount.get(username=username)
423 if ol_account:
424 return _("Username unavailable")
425
426 @staticmethod
427 def validate_email(email):
428 if not (email and re.match(r'.*@.*\..*', email)):
429 return _('Must be a valid email address')
430
431 ol_account = OpenLibraryAccount.get(email=email)
432 if ol_account:
433 return _('Email already registered')
434
435
436 def GET(self):
437 i = web.input()
438 errors = {
439 'email': None,
440 'username': None
441 }
442 if i.get('email') is not None:
443 errors['email'] = self.validate_email(i.email)
444 if i.get('username') is not None:
445 errors['username'] = self.validate_username(i.username)
446 return delegate.RawText(json.dumps(errors),
447 content_type="application/json")
448
449
450 class account_email_verify(delegate.page):
451 path = "/account/email/verify/([0-9a-f]*)"
452
453 def GET(self, code):
454 link = accounts.get_link(code)
455 if link:
456 username = link['username']
457 email = link['email']
458 link.delete()
459 return self.update_email(username, email)
460 else:
461 return self.bad_link()
462
463 def update_email(self, username, email):
464 if accounts.find(email=email):
465 title = _("Email address is already used.")
466 message = _("Your email address couldn't be updated. The specified email address is already used.")
467 else:
468 logger.info("updated email of %s to %s", username, email)
469 accounts.update_account(username=username, email=email, status="active")
470 title = _("Email verification successful.")
471 message = _('Your email address has been successfully verified and updated in your account.')
472 return render.message(title, message)
473
474 def bad_link(self):
475 title = _("Email address couldn't be verified.")
476 message = _("Your email address couldn't be verified. The verification link seems invalid.")
477 return render.message(title, message)
478
479 class account_email_verify_old(account_email_verify):
480 path = "/account/email/verify"
481
482 def GET(self):
483 # It is too long since we switched to the new email verification links.
484 # All old links must be expired by now.
485 # Show failed message without thinking.
486 return self.bad_link()
487
488 class account_ia_email_forgot(delegate.page):
489 path = "/account/email/forgot-ia"
490
491 def GET(self):
492 return render_template('account/email/forgot-ia')
493
494 def POST(self):
495 i = web.input(email='', password='')
496 err = ""
497
498 if valid_email(i.email):
499 act = OpenLibraryAccount.get(email=i.email)
500 if act:
501 if OpenLibraryAccount.authenticate(i.email, i.password) == "ok":
502 ia_act = act.get_linked_ia_account()
503 if ia_act:
504 return render_template('account/email/forgot-ia', email=ia_act.email)
505 else:
506 err = "Open Library Account not linked. Login with your Open Library credentials to connect or create an Archive.org account"
507 else:
508 err = "Incorrect password"
509 else:
510 err = "Sorry, this Open Library account does not exist"
511 else:
512 err = "Please enter a valid Open Library email"
513 return render_template('account/email/forgot-ia', err=err)
514
515 class account_ol_email_forgot(delegate.page):
516 path = "/account/email/forgot"
517
518 def GET(self):
519 return render_template('account/email/forgot')
520
521 def POST(self):
522 i = web.input(username='', password='')
523 err = ""
524 act = OpenLibraryAccount.get(username=i.username)
525
526 if act:
527 if OpenLibraryAccount.authenticate(act.email, i.password) == "ok":
528 return render_template('account/email/forgot', email=act.email)
529 else:
530 err = "Incorrect password"
531
532 elif valid_email(i.username):
533 err = "Please enter a username, not an email"
534
535 else:
536 err="Sorry, this user does not exist"
537
538 return render_template('account/email/forgot', err=err)
539
540
541 class account_password_forgot(delegate.page):
542 path = "/account/password/forgot"
543
544 def GET(self):
545 f = forms.ForgotPassword()
546 return render['account/password/forgot'](f)
547
548 def POST(self):
549 i = web.input(email='')
550
551 f = forms.ForgotPassword()
552
553 if not f.validates(i):
554 return render['account/password/forgot'](f)
555
556 account = accounts.find(email=i.email)
557
558 if account.is_blocked():
559 f.note = utils.get_error("account_blocked")
560 return render_template('account/password/forgot', f)
561
562 send_forgot_password_email(account.username, i.email)
563 return render['account/password/sent'](i.email)
564
565 class account_password_reset(delegate.page):
566
567 path = "/account/password/reset/([0-9a-f]*)"
568
569 def GET(self, code):
570 docs = web.ctx.site.store.values(type="account-link", name="code", value=code)
571 if not docs:
572 title = _("Password reset failed.")
573 message = "Your password reset link seems invalid or expired."
574 return render.message(title, message)
575
576 f = forms.ResetPassword()
577 return render['account/password/reset'](f)
578
579 def POST(self, code):
580 link = accounts.get_link(code)
581 if not link:
582 title = _("Password reset failed.")
583 message = "The password reset link seems invalid or expired."
584 return render.message(title, message)
585
586 username = link['username']
587 i = web.input()
588
589 accounts.update_account(username, password=i.password)
590 link.delete()
591 return render_template("account/password/reset_success", username=username)
592
593
594 class account_audit(delegate.page):
595
596 path = "/account/audit"
597
598 def POST(self):
599 """When the user attempts a login, an audit is performed to determine
600 whether their account is already linked (in which case we can
601 proceed to log the user in), whether there is an error
602 authenticating their account, or whether a /account/connect
603 must first be performed.
604
605 Note: Emails are case sensitive behind the scenes and
606 functions which require them as lower will make them so
607 """
608 i = web.input(email='', password='')
609 test = i.get('test', '').lower() == 'true'
610 email = i.get('email')
611 password = i.get('password')
612 result = audit_accounts(email, password, test=test)
613 return delegate.RawText(json.dumps(result),
614 content_type="application/json")
615
616 class account_privacy(delegate.page):
617 path = "/account/privacy"
618
619 @require_login
620 def GET(self):
621 user = accounts.get_current_user()
622 return render['account/privacy'](user.preferences())
623
624 @require_login
625 def POST(self):
626 user = accounts.get_current_user()
627 user.save_preferences(web.input())
628 add_flash_message('note', _("Notification preferences have been updated successfully."))
629 web.seeother("/account")
630
631 class account_notifications(delegate.page):
632 path = "/account/notifications"
633
634 @require_login
635 def GET(self):
636 user = accounts.get_current_user()
637 email = user.email
638 return render['account/notifications'](user.preferences(), email)
639
640 @require_login
641 def POST(self):
642 user = accounts.get_current_user()
643 user.save_preferences(web.input())
644 add_flash_message('note', _("Notification preferences have been updated successfully."))
645 web.seeother("/account")
646
647 class account_lists(delegate.page):
648 path = "/account/lists"
649
650 @require_login
651 def GET(self):
652 user = accounts.get_current_user()
653 raise web.seeother(user.key + '/lists')
654
655
656 class ReadingLog(object):
657
658 """Manages the user's account page books (reading log, waitlists, loans)"""
659
660 def __init__(self, user=None):
661 self.user = user or accounts.get_current_user()
662 #self.user.update_loan_status()
663 self.KEYS = {
664 'waitlists': self.get_waitlisted_editions,
665 'loans': self.get_loans,
666 'want-to-read': self.get_want_to_read,
667 'currently-reading': self.get_currently_reading,
668 'already-read': self.get_already_read
669 }
670
671 @property
672 def lists(self):
673 return self.user.get_lists()
674
675 @property
676 def reading_log_counts(self):
677 counts = Bookshelves.count_total_books_logged_by_user_per_shelf(
678 self.user.get_username())
679 return {
680 'want-to-read': counts.get(Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0),
681 'currently-reading': counts.get(Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0),
682 'already-read': counts.get(Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0)
683 }
684
685 def get_loans(self):
686 return borrow.get_loans(self.user)
687
688 def get_waitlist_summary(self):
689 return self.user.get_waitinglist()
690
691 def get_waitlisted_editions(self):
692 """Gets a list of records corresponding to a user's waitlisted
693 editions, fetches all the editions, and then inserts the data
694 from each waitlist record (e.g. position in line) into the
695 corresponding edition
696 """
697 waitlists = self.user.get_waitinglist()
698 keyed_waitlists = dict([(w['identifier'], w) for w in waitlists])
699 ocaids = [i['identifier'] for i in waitlists]
700 edition_keys = web.ctx.site.things({"type": "/type/edition", "ocaid": ocaids})
701 editions = web.ctx.site.get_many(edition_keys)
702 for i in range(len(editions)):
703 # insert the waitlist_entry corresponding to this edition
704 editions[i].waitlist_record = keyed_waitlists[editions[i].ocaid]
705 return editions
706
707 def process_logged_books(self, logged_books):
708 work_ids = ['/works/OL%sW' % i['work_id'] for i in logged_books]
709 works = web.ctx.site.get_many(work_ids)
710 for i in range(len(works)):
711 # insert the logged edition (if present) and logged date
712 works[i].logged_date = logged_books[i]['created']
713 works[i].logged_edition = (
714 '/books/OL%sM' % logged_books[i]['edition_id']
715 if logged_books[i]['edition_id'] else '')
716 return works
717
718 def get_want_to_read(self, page=1, limit=RESULTS_PER_PAGE,
719 sort='created', sort_order='desc'):
720 return self.process_logged_books(Bookshelves.get_users_logged_books(
721 self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Want to Read'],
722 page=page, limit=limit, sort=sort + ' ' + sort_order))
723
724 def get_currently_reading(self, page=1, limit=RESULTS_PER_PAGE,
725 sort='created', sort_order='desc'):
726 return self.process_logged_books(Bookshelves.get_users_logged_books(
727 self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Currently Reading'],
728 page=page, limit=limit, sort=sort + ' ' + sort_order))
729
730 def get_already_read(self, page=1, limit=RESULTS_PER_PAGE,
731 sort='created', sort_order='desc'):
732 return self.process_logged_books(Bookshelves.get_users_logged_books(
733 self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Already Read'],
734 page=page, limit=limit, sort=sort + ' ' + sort_order))
735
736 def get_works(self, key, page=1, limit=RESULTS_PER_PAGE,
737 sort='created', sort_order='desc'):
738 """
739 :rtype: list of openlibrary.plugins.upstream.models.Work
740 """
741 key = key.lower()
742 if key in self.KEYS:
743 return self.KEYS[key](page=page, limit=limit,
744 sort=sort, sort_order=sort_order)
745 else: # must be a list or invalid page!
746 #works = web.ctx.site.get_many([ ... ])
747 raise
748
749
750 class PatronBooknotes(object):
751 """ Manages the patron's book notes and observations """
752
753 def __init__(self, user):
754 user = user or account.get_current_user()
755 self.username = user.key.split('/')[-1]
756
757 def get_notes(self, limit=RESULTS_PER_PAGE, page=1):
758 notes = Booknotes.get_notes_grouped_by_work(
759 self.username,
760 limit=limit,
761 page=page)
762
763 for entry in notes:
764 entry['work_key'] = f"/works/OL{entry['work_id']}W"
765 entry['work'] = self._get_work(entry['work_key'])
766 entry['work_details'] = self._get_work_details(entry['work'])
767 entry['notes'] = {i['edition_id']: i['notes'] for i in entry['notes']}
768 entry['editions'] = {
769 k: web.ctx.site.get(f'/books/OL{k}M')
770 for k in entry['notes'] if k != Booknotes.NULL_EDITION_VALUE}
771 return notes
772
773 def get_observations(self, limit=RESULTS_PER_PAGE, page=1):
774 observations = Observations.get_observations_grouped_by_work(
775 self.username,
776 limit=limit,
777 page=page)
778
779 for entry in observations:
780 entry['work_key'] = f"/works/OL{entry['work_id']}W"
781 entry['work'] = self._get_work(entry['work_key'])
782 entry['work_details'] = self._get_work_details(entry['work'])
783 ids = {}
784 for item in entry['observations']:
785 ids[item['observation_type']] = item['observation_values']
786 entry['observations'] = convert_observation_ids(ids)
787 return observations
788
789 def _get_work(self, work_key):
790 return web.ctx.site.get(work_key)
791
792 def _get_work_details(self, work):
793 author_keys = [a.author.key for a in work.get('authors', [])]
794
795 return {
796 'cover_url': (
797 work.get_cover_url('S') or
798 'https://openlibrary.org/images/icons/avatar_book-sm.png'),
799 'title': work.get('title'),
800 'authors': [a.name for a in web.ctx.site.get_many(author_keys)],
801 'first_publish_year': work.first_publish_year or None
802 }
803
804 @classmethod
805 def get_counts(cls, username):
806 return {
807 'notes': Booknotes.count_works_with_notes_by_user(username),
808 'observations': Observations.count_distinct_observations(username)
809 }
810
811
812 class public_my_books(delegate.page):
813 path = "/people/([^/]+)/books"
814
815 def GET(self, username):
816 raise web.seeother('/people/%s/books/want-to-read' % username)
817
818
819 class public_my_books(delegate.page):
820 path = "/people/([^/]+)/books/([a-zA-Z_-]+)"
821
822 def GET(self, username, key='loans'):
823 """check if user's reading log is public"""
824 i = web.input(page=1, sort='desc')
825 user = web.ctx.site.get('/people/%s' % username)
826 if not user:
827 return render.notfound("User %s" % username, create=False)
828 is_public = user.preferences().get('public_readlog', 'no') == 'yes'
829 logged_in_user = accounts.get_current_user()
830 is_logged_in_user = (
831 logged_in_user and
832 logged_in_user.key.split('/')[-1] == username)
833 if is_public or is_logged_in_user:
834 readlog = ReadingLog(user=user)
835 sponsorships = get_sponsored_editions(user)
836 if key == 'sponsorships':
837 books = (web.ctx.site.get(
838 web.ctx.site.things({
839 'type': '/type/edition',
840 'isbn_%s' % len(s['isbn']): s['isbn']
841 })[0]) for s in sponsorships)
842 elif key == 'notes' and is_logged_in_user:
843 books = PatronBooknotes(user).get_notes(page=int(i.page))
844 elif key == 'observations' and is_logged_in_user:
845 books = PatronBooknotes(user).get_observations(page=int(i.page))
846 else:
847 books = add_availability(
848 readlog.get_works(key, page=i.page,
849 sort='created', sort_order=i.sort),
850 mode="openlibrary_work"
851 )
852 booknotes_counts = PatronBooknotes.get_counts(username)
853
854 return render['account/books'](
855 books, key, sponsorship_count=len(sponsorships),
856 reading_log_counts=readlog.reading_log_counts, lists=readlog.lists,
857 user=user, logged_in_user=logged_in_user, public=is_public,
858 sort_order=str(i.sort), booknotes_counts=booknotes_counts
859 )
860 raise web.seeother(user.key)
861
862
863 class public_my_books_json(delegate.page):
864 encoding = "json"
865 path = "/people/([^/]+)/books/([a-zA-Z_-]+)"
866
867     def GET(self, username, key='want-to-read'):
868         """check if user's reading log is public"""
869         i = web.input(page=1, limit=5000)
870         page = safeint(i.page, 1)
871         limit = safeint(i.limit, 5000)
872 user = web.ctx.site.get('/people/%s' % username)
873 if not user:
874 return delegate.RawText(
875 json.dumps({'error': 'User %s not found' % username}),
876 content_type="application/json")
877 is_public = user.preferences().get('public_readlog', 'no') == 'yes'
878 logged_in_user = accounts.get_current_user()
879 if (is_public or
880 logged_in_user and logged_in_user.key.split('/')[-1] == username):
881 readlog = ReadingLog(user=user)
882 books = readlog.get_works(key, page, limit)
883 records_json = [
884 {
885 'work':
886 {
887 'title': w.get('title'),
888 'key': w.key,
889 'author_keys': [a.author.key for a in w.get('authors', [])],
890 'author_names': [str(a.author.name) for a
891 in w.get('authors', [])],
892 'first_publish_year': w.first_publish_year or None,
893 'lending_edition_s': (w._solr_data and
894 w._solr_data.get('lending_edition_s') or
895 None),
896 'edition_key': (w._solr_data and
897 w._solr_data.get('edition_key') or None),
898 'cover_id': (w._solr_data and
899 w._solr_data.get('cover_id') or None),
900 'cover_edition_key': (w._solr_data and
901 w._solr_data.get('cover_edition_key') or
902 None),
903 },
904 'logged_edition': w.get('logged_edition') or None,
905 'logged_date': (w.get('logged_date').strftime("%Y/%m/%d, %H:%M:%S")
906 if w.get('logged_date') else None),
907 } for w in books
908 ]
909 return delegate.RawText(json.dumps({
910 'page': page,
911 'reading_log_entries': records_json
912 }), content_type="application/json")
913 else:
914 return delegate.RawText(
915 json.dumps({'error': 'Shelf %s not found or not accessible' % key}),
916 content_type="application/json")
917
918
919 class readinglog_stats(delegate.page):
920 path = "/people/([^/]+)/books/([a-zA-Z_-]+)/stats"
921
922 def GET(self, username, key='loans'):
923 user = web.ctx.site.get('/people/%s' % username)
924 if not user:
925 return render.notfound("User %s" % username, create=False)
926
927 cur_user = accounts.get_current_user()
928 if not cur_user or cur_user.key.split('/')[-1] != username:
929 return render.permission_denied(web.ctx.path, 'Permission Denied')
930
931 readlog = ReadingLog(user=user)
932 works = readlog.get_works(key, page=1, limit=2000)
933 works_json = [
934 {
935 'title': w.get('title'),
936 'key': w.key,
937 'author_keys': [a.author.key for a in w.get('authors', [])],
938 'first_publish_year': w.first_publish_year or None,
939 'subjects': w.get('subjects'),
940 'subject_people': w.get('subject_people'),
941 'subject_places': w.get('subject_places'),
942 'subject_times': w.get('subject_times'),
943 } for w in works
944 ]
945 author_keys = set(
946 a
947 for work in works_json
948 for a in work['author_keys']
949 )
950 authors_json = [
951 {
952 'key': a.key,
953 'name': a.name,
954 'birth_date': a.get('birth_date'),
955 }
956 for a in web.ctx.site.get_many(list(author_keys))
957 ]
958 return render['account/readinglog_stats'](
959 json.dumps(works_json),
960 json.dumps(authors_json),
961 len(works_json),
962 user.key,
963 user.displayname,
964 web.ctx.path.rsplit('/', 1)[0],
965 key,
966 lang=web.ctx.lang,
967 )
968
969
970 class account_my_books_redirect(delegate.page):
971 path = "/account/books/(.*)"
972
973 @require_login
974 def GET(self, rest='loans'):
975 user = accounts.get_current_user()
976 username = user.key.split('/')[-1]
977 raise web.seeother('/people/%s/books/%s' % (username, rest))
978
979 class account_my_books(delegate.page):
980 path = "/account/books"
981
982 @require_login
983 def GET(self):
984 user = accounts.get_current_user()
985 username = user.key.split('/')[-1]
986 raise web.seeother('/people/%s/books' % (username))
987
988 # This would be handled by the civi backend, which would require the api keys
989 class fake_civi(delegate.page):
990 path = "/internal/fake/civicrm"
991
992 def GET(self):
993 i = web.input(entity='Contact')
994 contact = {
995 'values': [{
996 'contact_id': '270430'
997 }]
998 }
999 contributions = {
1000 'values': [{
1001 "receive_date": "2019-07-31 08:57:00",
1002 "custom_52": "9780062457714",
1003 "total_amount": "50.00",
1004 "custom_53": "ol",
1005 "contact_id": "270430",
1006 "contribution_status": ""
1007 }]
1008 }
1009 entity = contributions if i.entity == 'Contribution' else contact
1010 return delegate.RawText(json.dumps(entity), content_type="application/json")
1011
1012 class import_books(delegate.page):
1013 path = "/account/import"
1014
1015 @require_login
1016 def GET(self):
1017 return render['account/import']()
1018
1019 class fetch_goodreads(delegate.page):
1020 path = "/account/import/goodreads"
1021
1022 def GET(self):
1023 raise web.seeother("/account/import")
1024
1025 @require_login
1026 def POST(self):
1027 books, books_wo_isbns = process_goodreads_csv(web.input())
1028 return render['account/import'](books, books_wo_isbns)
1029
1030 class export_books(delegate.page):
1031 path = "/account/export"
1032
1033 @require_login
1034 def GET(self):
1035 user = accounts.get_current_user()
1036 username = user.key.split('/')[-1]
1037 books = Bookshelves.get_users_logged_books(username, limit=10000)
1038 csv = []
1039 csv.append('Work Id,Edition Id,Bookshelf\n')
1040 mapping = {1:'Want to Read', 2:'Currently Reading', 3:'Already Read'}
1041 for book in books:
1042 row = [
1043 'OL{}W'.format(book['work_id']),
1044 'OL{}M'.format(book['edition_id']) if book['edition_id'] else '',
1045 '{}\n'.format(mapping[book['bookshelf_id']])
1046 ]
1047 csv.append(','.join(row))
1048 web.header('Content-Type','text/csv')
1049 web.header('Content-disposition', 'attachment; filename=OpenLibrary_ReadingLog.csv')
1050 csv = ''.join(csv)
1051 return delegate.RawText(csv, content_type="text/csv")
1052
1053 class account_loans(delegate.page):
1054 path = "/account/loans"
1055
1056 @require_login
1057 def GET(self):
1058 user = accounts.get_current_user()
1059 user.update_loan_status()
1060 loans = borrow.get_loans(user)
1061 return render['account/borrow'](user, loans)
1062
1063 class account_loans_json(delegate.page):
1064
1065 encoding = "json"
1066 path = "/account/loans"
1067
1068 @require_login
1069 def GET(self):
1070 user = accounts.get_current_user()
1071 user.update_loan_status()
1072 loans = borrow.get_loans(user)
1073 web.header('Content-Type', 'application/json')
1074 return delegate.RawText(json.dumps({
1075 "loans": loans
1076 }))
1077
1078
1079 # Disabling because it prevents account_my_books_redirect from working
1080 # for some reason. The purpose of this class is to not show the "Create" link for
1081 # /account pages since that doesn't make any sense.
1082 # class account_others(delegate.page):
1083 # path = "(/account/.*)"
1084 #
1085 # def GET(self, path):
1086 # return render.notfound(path, create=False)
1087
1088
1089 def send_forgot_password_email(username, email):
1090 key = "account/%s/password" % username
1091
1092 doc = create_link_doc(key, username, email)
1093 web.ctx.site.store[key] = doc
1094
1095 link = web.ctx.home + "/account/password/reset/" + doc['code']
1096 msg = render_template("email/password/reminder", username=username, email=email, link=link)
1097 sendmail(email, msg)
1098
1099
1100 def as_admin(f):
1101 """Infobase allows some requests only from admin user. This decorator logs in as admin, executes the function and clears the admin credentials."""
1102 def g(*a, **kw):
1103 try:
1104 delegate.admin_login()
1105 return f(*a, **kw)
1106 finally:
1107 web.ctx.headers = []
1108 return g
1109
1110
1111 def process_goodreads_csv(i):
1112 import csv
1113 csv_payload = i.csv if isinstance(i.csv, str) else i.csv.decode()
1114 csv_file = csv.reader(csv_payload.splitlines(), delimiter=',', quotechar='"')
1115 header = next(csv_file)
1116 books = {}
1117 books_wo_isbns = {}
1118 for book in list(csv_file):
1119 _book = dict(zip(header, book))
1120 isbn = _book['ISBN'] = _book['ISBN'].replace('"', '').replace('=', '')
1121 isbn_13 = _book['ISBN13'] = _book['ISBN13'].replace('"', '').replace('=', '')
1122 if isbn != '':
1123 books[isbn] = _book
1124 elif isbn_13 != '':
1125 books[isbn_13] = _book
1126 books[isbn_13]['ISBN'] = isbn_13
1127 else:
1128 books_wo_isbns[_book['Book Id']] = _book
1129 return books, books_wo_isbns
1130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/plugins/upstream/account.py b/openlibrary/plugins/upstream/account.py
--- a/openlibrary/plugins/upstream/account.py
+++ b/openlibrary/plugins/upstream/account.py
@@ -318,8 +318,8 @@
def GET(self):
referer = web.ctx.env.get('HTTP_REFERER', '/')
- # Don't set referer on user activation
- if 'archive.org' in referer:
+ # Don't set referer if request is from offsite
+ if 'openlibrary.org' not in referer:
referer = None
i = web.input(redirect=referer)
f = forms.Login()
| {"golden_diff": "diff --git a/openlibrary/plugins/upstream/account.py b/openlibrary/plugins/upstream/account.py\n--- a/openlibrary/plugins/upstream/account.py\n+++ b/openlibrary/plugins/upstream/account.py\n@@ -318,8 +318,8 @@\n \n def GET(self):\n referer = web.ctx.env.get('HTTP_REFERER', '/')\n- # Don't set referer on user activation\n- if 'archive.org' in referer:\n+ # Don't set referer if request is from offsite\n+ if 'openlibrary.org' not in referer:\n referer = None\n i = web.input(redirect=referer)\n f = forms.Login()\n", "issue": "Open Library login issue after visiting google\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\n\r\nIf http referer, Logging in erroneously redirects away from OpenLibrary \r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...\r\n2. Do ...\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: \r\n* Expected: \r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** \r\n- **Browser type/version?** \r\n- **Operating system?** \r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "\nimport web\nimport logging\nimport json\nimport re\n\nfrom infogami.utils import delegate\nfrom infogami import config\nfrom infogami.utils.view import (\n require_login, render, render_template, add_flash_message\n)\n\nfrom infogami.infobase.client import ClientException\nfrom infogami.utils.context import context\nfrom infogami.utils.view import safeint\nimport infogami.core.code as core\n\nfrom openlibrary import accounts\nfrom openlibrary.i18n import gettext as _\nfrom openlibrary.core import helpers as h, lending\nfrom openlibrary.core.booknotes import Booknotes\nfrom openlibrary.core.bookshelves import Bookshelves\nfrom openlibrary.core.observations import Observations, convert_observation_ids\nfrom openlibrary.core.lending import add_availability\nfrom openlibrary.plugins.recaptcha import recaptcha\nfrom openlibrary.plugins import openlibrary as olib\nfrom openlibrary.accounts import (\n audit_accounts, Account, OpenLibraryAccount, InternetArchiveAccount, valid_email)\nfrom openlibrary.core.sponsorships import get_sponsored_editions\nfrom openlibrary.plugins.upstream import borrow, forms, utils\n\nfrom six.moves import range\nfrom six.moves import urllib\n\n\nlogger = logging.getLogger(\"openlibrary.account\")\n\nRESULTS_PER_PAGE = 25\nUSERNAME_RETRIES = 3\n\n# XXX: These need to be cleaned up\nsend_verification_email = accounts.send_verification_email\ncreate_link_doc = accounts.create_link_doc\nsendmail = accounts.sendmail\n\nLOGIN_ERRORS = {\n \"invalid_email\": \"The email address you entered is invalid\",\n \"account_blocked\": \"This account has been blocked\",\n \"account_locked\": \"This account has been blocked\",\n \"account_not_found\": \"No account was found with this email. 
Please try again\",\n \"account_incorrect_password\": \"The password you entered is incorrect. Please try again\",\n \"account_bad_password\": \"Wrong password. Please try again\",\n \"account_not_verified\": \"Please verify your Open Library account before logging in\",\n \"ia_account_not_verified\": \"Please verify your Internet Archive account before logging in\",\n \"missing_fields\": \"Please fill out all fields and try again\",\n \"email_registered\": \"This email is already registered\",\n \"username_registered\": \"This username is already registered\",\n \"ia_login_only\": \"Sorry, you must use your Internet Archive email and password to log in\",\n \"max_retries_exceeded\": \"A problem occurred and we were unable to log you in.\",\n \"invalid_s3keys\": \"Login attempted with invalid Internet Archive s3 credentials.\",\n \"wrong_ia_account\": \"An Open Library account with this email is already linked to a different Internet Archive account. Please contact [email protected].\"\n }\n\nclass availability(delegate.page):\n path = \"/internal/fake/availability\"\n\n def POST(self):\n \"\"\"Internal private API required for testing on localhost\n \"\"\"\n return delegate.RawText(json.dumps({}),\n content_type=\"application/json\")\n\nclass loans(delegate.page):\n path = \"/internal/fake/loans\"\n\n def POST(self):\n \"\"\"Internal private API required for testing on localhost\n \"\"\"\n return delegate.RawText(json.dumps({}),\n content_type=\"application/json\")\n\nclass xauth(delegate.page):\n path = \"/internal/fake/xauth\"\n\n def POST(self):\n \"\"\"Internal private API required for testing login on localhost\n which normally would have to hit archive.org's xauth\n service. This service is spoofable to return successful and\n unsuccessful login attempts depending on the provided GET parameters\n \"\"\"\n i = web.input(email='', op=None)\n result = {\"error\": \"incorrect option specified\"}\n if i.op == \"authenticate\":\n result = {\n \"success\": True,\n \"version\": 1,\n \"values\": {\n \"access\": 'foo',\n \"secret\": 'foo',\n },\n }\n elif i.op == \"info\":\n result = {\n \"success\": True,\n \"values\": {\n \"locked\": False,\n \"email\": \"[email protected]\",\n \"itemname\":\"@openlibrary\",\n \"screenname\":\"openlibrary\",\n \"verified\": True\n },\n \"version\":1\n }\n return delegate.RawText(json.dumps(result),\n content_type=\"application/json\")\n\nclass internal_audit(delegate.page):\n path = \"/internal/account/audit\"\n\n def GET(self):\n \"\"\"Internal API endpoint used for authorized test cases and\n administrators to unlink linked OL and IA accounts.\n \"\"\"\n i = web.input(email='', username='', itemname='', key='', unlink='',\n new_itemname='')\n if i.key != lending.config_internal_tests_api_key:\n result = {'error': 'Authentication failed for private API'}\n else:\n try:\n result = OpenLibraryAccount.get(email=i.email, link=i.itemname,\n username=i.username)\n if result is None:\n raise ValueError('Invalid Open Library account email ' \\\n 'or itemname')\n result.enc_password = 'REDACTED'\n if i.new_itemname:\n result.link(i.new_itemname)\n if i.unlink:\n result.unlink()\n except ValueError as e:\n result = {'error': str(e)}\n\n return delegate.RawText(json.dumps(result),\n content_type=\"application/json\")\n\nclass account_migration(delegate.page):\n\n path = \"/internal/account/migration\"\n\n def GET(self):\n i = web.input(username='', email='', key='')\n if i.key != lending.config_internal_tests_api_key:\n return delegate.RawText(json.dumps({\n 'error': 
'Authentication failed for private API'\n }), content_type=\"application/json\")\n try:\n if i.username:\n ol_account = OpenLibraryAccount.get(username=i.username)\n elif i.email:\n ol_account = OpenLibraryAccount.get(email=i.email)\n except Exception as e:\n return delegate.RawText(json.dumps({\n 'error': 'bad-account'\n }), content_type=\"application/json\")\n if ol_account:\n ol_account.enc_password = 'REDACTED'\n if ol_account.itemname:\n return delegate.RawText(json.dumps({\n 'status': 'link-exists',\n 'username': ol_account.username,\n 'itemname': ol_account.itemname,\n 'email': ol_account.email.lower()\n }), content_type=\"application/json\")\n if not ol_account.itemname:\n ia_account = InternetArchiveAccount.get(email=ol_account.email.lower())\n if ia_account:\n ol_account.link(ia_account.itemname)\n return delegate.RawText(json.dumps({\n 'username': ol_account.username,\n 'status': 'link-found',\n 'itemname': ia_account.itemname,\n 'ol-itemname': ol_account.itemname,\n 'email': ol_account.email.lower(),\n 'ia': ia_account\n }), content_type=\"application/json\")\n\n password = OpenLibraryAccount.generate_random_password(16)\n ia_account = InternetArchiveAccount.create(\n ol_account.username or ol_account.displayname,\n ol_account.email, password, verified=True, retries=USERNAME_RETRIES)\n return delegate.RawText(json.dumps({\n 'username': ol_account.username,\n 'email': ol_account.email,\n 'itemname': ia_account.itemname,\n 'password': password,\n 'status': 'link-created'\n }), content_type=\"application/json\")\n\nclass account(delegate.page):\n \"\"\"Account preferences.\n \"\"\"\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n return render.account(user)\n\nclass account_create(delegate.page):\n \"\"\"New account creation.\n\n Account remains in the pending state until the email is activated.\n \"\"\"\n path = \"/account/create\"\n\n def GET(self):\n f = self.get_form()\n return render['account/create'](f)\n\n def get_form(self):\n \"\"\"\n :rtype: forms.RegisterForm\n \"\"\"\n f = forms.Register()\n recap = self.get_recap()\n f.has_recaptcha = recap is not None\n if f.has_recaptcha:\n f.inputs = list(f.inputs) + [recap]\n return f\n\n def get_recap(self):\n if self.is_plugin_enabled('recaptcha'):\n public_key = config.plugin_recaptcha.public_key\n private_key = config.plugin_recaptcha.private_key\n return recaptcha.Recaptcha(public_key, private_key)\n\n def is_plugin_enabled(self, name):\n return name in delegate.get_plugins() or \"openlibrary.plugins.\" + name in delegate.get_plugins()\n\n def POST(self):\n f = self.get_form() # type: forms.RegisterForm\n\n if f.validates(web.input()):\n try:\n # Create ia_account: require they activate via IA email\n # and then login to OL. 
Logging in after activation with\n # IA credentials will auto create and link OL account.\n\n \"\"\"NOTE: the values for the notifications must be kept in sync\n with the values in the `MAILING_LIST_KEYS` array in\n https://git.archive.org/ia/petabox/blob/master/www/common/MailSync/Settings.inc\n Currently, per the fundraising/development team, the\n \"announcements checkbox\" should map to BOTH `ml_best_of` and\n `ml_updates`\n \"\"\" # nopep8\n mls = ['ml_best_of', 'ml_updates']\n notifications = mls if f.ia_newsletter.checked else []\n InternetArchiveAccount.create(\n screenname=f.username.value, email=f.email.value, password=f.password.value,\n notifications=notifications, verified=False, retries=USERNAME_RETRIES)\n return render['account/verify'](username=f.username.value, email=f.email.value)\n except ValueError:\n f.note = LOGIN_ERRORS['max_retries_exceeded']\n\n return render['account/create'](f)\n\n\ndel delegate.pages['/account/register']\n\n\nclass account_login_json(delegate.page):\n\n encoding = \"json\"\n path = \"/account/login\"\n\n def POST(self):\n \"\"\"Overrides `account_login` and infogami.login to prevent users from\n logging in with Open Library username and password if the\n payload is json. Instead, if login attempted w/ json\n credentials, requires Archive.org s3 keys.\n \"\"\"\n from openlibrary.plugins.openlibrary.code import BadRequest\n d = json.loads(web.data())\n access = d.get('access', None)\n secret = d.get('secret', None)\n test = d.get('test', False)\n\n # Try S3 authentication first, fallback to infogami user, pass\n if access and secret:\n audit = audit_accounts(None, None, require_link=True,\n s3_access_key=access,\n s3_secret_key=secret, test=test)\n error = audit.get('error')\n if error:\n raise olib.code.BadRequest(error)\n web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token())\n # Fallback to infogami user/pass\n else:\n from infogami.plugins.api.code import login as infogami_login\n infogami_login().POST()\n\n\n\nclass account_login(delegate.page):\n \"\"\"Account login.\n\n Login can fail because of the following reasons:\n\n * account_not_found: Error message is displayed.\n * account_bad_password: Error message is displayed with a link to reset password.\n * account_not_verified: Error page is dispalyed with button to \"resend verification email\".\n \"\"\"\n path = \"/account/login\"\n\n def render_error(self, error_key, i):\n f = forms.Login()\n f.fill(i)\n f.note = LOGIN_ERRORS[error_key]\n return render.login(f)\n\n def GET(self):\n referer = web.ctx.env.get('HTTP_REFERER', '/')\n # Don't set referer on user activation\n if 'archive.org' in referer:\n referer = None\n i = web.input(redirect=referer)\n f = forms.Login()\n f['redirect'].value = i.redirect\n return render.login(f)\n\n def POST(self):\n i = web.input(username=\"\", connect=None, password=\"\", remember=False,\n redirect='/', test=False, access=None, secret=None)\n email = i.username # XXX username is now email\n audit = audit_accounts(email, i.password, require_link=True,\n s3_access_key=i.access,\n s3_secret_key=i.secret, test=i.test)\n error = audit.get('error')\n if error:\n return self.render_error(error, i)\n\n expires = 3600 * 24 * 7 if i.remember else \"\"\n web.setcookie('pd', int(audit.get('special_access')) or '',\n expires=expires)\n web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token(),\n expires=expires)\n blacklist = [\"/account/login\", \"/account/password\", \"/account/email\",\n \"/account/create\"]\n if i.redirect == \"\" or 
any([path in i.redirect for path in blacklist]):\n i.redirect = \"/\"\n raise web.seeother(i.redirect)\n\n def POST_resend_verification_email(self, i):\n try:\n ol_login = OpenLibraryAccount.authenticate(i.email, i.password)\n except ClientException as e:\n code = e.get_data().get(\"code\")\n if code != \"account_not_verified\":\n return self.error(\"account_incorrect_password\", i)\n\n account = OpenLibraryAccount.get(email=i.email)\n account.send_verification_email()\n\n title = _(\"Hi, %(user)s\", user=account.displayname)\n message = _(\"We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.\", email=account.email)\n return render.message(title, message)\n\nclass account_verify(delegate.page):\n \"\"\"Verify user account.\n \"\"\"\n path = \"/account/verify/([0-9a-f]*)\"\n\n def GET(self, code):\n docs = web.ctx.site.store.values(type=\"account-link\", name=\"code\", value=code)\n if docs:\n doc = docs[0]\n\n account = accounts.find(username = doc['username'])\n if account:\n if account['status'] != \"pending\":\n return render['account/verify/activated'](account)\n account.activate()\n user = web.ctx.site.get(\"/people/\" + doc['username']) #TBD\n return render['account/verify/success'](account)\n else:\n return render['account/verify/failed']()\n\n def POST(self, code=None):\n \"\"\"Called to regenerate account verification code.\n \"\"\"\n i = web.input(email=None)\n account = accounts.find(email=i.email)\n if not account:\n return render_template(\"account/verify/failed\", email=i.email)\n elif account['status'] != \"pending\":\n return render['account/verify/activated'](account)\n else:\n account.send_verification_email()\n title = _(\"Hi, %(user)s\", user=account.displayname)\n message = _(\"We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.\", email=account.email)\n return render.message(title, message)\n\nclass account_verify_old(account_verify):\n \"\"\"Old account verification code.\n\n This takes username, email and code as url parameters. 
The new one takes just the code as part of the url.\n \"\"\"\n path = \"/account/verify\"\n def GET(self):\n # It is too long since we switched to the new account verification links.\n # All old links must be expired by now.\n # Show failed message without thinking.\n return render['account/verify/failed']()\n\nclass account_validation(delegate.page):\n path = '/account/validate'\n\n @staticmethod\n def validate_username(username):\n if not 3 <= len(username) <= 20:\n return _('Username must be between 3-20 characters')\n if not re.match('^[A-Za-z0-9-_]{3,20}$', username):\n return _('Username may only contain numbers and letters')\n ol_account = OpenLibraryAccount.get(username=username)\n if ol_account:\n return _(\"Username unavailable\")\n\n @staticmethod\n def validate_email(email):\n if not (email and re.match(r'.*@.*\\..*', email)):\n return _('Must be a valid email address')\n\n ol_account = OpenLibraryAccount.get(email=email)\n if ol_account:\n return _('Email already registered')\n\n\n def GET(self):\n i = web.input()\n errors = {\n 'email': None,\n 'username': None\n }\n if i.get('email') is not None:\n errors['email'] = self.validate_email(i.email)\n if i.get('username') is not None:\n errors['username'] = self.validate_username(i.username)\n return delegate.RawText(json.dumps(errors),\n content_type=\"application/json\")\n\n\nclass account_email_verify(delegate.page):\n path = \"/account/email/verify/([0-9a-f]*)\"\n\n def GET(self, code):\n link = accounts.get_link(code)\n if link:\n username = link['username']\n email = link['email']\n link.delete()\n return self.update_email(username, email)\n else:\n return self.bad_link()\n\n def update_email(self, username, email):\n if accounts.find(email=email):\n title = _(\"Email address is already used.\")\n message = _(\"Your email address couldn't be updated. The specified email address is already used.\")\n else:\n logger.info(\"updated email of %s to %s\", username, email)\n accounts.update_account(username=username, email=email, status=\"active\")\n title = _(\"Email verification successful.\")\n message = _('Your email address has been successfully verified and updated in your account.')\n return render.message(title, message)\n\n def bad_link(self):\n title = _(\"Email address couldn't be verified.\")\n message = _(\"Your email address couldn't be verified. The verification link seems invalid.\")\n return render.message(title, message)\n\nclass account_email_verify_old(account_email_verify):\n path = \"/account/email/verify\"\n\n def GET(self):\n # It is too long since we switched to the new email verification links.\n # All old links must be expired by now.\n # Show failed message without thinking.\n return self.bad_link()\n\nclass account_ia_email_forgot(delegate.page):\n path = \"/account/email/forgot-ia\"\n\n def GET(self):\n return render_template('account/email/forgot-ia')\n\n def POST(self):\n i = web.input(email='', password='')\n err = \"\"\n\n if valid_email(i.email):\n act = OpenLibraryAccount.get(email=i.email)\n if act:\n if OpenLibraryAccount.authenticate(i.email, i.password) == \"ok\":\n ia_act = act.get_linked_ia_account()\n if ia_act:\n return render_template('account/email/forgot-ia', email=ia_act.email)\n else:\n err = \"Open Library Account not linked. 
Login with your Open Library credentials to connect or create an Archive.org account\"\n else:\n err = \"Incorrect password\"\n else:\n err = \"Sorry, this Open Library account does not exist\"\n else:\n err = \"Please enter a valid Open Library email\"\n return render_template('account/email/forgot-ia', err=err)\n\nclass account_ol_email_forgot(delegate.page):\n path = \"/account/email/forgot\"\n\n def GET(self):\n return render_template('account/email/forgot')\n\n def POST(self):\n i = web.input(username='', password='')\n err = \"\"\n act = OpenLibraryAccount.get(username=i.username)\n\n if act:\n if OpenLibraryAccount.authenticate(act.email, i.password) == \"ok\":\n return render_template('account/email/forgot', email=act.email)\n else:\n err = \"Incorrect password\"\n\n elif valid_email(i.username):\n err = \"Please enter a username, not an email\"\n\n else:\n err=\"Sorry, this user does not exist\"\n\n return render_template('account/email/forgot', err=err)\n\n\nclass account_password_forgot(delegate.page):\n path = \"/account/password/forgot\"\n\n def GET(self):\n f = forms.ForgotPassword()\n return render['account/password/forgot'](f)\n\n def POST(self):\n i = web.input(email='')\n\n f = forms.ForgotPassword()\n\n if not f.validates(i):\n return render['account/password/forgot'](f)\n\n account = accounts.find(email=i.email)\n\n if account.is_blocked():\n f.note = utils.get_error(\"account_blocked\")\n return render_template('account/password/forgot', f)\n\n send_forgot_password_email(account.username, i.email)\n return render['account/password/sent'](i.email)\n\nclass account_password_reset(delegate.page):\n\n path = \"/account/password/reset/([0-9a-f]*)\"\n\n def GET(self, code):\n docs = web.ctx.site.store.values(type=\"account-link\", name=\"code\", value=code)\n if not docs:\n title = _(\"Password reset failed.\")\n message = \"Your password reset link seems invalid or expired.\"\n return render.message(title, message)\n\n f = forms.ResetPassword()\n return render['account/password/reset'](f)\n\n def POST(self, code):\n link = accounts.get_link(code)\n if not link:\n title = _(\"Password reset failed.\")\n message = \"The password reset link seems invalid or expired.\"\n return render.message(title, message)\n\n username = link['username']\n i = web.input()\n\n accounts.update_account(username, password=i.password)\n link.delete()\n return render_template(\"account/password/reset_success\", username=username)\n\n\nclass account_audit(delegate.page):\n\n path = \"/account/audit\"\n\n def POST(self):\n \"\"\"When the user attempts a login, an audit is performed to determine\n whether their account is already linked (in which case we can\n proceed to log the user in), whether there is an error\n authenticating their account, or whether a /account/connect\n must first performed.\n\n Note: Emails are case sensitive behind the scenes and\n functions which require them as lower will make them so\n \"\"\"\n i = web.input(email='', password='')\n test = i.get('test', '').lower() == 'true'\n email = i.get('email')\n password = i.get('password')\n result = audit_accounts(email, password, test=test)\n return delegate.RawText(json.dumps(result),\n content_type=\"application/json\")\n\nclass account_privacy(delegate.page):\n path = \"/account/privacy\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n return render['account/privacy'](user.preferences())\n\n @require_login\n def POST(self):\n user = accounts.get_current_user()\n user.save_preferences(web.input())\n 
add_flash_message('note', _(\"Notification preferences have been updated successfully.\"))\n web.seeother(\"/account\")\n\nclass account_notifications(delegate.page):\n path = \"/account/notifications\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n email = user.email\n return render['account/notifications'](user.preferences(), email)\n\n @require_login\n def POST(self):\n user = accounts.get_current_user()\n user.save_preferences(web.input())\n add_flash_message('note', _(\"Notification preferences have been updated successfully.\"))\n web.seeother(\"/account\")\n\nclass account_lists(delegate.page):\n path = \"/account/lists\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n raise web.seeother(user.key + '/lists')\n\n\nclass ReadingLog(object):\n\n \"\"\"Manages the user's account page books (reading log, waitlists, loans)\"\"\"\n\n def __init__(self, user=None):\n self.user = user or accounts.get_current_user()\n #self.user.update_loan_status()\n self.KEYS = {\n 'waitlists': self.get_waitlisted_editions,\n 'loans': self.get_loans,\n 'want-to-read': self.get_want_to_read,\n 'currently-reading': self.get_currently_reading,\n 'already-read': self.get_already_read\n }\n\n @property\n def lists(self):\n return self.user.get_lists()\n\n @property\n def reading_log_counts(self):\n counts = Bookshelves.count_total_books_logged_by_user_per_shelf(\n self.user.get_username())\n return {\n 'want-to-read': counts.get(Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0),\n 'currently-reading': counts.get(Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0),\n 'already-read': counts.get(Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0)\n }\n\n def get_loans(self):\n return borrow.get_loans(self.user)\n\n def get_waitlist_summary(self):\n return self.user.get_waitinglist()\n\n def get_waitlisted_editions(self):\n \"\"\"Gets a list of records corresponding to a user's waitlisted\n editions, fetches all the editions, and then inserts the data\n from each waitlist record (e.g. 
position in line) into the\n corresponding edition\n \"\"\"\n waitlists = self.user.get_waitinglist()\n keyed_waitlists = dict([(w['identifier'], w) for w in waitlists])\n ocaids = [i['identifier'] for i in waitlists]\n edition_keys = web.ctx.site.things({\"type\": \"/type/edition\", \"ocaid\": ocaids})\n editions = web.ctx.site.get_many(edition_keys)\n for i in range(len(editions)):\n # insert the waitlist_entry corresponding to this edition\n editions[i].waitlist_record = keyed_waitlists[editions[i].ocaid]\n return editions\n\n def process_logged_books(self, logged_books):\n work_ids = ['/works/OL%sW' % i['work_id'] for i in logged_books]\n works = web.ctx.site.get_many(work_ids)\n for i in range(len(works)):\n # insert the logged edition (if present) and logged date\n works[i].logged_date = logged_books[i]['created']\n works[i].logged_edition = (\n '/books/OL%sM' % logged_books[i]['edition_id']\n if logged_books[i]['edition_id'] else '')\n return works\n\n def get_want_to_read(self, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n return self.process_logged_books(Bookshelves.get_users_logged_books(\n self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Want to Read'],\n page=page, limit=limit, sort=sort + ' ' + sort_order))\n\n def get_currently_reading(self, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n return self.process_logged_books(Bookshelves.get_users_logged_books(\n self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Currently Reading'],\n page=page, limit=limit, sort=sort + ' ' + sort_order))\n\n def get_already_read(self, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n return self.process_logged_books(Bookshelves.get_users_logged_books(\n self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Already Read'],\n page=page, limit=limit, sort=sort + ' ' + sort_order))\n\n def get_works(self, key, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n \"\"\"\n :rtype: list of openlibrary.plugins.upstream.models.Work\n \"\"\"\n key = key.lower()\n if key in self.KEYS:\n return self.KEYS[key](page=page, limit=limit,\n sort=sort, sort_order=sort_order)\n else: # must be a list or invalid page!\n #works = web.ctx.site.get_many([ ... 
])\n raise\n\n\nclass PatronBooknotes(object):\n \"\"\" Manages the patron's book notes and observations \"\"\"\n\n def __init__(self, user):\n user = user or account.get_current_user()\n self.username = user.key.split('/')[-1]\n\n def get_notes(self, limit=RESULTS_PER_PAGE, page=1):\n notes = Booknotes.get_notes_grouped_by_work(\n self.username,\n limit=limit,\n page=page)\n\n for entry in notes:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n entry['notes'] = {i['edition_id']: i['notes'] for i in entry['notes']}\n entry['editions'] = {\n k: web.ctx.site.get(f'/books/OL{k}M')\n for k in entry['notes'] if k != Booknotes.NULL_EDITION_VALUE}\n return notes\n\n def get_observations(self, limit=RESULTS_PER_PAGE, page=1):\n observations = Observations.get_observations_grouped_by_work(\n self.username,\n limit=limit,\n page=page)\n\n for entry in observations:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n ids = {}\n for item in entry['observations']:\n ids[item['observation_type']] = item['observation_values']\n entry['observations'] = convert_observation_ids(ids)\n return observations\n\n def _get_work(self, work_key):\n return web.ctx.site.get(work_key)\n\n def _get_work_details(self, work):\n author_keys = [a.author.key for a in work.get('authors', [])]\n\n return {\n 'cover_url': (\n work.get_cover_url('S') or\n 'https://openlibrary.org/images/icons/avatar_book-sm.png'),\n 'title': work.get('title'),\n 'authors': [a.name for a in web.ctx.site.get_many(author_keys)],\n 'first_publish_year': work.first_publish_year or None\n }\n\n @classmethod\n def get_counts(cls, username):\n return {\n 'notes': Booknotes.count_works_with_notes_by_user(username),\n 'observations': Observations.count_distinct_observations(username)\n }\n\n\nclass public_my_books(delegate.page):\n path = \"/people/([^/]+)/books\"\n\n def GET(self, username):\n raise web.seeother('/people/%s/books/want-to-read' % username)\n\n\nclass public_my_books(delegate.page):\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key='loans'):\n \"\"\"check if user's reading log is public\"\"\"\n i = web.input(page=1, sort='desc')\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return render.notfound(\"User %s\" % username, create=False)\n is_public = user.preferences().get('public_readlog', 'no') == 'yes'\n logged_in_user = accounts.get_current_user()\n is_logged_in_user = (\n logged_in_user and\n logged_in_user.key.split('/')[-1] == username)\n if is_public or is_logged_in_user:\n readlog = ReadingLog(user=user)\n sponsorships = get_sponsored_editions(user)\n if key == 'sponsorships':\n books = (web.ctx.site.get(\n web.ctx.site.things({\n 'type': '/type/edition',\n 'isbn_%s' % len(s['isbn']): s['isbn']\n })[0]) for s in sponsorships)\n elif key == 'notes' and is_logged_in_user:\n books = PatronBooknotes(user).get_notes(page=int(i.page))\n elif key == 'observations' and is_logged_in_user:\n books = PatronBooknotes(user).get_observations(page=int(i.page))\n else:\n books = add_availability(\n readlog.get_works(key, page=i.page,\n sort='created', sort_order=i.sort),\n mode=\"openlibrary_work\"\n )\n booknotes_counts = PatronBooknotes.get_counts(username)\n\n return render['account/books'](\n books, key, sponsorship_count=len(sponsorships),\n 
reading_log_counts=readlog.reading_log_counts, lists=readlog.lists,\n user=user, logged_in_user=logged_in_user, public=is_public,\n sort_order=str(i.sort), booknotes_counts=booknotes_counts\n )\n raise web.seeother(user.key)\n\n\nclass public_my_books_json(delegate.page):\n encoding = \"json\"\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key='want-to-read'):\n i = web.input(page=1, limit=5000)\n page = safeint(i.page, 1)\n limit = safeint(i.limit, 5000)\n \"\"\"check if user's reading log is public\"\"\"\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return delegate.RawText(\n json.dumps({'error': 'User %s not found' % username}),\n content_type=\"application/json\")\n is_public = user.preferences().get('public_readlog', 'no') == 'yes'\n logged_in_user = accounts.get_current_user()\n if (is_public or\n logged_in_user and logged_in_user.key.split('/')[-1] == username):\n readlog = ReadingLog(user=user)\n books = readlog.get_works(key, page, limit)\n records_json = [\n {\n 'work':\n {\n 'title': w.get('title'),\n 'key': w.key,\n 'author_keys': [a.author.key for a in w.get('authors', [])],\n 'author_names': [str(a.author.name) for a\n in w.get('authors', [])],\n 'first_publish_year': w.first_publish_year or None,\n 'lending_edition_s': (w._solr_data and\n w._solr_data.get('lending_edition_s') or\n None),\n 'edition_key': (w._solr_data and\n w._solr_data.get('edition_key') or None),\n 'cover_id': (w._solr_data and\n w._solr_data.get('cover_id') or None),\n 'cover_edition_key': (w._solr_data and\n w._solr_data.get('cover_edition_key') or\n None),\n },\n 'logged_edition': w.get('logged_edition') or None,\n 'logged_date': (w.get('logged_date').strftime(\"%Y/%m/%d, %H:%M:%S\")\n if w.get('logged_date') else None),\n } for w in books\n ]\n return delegate.RawText(json.dumps({\n 'page': page,\n 'reading_log_entries': records_json\n }), content_type=\"application/json\")\n else:\n return delegate.RawText(\n json.dumps({'error': 'Shelf %s not found or not accessible' % key}),\n content_type=\"application/json\")\n\n\nclass readinglog_stats(delegate.page):\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)/stats\"\n\n def GET(self, username, key='loans'):\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return render.notfound(\"User %s\" % username, create=False)\n\n cur_user = accounts.get_current_user()\n if not cur_user or cur_user.key.split('/')[-1] != username:\n return render.permission_denied(web.ctx.path, 'Permission Denied')\n\n readlog = ReadingLog(user=user)\n works = readlog.get_works(key, page=1, limit=2000)\n works_json = [\n {\n 'title': w.get('title'),\n 'key': w.key,\n 'author_keys': [a.author.key for a in w.get('authors', [])],\n 'first_publish_year': w.first_publish_year or None,\n 'subjects': w.get('subjects'),\n 'subject_people': w.get('subject_people'),\n 'subject_places': w.get('subject_places'),\n 'subject_times': w.get('subject_times'),\n } for w in works\n ]\n author_keys = set(\n a\n for work in works_json\n for a in work['author_keys']\n )\n authors_json = [\n {\n 'key': a.key,\n 'name': a.name,\n 'birth_date': a.get('birth_date'),\n }\n for a in web.ctx.site.get_many(list(author_keys))\n ]\n return render['account/readinglog_stats'](\n json.dumps(works_json),\n json.dumps(authors_json),\n len(works_json),\n user.key,\n user.displayname,\n web.ctx.path.rsplit('/', 1)[0],\n key,\n lang=web.ctx.lang,\n )\n\n\nclass account_my_books_redirect(delegate.page):\n path = \"/account/books/(.*)\"\n\n @require_login\n 
def GET(self, rest='loans'):\n user = accounts.get_current_user()\n username = user.key.split('/')[-1]\n raise web.seeother('/people/%s/books/%s' % (username, rest))\n\nclass account_my_books(delegate.page):\n path = \"/account/books\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n username = user.key.split('/')[-1]\n raise web.seeother('/people/%s/books' % (username))\n\n# This would be by the civi backend which would require the api keys\nclass fake_civi(delegate.page):\n path = \"/internal/fake/civicrm\"\n\n def GET(self):\n i = web.input(entity='Contact')\n contact = {\n 'values': [{\n 'contact_id': '270430'\n }]\n }\n contributions = {\n 'values': [{\n \"receive_date\": \"2019-07-31 08:57:00\",\n \"custom_52\": \"9780062457714\",\n \"total_amount\": \"50.00\",\n \"custom_53\": \"ol\",\n \"contact_id\": \"270430\",\n \"contribution_status\": \"\"\n }]\n }\n entity = contributions if i.entity == 'Contribution' else contact\n return delegate.RawText(json.dumps(entity), content_type=\"application/json\")\n\nclass import_books(delegate.page):\n path = \"/account/import\"\n\n @require_login\n def GET(self):\n return render['account/import']()\n\nclass fetch_goodreads(delegate.page):\n path = \"/account/import/goodreads\"\n\n def GET(self):\n raise web.seeother(\"/account/import\")\n\n @require_login\n def POST(self):\n books, books_wo_isbns = process_goodreads_csv(web.input())\n return render['account/import'](books, books_wo_isbns)\n\nclass export_books(delegate.page):\n path = \"/account/export\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n username = user.key.split('/')[-1]\n books = Bookshelves.get_users_logged_books(username, limit=10000)\n csv = []\n csv.append('Work Id,Edition Id,Bookshelf\\n')\n mapping = {1:'Want to Read', 2:'Currently Reading', 3:'Already Read'}\n for book in books:\n row = [\n 'OL{}W'.format(book['work_id']),\n 'OL{}M'.format(book['edition_id']) if book['edition_id'] else '',\n '{}\\n'.format(mapping[book['bookshelf_id']])\n ]\n csv.append(','.join(row))\n web.header('Content-Type','text/csv')\n web.header('Content-disposition', 'attachment; filename=OpenLibrary_ReadingLog.csv')\n csv = ''.join(csv)\n return delegate.RawText(csv, content_type=\"text/csv\")\n\nclass account_loans(delegate.page):\n path = \"/account/loans\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n user.update_loan_status()\n loans = borrow.get_loans(user)\n return render['account/borrow'](user, loans)\n\nclass account_loans_json(delegate.page):\n\n encoding = \"json\"\n path = \"/account/loans\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n user.update_loan_status()\n loans = borrow.get_loans(user)\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps({\n \"loans\": loans\n }))\n\n\n# Disabling be cause it prevents account_my_books_redirect from working\n# for some reason. 
The purpose of this class is to not show the \"Create\" link for\n# /account pages since that doesn't make any sense.\n# class account_others(delegate.page):\n# path = \"(/account/.*)\"\n#\n# def GET(self, path):\n# return render.notfound(path, create=False)\n\n\ndef send_forgot_password_email(username, email):\n key = \"account/%s/password\" % username\n\n doc = create_link_doc(key, username, email)\n web.ctx.site.store[key] = doc\n\n link = web.ctx.home + \"/account/password/reset/\" + doc['code']\n msg = render_template(\"email/password/reminder\", username=username, email=email, link=link)\n sendmail(email, msg)\n\n\ndef as_admin(f):\n \"\"\"Infobase allows some requests only from admin user. This decorator logs in as admin, executes the function and clears the admin credentials.\"\"\"\n def g(*a, **kw):\n try:\n delegate.admin_login()\n return f(*a, **kw)\n finally:\n web.ctx.headers = []\n return g\n\n\ndef process_goodreads_csv(i):\n import csv\n csv_payload = i.csv if isinstance(i.csv, str) else i.csv.decode()\n csv_file = csv.reader(csv_payload.splitlines(), delimiter=',', quotechar='\"')\n header = next(csv_file)\n books = {}\n books_wo_isbns = {}\n for book in list(csv_file):\n _book = dict(zip(header, book))\n isbn = _book['ISBN'] = _book['ISBN'].replace('\"', '').replace('=', '')\n isbn_13 = _book['ISBN13'] = _book['ISBN13'].replace('\"', '').replace('=', '')\n if isbn != '':\n books[isbn] = _book\n elif isbn_13 != '':\n books[isbn_13] = _book\n books[isbn_13]['ISBN'] = isbn_13\n else:\n books_wo_isbns[_book['Book Id']] = _book\n return books, books_wo_isbns\n", "path": "openlibrary/plugins/upstream/account.py"}], "after_files": [{"content": "\nimport web\nimport logging\nimport json\nimport re\n\nfrom infogami.utils import delegate\nfrom infogami import config\nfrom infogami.utils.view import (\n require_login, render, render_template, add_flash_message\n)\n\nfrom infogami.infobase.client import ClientException\nfrom infogami.utils.context import context\nfrom infogami.utils.view import safeint\nimport infogami.core.code as core\n\nfrom openlibrary import accounts\nfrom openlibrary.i18n import gettext as _\nfrom openlibrary.core import helpers as h, lending\nfrom openlibrary.core.booknotes import Booknotes\nfrom openlibrary.core.bookshelves import Bookshelves\nfrom openlibrary.core.observations import Observations, convert_observation_ids\nfrom openlibrary.core.lending import add_availability\nfrom openlibrary.plugins.recaptcha import recaptcha\nfrom openlibrary.plugins import openlibrary as olib\nfrom openlibrary.accounts import (\n audit_accounts, Account, OpenLibraryAccount, InternetArchiveAccount, valid_email)\nfrom openlibrary.core.sponsorships import get_sponsored_editions\nfrom openlibrary.plugins.upstream import borrow, forms, utils\n\nfrom six.moves import range\nfrom six.moves import urllib\n\n\nlogger = logging.getLogger(\"openlibrary.account\")\n\nRESULTS_PER_PAGE = 25\nUSERNAME_RETRIES = 3\n\n# XXX: These need to be cleaned up\nsend_verification_email = accounts.send_verification_email\ncreate_link_doc = accounts.create_link_doc\nsendmail = accounts.sendmail\n\nLOGIN_ERRORS = {\n \"invalid_email\": \"The email address you entered is invalid\",\n \"account_blocked\": \"This account has been blocked\",\n \"account_locked\": \"This account has been blocked\",\n \"account_not_found\": \"No account was found with this email. Please try again\",\n \"account_incorrect_password\": \"The password you entered is incorrect. 
Please try again\",\n \"account_bad_password\": \"Wrong password. Please try again\",\n \"account_not_verified\": \"Please verify your Open Library account before logging in\",\n \"ia_account_not_verified\": \"Please verify your Internet Archive account before logging in\",\n \"missing_fields\": \"Please fill out all fields and try again\",\n \"email_registered\": \"This email is already registered\",\n \"username_registered\": \"This username is already registered\",\n \"ia_login_only\": \"Sorry, you must use your Internet Archive email and password to log in\",\n \"max_retries_exceeded\": \"A problem occurred and we were unable to log you in.\",\n \"invalid_s3keys\": \"Login attempted with invalid Internet Archive s3 credentials.\",\n \"wrong_ia_account\": \"An Open Library account with this email is already linked to a different Internet Archive account. Please contact [email protected].\"\n }\n\nclass availability(delegate.page):\n path = \"/internal/fake/availability\"\n\n def POST(self):\n \"\"\"Internal private API required for testing on localhost\n \"\"\"\n return delegate.RawText(json.dumps({}),\n content_type=\"application/json\")\n\nclass loans(delegate.page):\n path = \"/internal/fake/loans\"\n\n def POST(self):\n \"\"\"Internal private API required for testing on localhost\n \"\"\"\n return delegate.RawText(json.dumps({}),\n content_type=\"application/json\")\n\nclass xauth(delegate.page):\n path = \"/internal/fake/xauth\"\n\n def POST(self):\n \"\"\"Internal private API required for testing login on localhost\n which normally would have to hit archive.org's xauth\n service. This service is spoofable to return successful and\n unsuccessful login attempts depending on the provided GET parameters\n \"\"\"\n i = web.input(email='', op=None)\n result = {\"error\": \"incorrect option specified\"}\n if i.op == \"authenticate\":\n result = {\n \"success\": True,\n \"version\": 1,\n \"values\": {\n \"access\": 'foo',\n \"secret\": 'foo',\n },\n }\n elif i.op == \"info\":\n result = {\n \"success\": True,\n \"values\": {\n \"locked\": False,\n \"email\": \"[email protected]\",\n \"itemname\":\"@openlibrary\",\n \"screenname\":\"openlibrary\",\n \"verified\": True\n },\n \"version\":1\n }\n return delegate.RawText(json.dumps(result),\n content_type=\"application/json\")\n\nclass internal_audit(delegate.page):\n path = \"/internal/account/audit\"\n\n def GET(self):\n \"\"\"Internal API endpoint used for authorized test cases and\n administrators to unlink linked OL and IA accounts.\n \"\"\"\n i = web.input(email='', username='', itemname='', key='', unlink='',\n new_itemname='')\n if i.key != lending.config_internal_tests_api_key:\n result = {'error': 'Authentication failed for private API'}\n else:\n try:\n result = OpenLibraryAccount.get(email=i.email, link=i.itemname,\n username=i.username)\n if result is None:\n raise ValueError('Invalid Open Library account email ' \\\n 'or itemname')\n result.enc_password = 'REDACTED'\n if i.new_itemname:\n result.link(i.new_itemname)\n if i.unlink:\n result.unlink()\n except ValueError as e:\n result = {'error': str(e)}\n\n return delegate.RawText(json.dumps(result),\n content_type=\"application/json\")\n\nclass account_migration(delegate.page):\n\n path = \"/internal/account/migration\"\n\n def GET(self):\n i = web.input(username='', email='', key='')\n if i.key != lending.config_internal_tests_api_key:\n return delegate.RawText(json.dumps({\n 'error': 'Authentication failed for private API'\n }), content_type=\"application/json\")\n try:\n if 
i.username:\n ol_account = OpenLibraryAccount.get(username=i.username)\n elif i.email:\n ol_account = OpenLibraryAccount.get(email=i.email)\n except Exception as e:\n return delegate.RawText(json.dumps({\n 'error': 'bad-account'\n }), content_type=\"application/json\")\n if ol_account:\n ol_account.enc_password = 'REDACTED'\n if ol_account.itemname:\n return delegate.RawText(json.dumps({\n 'status': 'link-exists',\n 'username': ol_account.username,\n 'itemname': ol_account.itemname,\n 'email': ol_account.email.lower()\n }), content_type=\"application/json\")\n if not ol_account.itemname:\n ia_account = InternetArchiveAccount.get(email=ol_account.email.lower())\n if ia_account:\n ol_account.link(ia_account.itemname)\n return delegate.RawText(json.dumps({\n 'username': ol_account.username,\n 'status': 'link-found',\n 'itemname': ia_account.itemname,\n 'ol-itemname': ol_account.itemname,\n 'email': ol_account.email.lower(),\n 'ia': ia_account\n }), content_type=\"application/json\")\n\n password = OpenLibraryAccount.generate_random_password(16)\n ia_account = InternetArchiveAccount.create(\n ol_account.username or ol_account.displayname,\n ol_account.email, password, verified=True, retries=USERNAME_RETRIES)\n return delegate.RawText(json.dumps({\n 'username': ol_account.username,\n 'email': ol_account.email,\n 'itemname': ia_account.itemname,\n 'password': password,\n 'status': 'link-created'\n }), content_type=\"application/json\")\n\nclass account(delegate.page):\n \"\"\"Account preferences.\n \"\"\"\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n return render.account(user)\n\nclass account_create(delegate.page):\n \"\"\"New account creation.\n\n Account remains in the pending state until the email is activated.\n \"\"\"\n path = \"/account/create\"\n\n def GET(self):\n f = self.get_form()\n return render['account/create'](f)\n\n def get_form(self):\n \"\"\"\n :rtype: forms.RegisterForm\n \"\"\"\n f = forms.Register()\n recap = self.get_recap()\n f.has_recaptcha = recap is not None\n if f.has_recaptcha:\n f.inputs = list(f.inputs) + [recap]\n return f\n\n def get_recap(self):\n if self.is_plugin_enabled('recaptcha'):\n public_key = config.plugin_recaptcha.public_key\n private_key = config.plugin_recaptcha.private_key\n return recaptcha.Recaptcha(public_key, private_key)\n\n def is_plugin_enabled(self, name):\n return name in delegate.get_plugins() or \"openlibrary.plugins.\" + name in delegate.get_plugins()\n\n def POST(self):\n f = self.get_form() # type: forms.RegisterForm\n\n if f.validates(web.input()):\n try:\n # Create ia_account: require they activate via IA email\n # and then login to OL. 
Logging in after activation with\n # IA credentials will auto create and link OL account.\n\n \"\"\"NOTE: the values for the notifications must be kept in sync\n with the values in the `MAILING_LIST_KEYS` array in\n https://git.archive.org/ia/petabox/blob/master/www/common/MailSync/Settings.inc\n Currently, per the fundraising/development team, the\n \"announcements checkbox\" should map to BOTH `ml_best_of` and\n `ml_updates`\n \"\"\" # nopep8\n mls = ['ml_best_of', 'ml_updates']\n notifications = mls if f.ia_newsletter.checked else []\n InternetArchiveAccount.create(\n screenname=f.username.value, email=f.email.value, password=f.password.value,\n notifications=notifications, verified=False, retries=USERNAME_RETRIES)\n return render['account/verify'](username=f.username.value, email=f.email.value)\n except ValueError:\n f.note = LOGIN_ERRORS['max_retries_exceeded']\n\n return render['account/create'](f)\n\n\ndel delegate.pages['/account/register']\n\n\nclass account_login_json(delegate.page):\n\n encoding = \"json\"\n path = \"/account/login\"\n\n def POST(self):\n \"\"\"Overrides `account_login` and infogami.login to prevent users from\n logging in with Open Library username and password if the\n payload is json. Instead, if login attempted w/ json\n credentials, requires Archive.org s3 keys.\n \"\"\"\n from openlibrary.plugins.openlibrary.code import BadRequest\n d = json.loads(web.data())\n access = d.get('access', None)\n secret = d.get('secret', None)\n test = d.get('test', False)\n\n # Try S3 authentication first, fallback to infogami user, pass\n if access and secret:\n audit = audit_accounts(None, None, require_link=True,\n s3_access_key=access,\n s3_secret_key=secret, test=test)\n error = audit.get('error')\n if error:\n raise olib.code.BadRequest(error)\n web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token())\n # Fallback to infogami user/pass\n else:\n from infogami.plugins.api.code import login as infogami_login\n infogami_login().POST()\n\n\n\nclass account_login(delegate.page):\n \"\"\"Account login.\n\n Login can fail because of the following reasons:\n\n * account_not_found: Error message is displayed.\n * account_bad_password: Error message is displayed with a link to reset password.\n * account_not_verified: Error page is dispalyed with button to \"resend verification email\".\n \"\"\"\n path = \"/account/login\"\n\n def render_error(self, error_key, i):\n f = forms.Login()\n f.fill(i)\n f.note = LOGIN_ERRORS[error_key]\n return render.login(f)\n\n def GET(self):\n referer = web.ctx.env.get('HTTP_REFERER', '/')\n # Don't set referer if request is from offsite\n if 'openlibrary.org' not in referer:\n referer = None\n i = web.input(redirect=referer)\n f = forms.Login()\n f['redirect'].value = i.redirect\n return render.login(f)\n\n def POST(self):\n i = web.input(username=\"\", connect=None, password=\"\", remember=False,\n redirect='/', test=False, access=None, secret=None)\n email = i.username # XXX username is now email\n audit = audit_accounts(email, i.password, require_link=True,\n s3_access_key=i.access,\n s3_secret_key=i.secret, test=i.test)\n error = audit.get('error')\n if error:\n return self.render_error(error, i)\n\n expires = 3600 * 24 * 7 if i.remember else \"\"\n web.setcookie('pd', int(audit.get('special_access')) or '',\n expires=expires)\n web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token(),\n expires=expires)\n blacklist = [\"/account/login\", \"/account/password\", \"/account/email\",\n \"/account/create\"]\n if 
i.redirect == \"\" or any([path in i.redirect for path in blacklist]):\n i.redirect = \"/\"\n raise web.seeother(i.redirect)\n\n def POST_resend_verification_email(self, i):\n try:\n ol_login = OpenLibraryAccount.authenticate(i.email, i.password)\n except ClientException as e:\n code = e.get_data().get(\"code\")\n if code != \"account_not_verified\":\n return self.error(\"account_incorrect_password\", i)\n\n account = OpenLibraryAccount.get(email=i.email)\n account.send_verification_email()\n\n title = _(\"Hi, %(user)s\", user=account.displayname)\n message = _(\"We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.\", email=account.email)\n return render.message(title, message)\n\nclass account_verify(delegate.page):\n \"\"\"Verify user account.\n \"\"\"\n path = \"/account/verify/([0-9a-f]*)\"\n\n def GET(self, code):\n docs = web.ctx.site.store.values(type=\"account-link\", name=\"code\", value=code)\n if docs:\n doc = docs[0]\n\n account = accounts.find(username = doc['username'])\n if account:\n if account['status'] != \"pending\":\n return render['account/verify/activated'](account)\n account.activate()\n user = web.ctx.site.get(\"/people/\" + doc['username']) #TBD\n return render['account/verify/success'](account)\n else:\n return render['account/verify/failed']()\n\n def POST(self, code=None):\n \"\"\"Called to regenerate account verification code.\n \"\"\"\n i = web.input(email=None)\n account = accounts.find(email=i.email)\n if not account:\n return render_template(\"account/verify/failed\", email=i.email)\n elif account['status'] != \"pending\":\n return render['account/verify/activated'](account)\n else:\n account.send_verification_email()\n title = _(\"Hi, %(user)s\", user=account.displayname)\n message = _(\"We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.\", email=account.email)\n return render.message(title, message)\n\nclass account_verify_old(account_verify):\n \"\"\"Old account verification code.\n\n This takes username, email and code as url parameters. 
The new one takes just the code as part of the url.\n \"\"\"\n path = \"/account/verify\"\n def GET(self):\n # It is too long since we switched to the new account verification links.\n # All old links must be expired by now.\n # Show failed message without thinking.\n return render['account/verify/failed']()\n\nclass account_validation(delegate.page):\n path = '/account/validate'\n\n @staticmethod\n def validate_username(username):\n if not 3 <= len(username) <= 20:\n return _('Username must be between 3-20 characters')\n if not re.match('^[A-Za-z0-9-_]{3,20}$', username):\n return _('Username may only contain numbers and letters')\n ol_account = OpenLibraryAccount.get(username=username)\n if ol_account:\n return _(\"Username unavailable\")\n\n @staticmethod\n def validate_email(email):\n if not (email and re.match(r'.*@.*\\..*', email)):\n return _('Must be a valid email address')\n\n ol_account = OpenLibraryAccount.get(email=email)\n if ol_account:\n return _('Email already registered')\n\n\n def GET(self):\n i = web.input()\n errors = {\n 'email': None,\n 'username': None\n }\n if i.get('email') is not None:\n errors['email'] = self.validate_email(i.email)\n if i.get('username') is not None:\n errors['username'] = self.validate_username(i.username)\n return delegate.RawText(json.dumps(errors),\n content_type=\"application/json\")\n\n\nclass account_email_verify(delegate.page):\n path = \"/account/email/verify/([0-9a-f]*)\"\n\n def GET(self, code):\n link = accounts.get_link(code)\n if link:\n username = link['username']\n email = link['email']\n link.delete()\n return self.update_email(username, email)\n else:\n return self.bad_link()\n\n def update_email(self, username, email):\n if accounts.find(email=email):\n title = _(\"Email address is already used.\")\n message = _(\"Your email address couldn't be updated. The specified email address is already used.\")\n else:\n logger.info(\"updated email of %s to %s\", username, email)\n accounts.update_account(username=username, email=email, status=\"active\")\n title = _(\"Email verification successful.\")\n message = _('Your email address has been successfully verified and updated in your account.')\n return render.message(title, message)\n\n def bad_link(self):\n title = _(\"Email address couldn't be verified.\")\n message = _(\"Your email address couldn't be verified. The verification link seems invalid.\")\n return render.message(title, message)\n\nclass account_email_verify_old(account_email_verify):\n path = \"/account/email/verify\"\n\n def GET(self):\n # It is too long since we switched to the new email verification links.\n # All old links must be expired by now.\n # Show failed message without thinking.\n return self.bad_link()\n\nclass account_ia_email_forgot(delegate.page):\n path = \"/account/email/forgot-ia\"\n\n def GET(self):\n return render_template('account/email/forgot-ia')\n\n def POST(self):\n i = web.input(email='', password='')\n err = \"\"\n\n if valid_email(i.email):\n act = OpenLibraryAccount.get(email=i.email)\n if act:\n if OpenLibraryAccount.authenticate(i.email, i.password) == \"ok\":\n ia_act = act.get_linked_ia_account()\n if ia_act:\n return render_template('account/email/forgot-ia', email=ia_act.email)\n else:\n err = \"Open Library Account not linked. 
Login with your Open Library credentials to connect or create an Archive.org account\"\n else:\n err = \"Incorrect password\"\n else:\n err = \"Sorry, this Open Library account does not exist\"\n else:\n err = \"Please enter a valid Open Library email\"\n return render_template('account/email/forgot-ia', err=err)\n\nclass account_ol_email_forgot(delegate.page):\n path = \"/account/email/forgot\"\n\n def GET(self):\n return render_template('account/email/forgot')\n\n def POST(self):\n i = web.input(username='', password='')\n err = \"\"\n act = OpenLibraryAccount.get(username=i.username)\n\n if act:\n if OpenLibraryAccount.authenticate(act.email, i.password) == \"ok\":\n return render_template('account/email/forgot', email=act.email)\n else:\n err = \"Incorrect password\"\n\n elif valid_email(i.username):\n err = \"Please enter a username, not an email\"\n\n else:\n err=\"Sorry, this user does not exist\"\n\n return render_template('account/email/forgot', err=err)\n\n\nclass account_password_forgot(delegate.page):\n path = \"/account/password/forgot\"\n\n def GET(self):\n f = forms.ForgotPassword()\n return render['account/password/forgot'](f)\n\n def POST(self):\n i = web.input(email='')\n\n f = forms.ForgotPassword()\n\n if not f.validates(i):\n return render['account/password/forgot'](f)\n\n account = accounts.find(email=i.email)\n\n if account.is_blocked():\n f.note = utils.get_error(\"account_blocked\")\n return render_template('account/password/forgot', f)\n\n send_forgot_password_email(account.username, i.email)\n return render['account/password/sent'](i.email)\n\nclass account_password_reset(delegate.page):\n\n path = \"/account/password/reset/([0-9a-f]*)\"\n\n def GET(self, code):\n docs = web.ctx.site.store.values(type=\"account-link\", name=\"code\", value=code)\n if not docs:\n title = _(\"Password reset failed.\")\n message = \"Your password reset link seems invalid or expired.\"\n return render.message(title, message)\n\n f = forms.ResetPassword()\n return render['account/password/reset'](f)\n\n def POST(self, code):\n link = accounts.get_link(code)\n if not link:\n title = _(\"Password reset failed.\")\n message = \"The password reset link seems invalid or expired.\"\n return render.message(title, message)\n\n username = link['username']\n i = web.input()\n\n accounts.update_account(username, password=i.password)\n link.delete()\n return render_template(\"account/password/reset_success\", username=username)\n\n\nclass account_audit(delegate.page):\n\n path = \"/account/audit\"\n\n def POST(self):\n \"\"\"When the user attempts a login, an audit is performed to determine\n whether their account is already linked (in which case we can\n proceed to log the user in), whether there is an error\n authenticating their account, or whether a /account/connect\n must first performed.\n\n Note: Emails are case sensitive behind the scenes and\n functions which require them as lower will make them so\n \"\"\"\n i = web.input(email='', password='')\n test = i.get('test', '').lower() == 'true'\n email = i.get('email')\n password = i.get('password')\n result = audit_accounts(email, password, test=test)\n return delegate.RawText(json.dumps(result),\n content_type=\"application/json\")\n\nclass account_privacy(delegate.page):\n path = \"/account/privacy\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n return render['account/privacy'](user.preferences())\n\n @require_login\n def POST(self):\n user = accounts.get_current_user()\n user.save_preferences(web.input())\n 
add_flash_message('note', _(\"Notification preferences have been updated successfully.\"))\n web.seeother(\"/account\")\n\nclass account_notifications(delegate.page):\n path = \"/account/notifications\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n email = user.email\n return render['account/notifications'](user.preferences(), email)\n\n @require_login\n def POST(self):\n user = accounts.get_current_user()\n user.save_preferences(web.input())\n add_flash_message('note', _(\"Notification preferences have been updated successfully.\"))\n web.seeother(\"/account\")\n\nclass account_lists(delegate.page):\n path = \"/account/lists\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n raise web.seeother(user.key + '/lists')\n\n\nclass ReadingLog(object):\n\n \"\"\"Manages the user's account page books (reading log, waitlists, loans)\"\"\"\n\n def __init__(self, user=None):\n self.user = user or accounts.get_current_user()\n #self.user.update_loan_status()\n self.KEYS = {\n 'waitlists': self.get_waitlisted_editions,\n 'loans': self.get_loans,\n 'want-to-read': self.get_want_to_read,\n 'currently-reading': self.get_currently_reading,\n 'already-read': self.get_already_read\n }\n\n @property\n def lists(self):\n return self.user.get_lists()\n\n @property\n def reading_log_counts(self):\n counts = Bookshelves.count_total_books_logged_by_user_per_shelf(\n self.user.get_username())\n return {\n 'want-to-read': counts.get(Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0),\n 'currently-reading': counts.get(Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0),\n 'already-read': counts.get(Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0)\n }\n\n def get_loans(self):\n return borrow.get_loans(self.user)\n\n def get_waitlist_summary(self):\n return self.user.get_waitinglist()\n\n def get_waitlisted_editions(self):\n \"\"\"Gets a list of records corresponding to a user's waitlisted\n editions, fetches all the editions, and then inserts the data\n from each waitlist record (e.g. 
position in line) into the\n corresponding edition\n \"\"\"\n waitlists = self.user.get_waitinglist()\n keyed_waitlists = dict([(w['identifier'], w) for w in waitlists])\n ocaids = [i['identifier'] for i in waitlists]\n edition_keys = web.ctx.site.things({\"type\": \"/type/edition\", \"ocaid\": ocaids})\n editions = web.ctx.site.get_many(edition_keys)\n for i in range(len(editions)):\n # insert the waitlist_entry corresponding to this edition\n editions[i].waitlist_record = keyed_waitlists[editions[i].ocaid]\n return editions\n\n def process_logged_books(self, logged_books):\n work_ids = ['/works/OL%sW' % i['work_id'] for i in logged_books]\n works = web.ctx.site.get_many(work_ids)\n for i in range(len(works)):\n # insert the logged edition (if present) and logged date\n works[i].logged_date = logged_books[i]['created']\n works[i].logged_edition = (\n '/books/OL%sM' % logged_books[i]['edition_id']\n if logged_books[i]['edition_id'] else '')\n return works\n\n def get_want_to_read(self, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n return self.process_logged_books(Bookshelves.get_users_logged_books(\n self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Want to Read'],\n page=page, limit=limit, sort=sort + ' ' + sort_order))\n\n def get_currently_reading(self, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n return self.process_logged_books(Bookshelves.get_users_logged_books(\n self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Currently Reading'],\n page=page, limit=limit, sort=sort + ' ' + sort_order))\n\n def get_already_read(self, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n return self.process_logged_books(Bookshelves.get_users_logged_books(\n self.user.get_username(), bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Already Read'],\n page=page, limit=limit, sort=sort + ' ' + sort_order))\n\n def get_works(self, key, page=1, limit=RESULTS_PER_PAGE,\n sort='created', sort_order='desc'):\n \"\"\"\n :rtype: list of openlibrary.plugins.upstream.models.Work\n \"\"\"\n key = key.lower()\n if key in self.KEYS:\n return self.KEYS[key](page=page, limit=limit,\n sort=sort, sort_order=sort_order)\n else: # must be a list or invalid page!\n #works = web.ctx.site.get_many([ ... 
])\n raise\n\n\nclass PatronBooknotes(object):\n \"\"\" Manages the patron's book notes and observations \"\"\"\n\n def __init__(self, user):\n user = user or account.get_current_user()\n self.username = user.key.split('/')[-1]\n\n def get_notes(self, limit=RESULTS_PER_PAGE, page=1):\n notes = Booknotes.get_notes_grouped_by_work(\n self.username,\n limit=limit,\n page=page)\n\n for entry in notes:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n entry['notes'] = {i['edition_id']: i['notes'] for i in entry['notes']}\n entry['editions'] = {\n k: web.ctx.site.get(f'/books/OL{k}M')\n for k in entry['notes'] if k != Booknotes.NULL_EDITION_VALUE}\n return notes\n\n def get_observations(self, limit=RESULTS_PER_PAGE, page=1):\n observations = Observations.get_observations_grouped_by_work(\n self.username,\n limit=limit,\n page=page)\n\n for entry in observations:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n ids = {}\n for item in entry['observations']:\n ids[item['observation_type']] = item['observation_values']\n entry['observations'] = convert_observation_ids(ids)\n return observations\n\n def _get_work(self, work_key):\n return web.ctx.site.get(work_key)\n\n def _get_work_details(self, work):\n author_keys = [a.author.key for a in work.get('authors', [])]\n\n return {\n 'cover_url': (\n work.get_cover_url('S') or\n 'https://openlibrary.org/images/icons/avatar_book-sm.png'),\n 'title': work.get('title'),\n 'authors': [a.name for a in web.ctx.site.get_many(author_keys)],\n 'first_publish_year': work.first_publish_year or None\n }\n\n @classmethod\n def get_counts(cls, username):\n return {\n 'notes': Booknotes.count_works_with_notes_by_user(username),\n 'observations': Observations.count_distinct_observations(username)\n }\n\n\nclass public_my_books(delegate.page):\n path = \"/people/([^/]+)/books\"\n\n def GET(self, username):\n raise web.seeother('/people/%s/books/want-to-read' % username)\n\n\nclass public_my_books(delegate.page):\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key='loans'):\n \"\"\"check if user's reading log is public\"\"\"\n i = web.input(page=1, sort='desc')\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return render.notfound(\"User %s\" % username, create=False)\n is_public = user.preferences().get('public_readlog', 'no') == 'yes'\n logged_in_user = accounts.get_current_user()\n is_logged_in_user = (\n logged_in_user and\n logged_in_user.key.split('/')[-1] == username)\n if is_public or is_logged_in_user:\n readlog = ReadingLog(user=user)\n sponsorships = get_sponsored_editions(user)\n if key == 'sponsorships':\n books = (web.ctx.site.get(\n web.ctx.site.things({\n 'type': '/type/edition',\n 'isbn_%s' % len(s['isbn']): s['isbn']\n })[0]) for s in sponsorships)\n elif key == 'notes' and is_logged_in_user:\n books = PatronBooknotes(user).get_notes(page=int(i.page))\n elif key == 'observations' and is_logged_in_user:\n books = PatronBooknotes(user).get_observations(page=int(i.page))\n else:\n books = add_availability(\n readlog.get_works(key, page=i.page,\n sort='created', sort_order=i.sort),\n mode=\"openlibrary_work\"\n )\n booknotes_counts = PatronBooknotes.get_counts(username)\n\n return render['account/books'](\n books, key, sponsorship_count=len(sponsorships),\n 
reading_log_counts=readlog.reading_log_counts, lists=readlog.lists,\n user=user, logged_in_user=logged_in_user, public=is_public,\n sort_order=str(i.sort), booknotes_counts=booknotes_counts\n )\n raise web.seeother(user.key)\n\n\nclass public_my_books_json(delegate.page):\n encoding = \"json\"\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key='want-to-read'):\n i = web.input(page=1, limit=5000)\n page = safeint(i.page, 1)\n limit = safeint(i.limit, 5000)\n \"\"\"check if user's reading log is public\"\"\"\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return delegate.RawText(\n json.dumps({'error': 'User %s not found' % username}),\n content_type=\"application/json\")\n is_public = user.preferences().get('public_readlog', 'no') == 'yes'\n logged_in_user = accounts.get_current_user()\n if (is_public or\n logged_in_user and logged_in_user.key.split('/')[-1] == username):\n readlog = ReadingLog(user=user)\n books = readlog.get_works(key, page, limit)\n records_json = [\n {\n 'work':\n {\n 'title': w.get('title'),\n 'key': w.key,\n 'author_keys': [a.author.key for a in w.get('authors', [])],\n 'author_names': [str(a.author.name) for a\n in w.get('authors', [])],\n 'first_publish_year': w.first_publish_year or None,\n 'lending_edition_s': (w._solr_data and\n w._solr_data.get('lending_edition_s') or\n None),\n 'edition_key': (w._solr_data and\n w._solr_data.get('edition_key') or None),\n 'cover_id': (w._solr_data and\n w._solr_data.get('cover_id') or None),\n 'cover_edition_key': (w._solr_data and\n w._solr_data.get('cover_edition_key') or\n None),\n },\n 'logged_edition': w.get('logged_edition') or None,\n 'logged_date': (w.get('logged_date').strftime(\"%Y/%m/%d, %H:%M:%S\")\n if w.get('logged_date') else None),\n } for w in books\n ]\n return delegate.RawText(json.dumps({\n 'page': page,\n 'reading_log_entries': records_json\n }), content_type=\"application/json\")\n else:\n return delegate.RawText(\n json.dumps({'error': 'Shelf %s not found or not accessible' % key}),\n content_type=\"application/json\")\n\n\nclass readinglog_stats(delegate.page):\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)/stats\"\n\n def GET(self, username, key='loans'):\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return render.notfound(\"User %s\" % username, create=False)\n\n cur_user = accounts.get_current_user()\n if not cur_user or cur_user.key.split('/')[-1] != username:\n return render.permission_denied(web.ctx.path, 'Permission Denied')\n\n readlog = ReadingLog(user=user)\n works = readlog.get_works(key, page=1, limit=2000)\n works_json = [\n {\n 'title': w.get('title'),\n 'key': w.key,\n 'author_keys': [a.author.key for a in w.get('authors', [])],\n 'first_publish_year': w.first_publish_year or None,\n 'subjects': w.get('subjects'),\n 'subject_people': w.get('subject_people'),\n 'subject_places': w.get('subject_places'),\n 'subject_times': w.get('subject_times'),\n } for w in works\n ]\n author_keys = set(\n a\n for work in works_json\n for a in work['author_keys']\n )\n authors_json = [\n {\n 'key': a.key,\n 'name': a.name,\n 'birth_date': a.get('birth_date'),\n }\n for a in web.ctx.site.get_many(list(author_keys))\n ]\n return render['account/readinglog_stats'](\n json.dumps(works_json),\n json.dumps(authors_json),\n len(works_json),\n user.key,\n user.displayname,\n web.ctx.path.rsplit('/', 1)[0],\n key,\n lang=web.ctx.lang,\n )\n\n\nclass account_my_books_redirect(delegate.page):\n path = \"/account/books/(.*)\"\n\n @require_login\n 
def GET(self, rest='loans'):\n user = accounts.get_current_user()\n username = user.key.split('/')[-1]\n raise web.seeother('/people/%s/books/%s' % (username, rest))\n\nclass account_my_books(delegate.page):\n path = \"/account/books\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n username = user.key.split('/')[-1]\n raise web.seeother('/people/%s/books' % (username))\n\n# This would be by the civi backend which would require the api keys\nclass fake_civi(delegate.page):\n path = \"/internal/fake/civicrm\"\n\n def GET(self):\n i = web.input(entity='Contact')\n contact = {\n 'values': [{\n 'contact_id': '270430'\n }]\n }\n contributions = {\n 'values': [{\n \"receive_date\": \"2019-07-31 08:57:00\",\n \"custom_52\": \"9780062457714\",\n \"total_amount\": \"50.00\",\n \"custom_53\": \"ol\",\n \"contact_id\": \"270430\",\n \"contribution_status\": \"\"\n }]\n }\n entity = contributions if i.entity == 'Contribution' else contact\n return delegate.RawText(json.dumps(entity), content_type=\"application/json\")\n\nclass import_books(delegate.page):\n path = \"/account/import\"\n\n @require_login\n def GET(self):\n return render['account/import']()\n\nclass fetch_goodreads(delegate.page):\n path = \"/account/import/goodreads\"\n\n def GET(self):\n raise web.seeother(\"/account/import\")\n\n @require_login\n def POST(self):\n books, books_wo_isbns = process_goodreads_csv(web.input())\n return render['account/import'](books, books_wo_isbns)\n\nclass export_books(delegate.page):\n path = \"/account/export\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n username = user.key.split('/')[-1]\n books = Bookshelves.get_users_logged_books(username, limit=10000)\n csv = []\n csv.append('Work Id,Edition Id,Bookshelf\\n')\n mapping = {1:'Want to Read', 2:'Currently Reading', 3:'Already Read'}\n for book in books:\n row = [\n 'OL{}W'.format(book['work_id']),\n 'OL{}M'.format(book['edition_id']) if book['edition_id'] else '',\n '{}\\n'.format(mapping[book['bookshelf_id']])\n ]\n csv.append(','.join(row))\n web.header('Content-Type','text/csv')\n web.header('Content-disposition', 'attachment; filename=OpenLibrary_ReadingLog.csv')\n csv = ''.join(csv)\n return delegate.RawText(csv, content_type=\"text/csv\")\n\nclass account_loans(delegate.page):\n path = \"/account/loans\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n user.update_loan_status()\n loans = borrow.get_loans(user)\n return render['account/borrow'](user, loans)\n\nclass account_loans_json(delegate.page):\n\n encoding = \"json\"\n path = \"/account/loans\"\n\n @require_login\n def GET(self):\n user = accounts.get_current_user()\n user.update_loan_status()\n loans = borrow.get_loans(user)\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps({\n \"loans\": loans\n }))\n\n\n# Disabling be cause it prevents account_my_books_redirect from working\n# for some reason. 
The purpose of this class is to not show the \"Create\" link for\n# /account pages since that doesn't make any sense.\n# class account_others(delegate.page):\n# path = \"(/account/.*)\"\n#\n# def GET(self, path):\n# return render.notfound(path, create=False)\n\n\ndef send_forgot_password_email(username, email):\n key = \"account/%s/password\" % username\n\n doc = create_link_doc(key, username, email)\n web.ctx.site.store[key] = doc\n\n link = web.ctx.home + \"/account/password/reset/\" + doc['code']\n msg = render_template(\"email/password/reminder\", username=username, email=email, link=link)\n sendmail(email, msg)\n\n\ndef as_admin(f):\n \"\"\"Infobase allows some requests only from admin user. This decorator logs in as admin, executes the function and clears the admin credentials.\"\"\"\n def g(*a, **kw):\n try:\n delegate.admin_login()\n return f(*a, **kw)\n finally:\n web.ctx.headers = []\n return g\n\n\ndef process_goodreads_csv(i):\n import csv\n csv_payload = i.csv if isinstance(i.csv, str) else i.csv.decode()\n csv_file = csv.reader(csv_payload.splitlines(), delimiter=',', quotechar='\"')\n header = next(csv_file)\n books = {}\n books_wo_isbns = {}\n for book in list(csv_file):\n _book = dict(zip(header, book))\n isbn = _book['ISBN'] = _book['ISBN'].replace('\"', '').replace('=', '')\n isbn_13 = _book['ISBN13'] = _book['ISBN13'].replace('\"', '').replace('=', '')\n if isbn != '':\n books[isbn] = _book\n elif isbn_13 != '':\n books[isbn_13] = _book\n books[isbn_13]['ISBN'] = isbn_13\n else:\n books_wo_isbns[_book['Book Id']] = _book\n return books, books_wo_isbns\n", "path": "openlibrary/plugins/upstream/account.py"}]} |
gh_patches_debug_1387 | rasdani/github-patches | git_diff | holoviz__panel-5564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extensions with no `cdn_url` cause `pn.extension` to fail
#### ALL software version info
```
panel==1.2.3
bokeh==3.2.2
jupyterlab==4.0.6
jupyterlab-pygments==0.2.2
jupyterlab-widgets==3.0.3
jupyterlab_server==2.25.0
```
Running on Windows 10, Python 3.9, with the crash occurring in both VS Code and standalone Jupyter Lab.
#### Description of expected behavior and the observed behavior
If you load other Python modules that include a pre-compiled extension (or, seemingly, any local extension that does not have `cdn_url` set), then `pn.extension()` crashes in one of the checks that assume `cdn_url` is not `None`. The expected behavior is that calling `pn.extension` succeeds.
#### Complete, minimal, self-contained example code that reproduces the issue
I could put together an MWE, but this only seems to happen for pre-compiled Bokeh extension models, which are difficult to set up (they require a separate TypeScript build step), and I don't fully understand how Bokeh registers the module.

If needed, I'll come up with an MWE, but this looks like a one-line change. The basic way to reproduce the bug is:
```
import cydnus.PolygonGateTool # any pre-compiled Bokeh module, this is just my local one
import panel as pn
pn.extension()
```
I edited my local Panel install to add `print(extensions)` right before [lines 373-376](https://github.com/holoviz/panel/blob/9ea8c34d2029f6ed6a486aa2360566e7d16ca405/panel/io/resources.py#L373-L376)
```
print(extensions)
if reloading:
extensions = [
ext for ext in extensions if not ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@')
]
```
and the loaded extensions look like:
```
[
ExtensionEmbed(
artifact_path='C:\\local_venv_path\\lib\\site-packages\\panel\\dist\\panel.min.js',
        server_url='http://localhost:5006/static/extensions/panel/panel.min.js?v=c47f5b18b8a430e698b9fe15e51f6119984e78334bcf3f45e210d30c37ef2f9e',
        cdn_url='https://unpkg.com/@holoviz/panel@1.2.3/dist/panel.min.js'),
ExtensionEmbed(
artifact_path='c:\\local_extension_path\\cydnus.js',
server_url='http://localhost:5006/static/extensions/cydnus/cydnus.js?v=6b13789e43e5485634533de16a65d8ba9d34c4c9758588b665805435f80eb115',
cdn_url=None)
]
```
Note the **`cdn_url=None`** line on the `cydnus` local extension.
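
For reference, a `None`-safe version of that check (just an untested sketch of the one-line guard I have in mind, not a final patch) could look like:
```
# Sketch only: skip the startswith() check when cdn_url is None, as it is for
# local pre-compiled extensions such as the cydnus one above.
if reloading:
    extensions = [
        ext for ext in extensions
        if not (ext.cdn_url is not None
                and ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@'))
    ]
```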
#### Stack traceback and/or browser JavaScript console output
```
Traceback (most recent call last):
File "C:\local_venv_path\site-packages\IPython\core\interactiveshell.py", line 3378, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\local_temp_path\Temp\ipykernel_1860\1180023464.py", line 4, in <module>
pn.extension()
File "C:\local_venv_path\lib\site-packages\pyviz_comms\__init__.py", line 64, in __new__
return param.ParameterizedFunction.__new__(cls, *args, **kwargs)
File "C:\local_venv_path\lib\site-packages\param\parameterized.py", line 3654, in __new__
return inst.__call__(*args,**params)
File "C:\local_venv_path\lib\site-packages\panel\config.py", line 807, in __call__
load_notebook(
File "C:\local_venv_path\lib\site-packages\panel\io\notebook.py", line 364, in load_notebook
bundle = bundle_resources(
File "local_venv_path\lib\site-packages\panel\io\resources.py", line 375, in bundle_resources
extensions = [
File "local_venv_path\lib\site-packages\panel\io\resources.py", line 377, in <listcomp>
ext for ext in extensions if not (ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@'))
AttributeError: 'NoneType' object has no attribute 'startswith'
```
- [x] I may be interested in making a pull request to address this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/io/resources.py`
Content:
```
1 """
2 Patches bokeh resources to make it easy to add external JS and CSS
3 resources via the panel.config object.
4 """
5 from __future__ import annotations
6
7 import importlib
8 import json
9 import logging
10 import mimetypes
11 import os
12 import pathlib
13 import re
14 import textwrap
15
16 from base64 import b64encode
17 from collections import OrderedDict
18 from contextlib import contextmanager
19 from functools import lru_cache
20 from pathlib import Path
21 from typing import (
22 TYPE_CHECKING, Dict, List, Literal, TypedDict,
23 )
24
25 import param
26
27 from bokeh.embed.bundle import (
28 CSS_RESOURCES as BkCSS_RESOURCES, Bundle as BkBundle, _bundle_extensions,
29 _use_mathjax, bundle_models, extension_dirs,
30 )
31 from bokeh.model import Model
32 from bokeh.models import ImportedStyleSheet
33 from bokeh.resources import Resources as BkResources, _get_server_urls
34 from bokeh.settings import settings as _settings
35 from jinja2.environment import Environment
36 from jinja2.loaders import FileSystemLoader
37 from markupsafe import Markup
38
39 from ..config import config, panel_extension as extension
40 from ..util import isurl, url_path
41 from .loading import LOADING_INDICATOR_CSS_CLASS
42 from .state import state
43
44 if TYPE_CHECKING:
45 from bokeh.resources import Urls
46
47 class ResourcesType(TypedDict):
48 css: Dict[str, str]
49 js: Dict[str, str]
50 js_modules: Dict[str, str]
51 raw_css: List[str]
52
53 logger = logging.getLogger(__name__)
54
55 ResourceAttr = Literal["__css__", "__javascript__"]
56
57 with open(Path(__file__).parent.parent / 'package.json') as f:
58 package_json = json.load(f)
59 JS_VERSION = package_json['version'].split('+')[0]
60
61 def get_env():
62 ''' Get the correct Jinja2 Environment, also for frozen scripts.
63 '''
64 local_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_templates'))
65 return Environment(loader=FileSystemLoader(local_path))
66
67 def conffilter(value):
68 return json.dumps(OrderedDict(value)).replace('"', '\'')
69
70 _env = get_env()
71 _env.trim_blocks = True
72 _env.lstrip_blocks = True
73 _env.filters['json'] = lambda obj: Markup(json.dumps(obj))
74 _env.filters['conffilter'] = conffilter
75 _env.filters['sorted'] = sorted
76
77 # Handle serving of the panel extension before session is loaded
78 RESOURCE_MODE = 'server'
79 PANEL_DIR = Path(__file__).parent.parent
80 DIST_DIR = PANEL_DIR / 'dist'
81 BUNDLE_DIR = DIST_DIR / 'bundled'
82 ASSETS_DIR = PANEL_DIR / 'assets'
83 INDEX_TEMPLATE = _env.get_template('convert_index.html')
84 BASE_TEMPLATE = _env.get_template('base.html')
85 ERROR_TEMPLATE = _env.get_template('error.html')
86 LOGOUT_TEMPLATE = _env.get_template('logout.html')
87 BASIC_LOGIN_TEMPLATE = _env.get_template('basic_login.html')
88 DEFAULT_TITLE = "Panel Application"
89 JS_RESOURCES = _env.get_template('js_resources.html')
90 CDN_URL = f"https://cdn.holoviz.org/panel/{JS_VERSION}/"
91 CDN_DIST = f"{CDN_URL}dist/"
92 DOC_DIST = "https://panel.holoviz.org/_static/"
93 LOCAL_DIST = "static/extensions/panel/"
94 COMPONENT_PATH = "components/"
95
96 BK_PREFIX_RE = re.compile('\.bk\.')
97
98 RESOURCE_URLS = {
99 'font-awesome': {
100 'zip': 'https://use.fontawesome.com/releases/v5.15.4/fontawesome-free-5.15.4-web.zip',
101 'src': 'fontawesome-free-5.15.4-web/',
102 'exclude': ['*.svg', '*.scss', '*.less']
103 },
104 'bootstrap4': {
105 'tar': 'https://registry.npmjs.org/bootstrap/-/bootstrap-4.6.1.tgz',
106 'src': 'package/dist',
107 'exclude': [],
108 'dest': ''
109 },
110 'bootstrap5': {
111 'tar': 'https://registry.npmjs.org/bootstrap/-/bootstrap-5.3.0-alpha1.tgz',
112 'src': 'package/dist',
113 'exclude': [],
114 'dest': ''
115 },
116 'jQuery': {
117 'tar': 'https://registry.npmjs.org/jquery/-/jquery-3.5.1.tgz',
118 'src': 'package/dist',
119 'exclude': [],
120 'dest': ''
121 }
122 }
123
124 CSS_URLS = {
125 'font-awesome': f'{CDN_DIST}bundled/font-awesome/css/all.min.css',
126 'bootstrap4': f'{CDN_DIST}bundled/bootstrap4/css/bootstrap.min.css',
127 'bootstrap5': f'{CDN_DIST}bundled/bootstrap5/css/bootstrap.min.css'
128 }
129
130 JS_URLS = {
131 'jQuery': f'{CDN_DIST}bundled/jquery/jquery.slim.min.js',
132 'bootstrap4': f'{CDN_DIST}bundled/bootstrap4/js/bootstrap.bundle.min.js',
133 'bootstrap5': f'{CDN_DIST}bundled/bootstrap5/js/bootstrap.bundle.min.js'
134 }
135
136 extension_dirs['panel'] = str(DIST_DIR)
137
138 mimetypes.add_type("application/javascript", ".js")
139
140 @contextmanager
141 def set_resource_mode(mode):
142 global RESOURCE_MODE
143 old_resources = _settings.resources._user_value
144 old_mode = RESOURCE_MODE
145 _settings.resources = RESOURCE_MODE = mode
146 try:
147 yield
148 finally:
149 RESOURCE_MODE = old_mode
150 _settings.resources.set_value(old_resources)
151
152 def use_cdn() -> bool:
153 return _settings.resources(default="server") != 'server'
154
155 def get_dist_path(cdn: bool | Literal['auto'] = 'auto') -> str:
156 cdn = use_cdn() if cdn == 'auto' else cdn
157 if cdn:
158 dist_path = CDN_DIST
159 elif state.rel_path:
160 dist_path = f'{state.rel_path}/{LOCAL_DIST}'
161 else:
162 dist_path = f'{LOCAL_DIST}'
163 return dist_path
164
165 def is_cdn_url(url) -> bool:
166 return isurl(url) and url.startswith(CDN_DIST)
167
168 def process_raw_css(raw_css):
169 """
170 Converts old-style Bokeh<3 compatible CSS to Bokeh 3 compatible CSS.
171 """
172 return [BK_PREFIX_RE.sub('.', css) for css in raw_css]
173
174 @lru_cache(maxsize=None)
175 def loading_css(loading_spinner, color, max_height):
176 with open(ASSETS_DIR / f'{loading_spinner}_spinner.svg', encoding='utf-8') as f:
177 svg = f.read().replace('\n', '').format(color=color)
178 b64 = b64encode(svg.encode('utf-8')).decode('utf-8')
179 return textwrap.dedent(f"""
180 :host(.{LOADING_INDICATOR_CSS_CLASS}.pn-{loading_spinner}):before, .pn-loading.pn-{loading_spinner}:before {{
181 background-image: url("data:image/svg+xml;base64,{b64}");
182 background-size: auto calc(min(50%, {max_height}px));
183 }}""")
184
185 def resolve_custom_path(
186 obj, path: str | os.PathLike, relative: bool = False
187 ) -> pathlib.Path | None:
188 """
189 Attempts to resolve a path relative to some component.
190
191 Arguments
192 ---------
193 obj: type | object
194 The component to resolve the path relative to.
195 path: str | os.PathLike
196 Absolute or relative path to a resource.
197 relative: bool
198 Whether to return a relative path.
199
200 Returns
201 -------
202 path: pathlib.Path | None
203 """
204 if not path:
205 return
206 if not isinstance(obj, type):
207 obj = type(obj)
208 try:
209 mod = importlib.import_module(obj.__module__)
210 module_path = Path(mod.__file__).parent
211 assert module_path.exists()
212 except Exception:
213 return None
214 path = pathlib.Path(path)
215 if path.is_absolute():
216 abs_path = path
217 else:
218 abs_path = module_path / path
219 if not abs_path.is_file():
220 return None
221 abs_path = abs_path.resolve()
222 if not relative:
223 return abs_path
224 return os.path.relpath(abs_path, module_path)
225
226 def component_resource_path(component, attr, path):
227 """
228 Generates a canonical URL for a component resource.
229
230 To be used in conjunction with the `panel.io.server.ComponentResourceHandler`
231 which allows dynamically resolving resources defined on components.
232 """
233 if not isinstance(component, type):
234 component = type(component)
235 component_path = COMPONENT_PATH
236 if state.rel_path:
237 component_path = f"{state.rel_path}/{component_path}"
238 rel_path = str(resolve_custom_path(component, path, relative=True)).replace(os.path.sep, '/')
239 return f'{component_path}{component.__module__}/{component.__name__}/{attr}/{rel_path}'
240
241 def patch_stylesheet(stylesheet, dist_url):
242 url = stylesheet.url
243 if url.startswith(CDN_DIST+dist_url) and dist_url != CDN_DIST:
244 patched_url = url.replace(CDN_DIST+dist_url, dist_url) + f'?v={JS_VERSION}'
245 elif url.startswith(CDN_DIST) and dist_url != CDN_DIST:
246 patched_url = url.replace(CDN_DIST, dist_url) + f'?v={JS_VERSION}'
247 else:
248 return
249 try:
250 stylesheet.url = patched_url
251 except Exception:
252 pass
253
254 def resolve_stylesheet(cls, stylesheet: str, attribute: str | None = None):
255 """
256 Resolves a stylesheet definition, e.g. originating on a component
257 Reactive._stylesheets or a Design.modifiers attribute. Stylesheets
258 may be defined as one of the following:
259
260 - Absolute URL defined with http(s) protocol
261 - A path relative to the component
262
263 Arguments
264 ---------
265 cls: type | object
266 Object or class defining the stylesheet
267 stylesheet: str
268 The stylesheet definition
269 """
270 stylesheet = str(stylesheet)
271 if not stylesheet.startswith('http') and attribute and (custom_path:= resolve_custom_path(cls, stylesheet)):
272 if not state._is_pyodide and state.curdoc and state.curdoc.session_context:
273 stylesheet = component_resource_path(cls, attribute, stylesheet)
274 else:
275 stylesheet = custom_path.read_text(encoding='utf-8')
276 return stylesheet
277
278 def patch_model_css(root, dist_url):
279 """
280 Temporary patch for Model.css property used by Panel to provide
281 stylesheets for components.
282
283 ALERT: Should find better solution before official Bokeh 3.x compatible release
284 """
285 # Patch model CSS properties
286 doc = root.document
287 if doc:
288 held = doc.callbacks.hold_value
289 events = list(doc.callbacks._held_events)
290 doc.hold()
291 for stylesheet in root.select({'type': ImportedStyleSheet}):
292 patch_stylesheet(stylesheet, dist_url)
293 if doc:
294 doc.callbacks._held_events = events
295 if held:
296 doc.callbacks._hold = held
297 else:
298 doc.unhold()
299
300 def global_css(name):
301 if RESOURCE_MODE == 'server':
302 return f'static/extensions/panel/css/{name}.css'
303 else:
304 return f'{CDN_DIST}css/{name}.css'
305
306 def bundled_files(model, file_type='javascript'):
307 name = model.__name__.lower()
308 bdir = BUNDLE_DIR / name
309 shared = list((JS_URLS if file_type == 'javascript' else CSS_URLS).values())
310 files = []
311 for url in getattr(model, f"__{file_type}_raw__", []):
312 if url.startswith(CDN_DIST):
313 filepath = url.replace(f'{CDN_DIST}bundled/', '')
314 elif url.startswith(config.npm_cdn):
315 filepath = url.replace(config.npm_cdn, '')[1:]
316 else:
317 filepath = url_path(url)
318 test_filepath = filepath.split('?')[0]
319 if url in shared:
320 prefixed = filepath
321 test_path = BUNDLE_DIR / test_filepath
322 elif not test_filepath.replace('/', '').startswith(f'{name}/'):
323 prefixed = f'{name}/{test_filepath}'
324 test_path = bdir / test_filepath
325 else:
326 prefixed = test_filepath
327 test_path = BUNDLE_DIR / test_filepath
328 if test_path.is_file():
329 if RESOURCE_MODE == 'server':
330 files.append(f'static/extensions/panel/bundled/{prefixed}')
331 elif filepath == test_filepath:
332 files.append(f'{CDN_DIST}bundled/{prefixed}')
333 else:
334 files.append(url)
335 else:
336 files.append(url)
337 return files
338
339 def bundle_resources(roots, resources, notebook=False, reloading=False, enable_mathjax='auto'):
340 from ..config import panel_extension as ext
341 global RESOURCE_MODE
342 if not isinstance(resources, Resources):
343 resources = Resources.from_bokeh(resources, notebook=notebook)
344 js_resources = css_resources = resources
345 RESOURCE_MODE = mode = js_resources.mode if resources is not None else "inline"
346
347 js_files = []
348 js_raw = []
349 css_files = []
350 css_raw = []
351
352 if isinstance(enable_mathjax, bool):
353 use_mathjax = enable_mathjax
354 elif roots:
355 use_mathjax = _use_mathjax(roots) or 'mathjax' in ext._loaded_extensions
356 else:
357 use_mathjax = False
358
359 if js_resources:
360 js_resources = js_resources.clone()
361 if not use_mathjax and "bokeh-mathjax" in js_resources.components:
362 js_resources.components.remove("bokeh-mathjax")
363 if reloading:
364 js_resources.components.clear()
365
366 js_files.extend(js_resources.js_files)
367 js_raw.extend(js_resources.js_raw)
368
369 css_files.extend(css_resources.css_files)
370 css_raw.extend(css_resources.css_raw)
371
372 extensions = _bundle_extensions(None, js_resources)
373 if reloading:
374 extensions = [
375 ext for ext in extensions if not ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@')
376 ]
377 extra_js = []
378 if mode == "inline":
379 js_raw.extend([ Resources._inline(bundle.artifact_path) for bundle in extensions ])
380 elif mode == "server":
381 for bundle in extensions:
382 server_url = bundle.server_url
383 if resources.root_url and not resources.absolute:
384 server_url = server_url.replace(resources.root_url, '', 1)
385 js_files.append(server_url)
386 elif mode == "cdn":
387 for bundle in extensions:
388 if bundle.cdn_url is not None:
389 extra_js.append(bundle.cdn_url)
390 else:
391 js_raw.append(Resources._inline(bundle.artifact_path))
392 else:
393 extra_js.extend([ bundle.artifact_path for bundle in extensions ])
394 js_files += resources.adjust_paths(extra_js)
395
396 ext = bundle_models(None)
397 if ext is not None:
398 js_raw.append(ext)
399
400 hashes = js_resources.hashes if js_resources else {}
401 return Bundle(
402 css_files=css_files,
403 css_raw=css_raw,
404 hashes=hashes,
405 js_files=js_files,
406 js_raw=js_raw,
407 js_module_exports=resources.js_module_exports,
408 js_modules=resources.js_modules,
409 notebook=notebook,
410 )
411
412
413 class ResourceComponent:
414 """
415 Mix-in class for components that define a set of resources
416 that have to be resolved.
417 """
418
419 _resources = {
420 'css': {},
421 'font': {},
422 'js': {},
423 'js_modules': {},
424 'raw_css': [],
425 }
426
427 @classmethod
428 def _resolve_resource(cls, resource_type: str, resource: str, cdn: bool = False):
429 dist_path = get_dist_path(cdn=cdn)
430 if resource.startswith(CDN_DIST):
431 resource_path = resource.replace(f'{CDN_DIST}bundled/', '')
432 elif resource.startswith(config.npm_cdn):
433 resource_path = resource.replace(config.npm_cdn, '')[1:]
434 elif resource.startswith('http:'):
435 resource_path = url_path(resource)
436 else:
437 resource_path = resource
438
439 if resource_type == 'js_modules' and not (state.rel_path or cdn):
440 prefixed_dist = f'./{dist_path}'
441 else:
442 prefixed_dist = dist_path
443
444 bundlepath = BUNDLE_DIR / resource_path.replace('/', os.path.sep)
445 # Windows may trigger OSError: [WinError 123]
446 try:
447 is_file = bundlepath.is_file()
448 except Exception:
449 is_file = False
450 if is_file:
451 return f'{prefixed_dist}bundled/{resource_path}'
452 elif isurl(resource):
453 return resource
454 elif resolve_custom_path(cls, resource):
455 return component_resource_path(
456 cls, f'_resources/{resource_type}', resource
457 )
458
459 def resolve_resources(self, cdn: bool | Literal['auto'] = 'auto') -> ResourcesType:
460 """
461 Resolves the resources required for this component.
462
463 Arguments
464 ---------
465 cdn: bool | Literal['auto']
466 Whether to load resources from CDN or local server. If set
467 to 'auto' value will be automatically determine based on
468 global settings.
469
470 Returns
471 -------
472 Dictionary containing JS and CSS resources.
473 """
474 cls = type(self)
475 resources = {}
476 for rt, res in self._resources.items():
477 if not isinstance(res, dict):
478 continue
479 if rt == 'font':
480 rt = 'css'
481 res = {
482 name: url if isurl(url) else f'{cls.__name__.lower()}/{url}'
483 for name, url in res.items()
484 }
485 if rt in resources:
486 resources[rt] = dict(resources[rt], **res)
487 else:
488 resources[rt] = res
489
490 cdn = use_cdn() if cdn == 'auto' else cdn
491 resource_types: ResourcesType = {
492 'js': {},
493 'js_modules': {},
494 'css': {},
495 'raw_css': []
496 }
497
498 for resource_type in resource_types:
499 if resource_type not in resources or resource_type == 'raw_css':
500 continue
501 resource_files = resource_types[resource_type]
502 for rname, resource in resources[resource_type].items():
503 resolved_resource = self._resolve_resource(
504 resource_type, resource, cdn=cdn
505 )
506 if resolved_resource:
507 resource_files[rname] = resolved_resource
508 return resource_types
509
510
511 class Resources(BkResources):
512
513 def __init__(self, *args, absolute=False, notebook=False, **kwargs):
514 self.absolute = absolute
515 self.notebook = notebook
516 super().__init__(*args, **kwargs)
517
518 @classmethod
519 def from_bokeh(cls, bkr, absolute=False, notebook=False):
520 kwargs = {}
521 if bkr.mode.startswith("server"):
522 kwargs['root_url'] = bkr.root_url
523
524 components = bkr.components if hasattr(bkr, 'components_for') else bkr._components
525 return cls(
526 mode=bkr.mode, version=bkr.version, minified=bkr.minified,
527 log_level=bkr.log_level, notebook=notebook,
528 path_versioner=bkr.path_versioner,
529 components=components, base_dir=bkr.base_dir,
530 root_dir=bkr.root_dir, absolute=absolute, **kwargs
531 )
532
533 def _collect_external_resources(self, resource_attr: ResourceAttr) -> list[str]:
534 """ Collect external resources set on resource_attr attribute of all models."""
535 external_resources: list[str] = []
536
537 if state._extensions is not None:
538 external_modules = {
539 module: ext for ext, module in extension._imports.items()
540 }
541 else:
542 external_modules = None
543
544 for _, cls in sorted(Model.model_class_reverse_map.items(), key=lambda arg: arg[0]):
545 if external_modules is not None and cls.__module__ in external_modules:
546 if external_modules[cls.__module__] not in state._extensions:
547 continue
548 external: list[str] | str | None = getattr(cls, resource_attr, None)
549
550 if isinstance(external, str):
551 if external not in external_resources:
552 external_resources.append(external)
553 elif isinstance(external, list):
554 for e in external:
555 if e not in external_resources:
556 external_resources.append(e)
557
558 return external_resources
559
560 def _server_urls(self) -> Urls:
561 return _get_server_urls(
562 self.root_url if self.absolute else '',
563 False if self.dev else self.minified,
564 self.path_versioner
565 )
566
567 def extra_resources(self, resources, resource_type):
568 """
569 Adds resources for ReactiveHTML components.
570 """
571 from ..reactive import ReactiveHTML
572 for model in param.concrete_descendents(ReactiveHTML).values():
573 if not (getattr(model, resource_type, None) and model._loaded()):
574 continue
575 for resource in getattr(model, resource_type, []):
576 if not isurl(resource) and not resource.startswith('static/extensions'):
577 resource = component_resource_path(model, resource_type, resource)
578 if resource not in resources:
579 resources.append(resource)
580
581 def adjust_paths(self, resources):
582 """
583 Computes relative and absolute paths for resources.
584 """
585 new_resources = []
586 cdn_base = f'{config.npm_cdn}/@holoviz/panel@{JS_VERSION}/dist/'
587 for resource in resources:
588 resource = resource.replace('https://unpkg.com', config.npm_cdn)
589 if resource.startswith(cdn_base):
590 resource = resource.replace(cdn_base, CDN_DIST)
591 if self.mode == 'server':
592 resource = resource.replace(CDN_DIST, LOCAL_DIST)
593 if (resource.startswith(state.base_url) or resource.startswith('static/')):
594 if resource.startswith(state.base_url):
595 resource = resource[len(state.base_url):]
596 if state.rel_path:
597 resource = f'{state.rel_path}/{resource}'
598 elif self.absolute and self.mode == 'server':
599 resource = f'{self.root_url}{resource}'
600 new_resources.append(resource)
601 return new_resources
602
603 def clone(self, *, components=None) -> Resources:
604 """
605 Make a clone of a resources instance allowing to override its components.
606 """
607 return Resources(
608 mode=self.mode,
609 version=self.version,
610 root_dir=self.root_dir,
611 dev=self.dev,
612 minified=self.minified,
613 log_level=self.log_level,
614 root_url=self._root_url,
615 path_versioner=self.path_versioner,
616 components=components if components is not None else list(self.components),
617 base_dir=self.base_dir,
618 notebook=self.notebook,
619 absolute=self.absolute
620 )
621
622 @property
623 def dist_dir(self):
624 if self.notebook and self.mode == 'server':
625 dist_dir = '/panel-preview/static/extensions/panel/'
626 elif self.mode == 'server':
627 if state.rel_path:
628 dist_dir = f'{state.rel_path}/{LOCAL_DIST}'
629 else:
630 dist_dir = LOCAL_DIST
631 if self.absolute:
632 dist_dir = f'{self.root_url}{dist_dir}'
633 else:
634 dist_dir = CDN_DIST
635 return dist_dir
636
637 @property
638 def css_files(self):
639 from ..config import config
640
641 files = super(Resources, self).css_files
642 self.extra_resources(files, '__css__')
643 css_files = self.adjust_paths([
644 css for css in files if self.mode != 'inline' or not is_cdn_url(css)
645 ])
646 if config.design:
647 css_files += list(config.design._resources.get('font', {}).values())
648 for cssf in config.css_files:
649 if os.path.isfile(cssf) or cssf in files:
650 continue
651 css_files.append(cssf)
652 return css_files
653
654 @property
655 def css_raw(self):
656 from ..config import config
657 raw = super(Resources, self).css_raw
658
659 # Inline local dist resources
660 css_files = self._collect_external_resources("__css__")
661 self.extra_resources(css_files, '__css__')
662 raw += [
663 (DIST_DIR / css.replace(CDN_DIST, '')).read_text(encoding='utf-8')
664 for css in css_files if is_cdn_url(css)
665 ]
666
667 # Add local CSS files
668 for cssf in config.css_files:
669 if not os.path.isfile(cssf):
670 continue
671 css_txt = process_raw_css([Path(cssf).read_text(encoding='utf-8')])[0]
672 if css_txt not in raw:
673 raw.append(css_txt)
674
675 # Add loading spinner
676 if config.global_loading_spinner:
677 loading_base = (DIST_DIR / "css" / "loading.css").read_text(encoding='utf-8')
678 raw.extend([loading_base, loading_css(
679 config.loading_spinner, config.loading_color, config.loading_max_height
680 )])
681 return raw + process_raw_css(config.raw_css) + process_raw_css(config.global_css)
682
683 @property
684 def js_files(self):
685 from ..config import config
686
687 # Gather JS files
688 files = super(Resources, self).js_files
689 self.extra_resources(files, '__javascript__')
690 files += [js for js in config.js_files.values()]
691 if config.design:
692 design_js = config.design().resolve_resources(
693 cdn=self.notebook or 'auto', include_theme=False
694 )['js'].values()
695 files += [res for res in design_js if res not in files]
696
697 # Filter and adjust JS file urls
698 js_files = self.adjust_paths([
699 js for js in files if self.mode != 'inline' or not is_cdn_url(js)
700 ])
701
702 # Load requirejs last to avoid interfering with other libraries
703 dist_dir = self.dist_dir
704 require_index = [i for i, jsf in enumerate(js_files) if 'require' in jsf]
705 if require_index:
706 requirejs = js_files.pop(require_index[0])
707 if any('ace' in jsf for jsf in js_files):
708 js_files.append(dist_dir + 'pre_require.js')
709 js_files.append(requirejs)
710 if any('ace' in jsf for jsf in js_files):
711 js_files.append(dist_dir + 'post_require.js')
712 return js_files
713
714 @property
715 def js_modules(self):
716 from ..config import config
717 from ..reactive import ReactiveHTML
718
719 modules = list(config.js_modules.values())
720 self.extra_resources(modules, '__javascript_modules__')
721 if config.design:
722 design_resources = config.design().resolve_resources(
723 cdn=self.notebook or 'auto', include_theme=False
724 )
725 modules += [
726 res for res in design_resources['js_modules'].values()
727 if res not in modules
728 ]
729
730 for model in param.concrete_descendents(ReactiveHTML).values():
731 if not (getattr(model, '__javascript_modules__', None) and model._loaded()):
732 continue
733 for js_module in model.__javascript_modules__:
734 if not isurl(js_module) and not js_module.startswith('static/extensions'):
735 js_module = component_resource_path(model, '__javascript_modules__', js_module)
736 if js_module not in modules:
737 modules.append(js_module)
738
739 return self.adjust_paths(modules)
740
741 @property
742 def js_module_exports(self):
743 modules = {}
744 for model in Model.model_class_reverse_map.values():
745 if hasattr(model, '__javascript_module_exports__'):
746 modules.update(dict(zip(model.__javascript_module_exports__, model.__javascript_modules__)))
747 return modules
748
749 @property
750 def js_raw(self):
751 raw_js = super(Resources, self).js_raw
752 if not self.mode == 'inline':
753 return raw_js
754
755 # Inline local dist resources
756 js_files = self._collect_external_resources("__javascript__")
757 self.extra_resources(js_files, '__javascript__')
758 raw_js += [
759 (DIST_DIR / js.replace(CDN_DIST, '')).read_text(encoding='utf-8')
760 for js in js_files if is_cdn_url(js)
761 ]
762
763 # Inline config.js_files
764 from ..config import config
765 raw_js += [
766 Path(js).read_text(encoding='utf-8') for js in config.js_files.values()
767 if os.path.isfile(js)
768 ]
769
770 # Inline config.design JS resources
771 if config.design:
772 design_js = config.design().resolve_resources(
773 cdn=True, include_theme=False
774 )['js'].values()
775 raw_js += [
776 (DIST_DIR / js.replace(CDN_DIST, '')).read_text(encoding='utf-8')
777 for js in design_js if is_cdn_url(js)
778 ]
779 return raw_js
780
781 @property
782 def render_js(self):
783 return JS_RESOURCES.render(
784 js_raw=self.js_raw, js_files=self.js_files,
785 js_modules=self.js_modules, hashes=self.hashes,
786 js_module_exports=self.js_module_exports
787 )
788
789
790 class Bundle(BkBundle):
791
792 def __init__(self, notebook=False, **kwargs):
793 self.js_modules = kwargs.pop("js_modules", [])
794 self.js_module_exports = kwargs.pop("js_module_exports", {})
795 self.notebook = notebook
796 super().__init__(**kwargs)
797
798 @classmethod
799 def from_bokeh(cls, bk_bundle, notebook=False):
800 return cls(
801 notebook=notebook,
802 js_files=bk_bundle.js_files,
803 js_raw=bk_bundle.js_raw,
804 css_files=bk_bundle.css_files,
805 css_raw=bk_bundle.css_raw,
806 hashes=bk_bundle.hashes,
807 )
808
809 def _render_css(self) -> str:
810 return BkCSS_RESOURCES.render(
811 css_files=self.css_files,
812 css_raw=self.css_raw
813 )
814
815 def _render_js(self):
816 return JS_RESOURCES.render(
817 js_raw=self.js_raw,
818 js_files=self.js_files,
819 js_modules=self.js_modules,
820 js_module_exports=self.js_module_exports,
821 hashes=self.hashes
822 )
823
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/io/resources.py b/panel/io/resources.py
--- a/panel/io/resources.py
+++ b/panel/io/resources.py
@@ -372,7 +372,7 @@
extensions = _bundle_extensions(None, js_resources)
if reloading:
extensions = [
- ext for ext in extensions if not ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@')
+ ext for ext in extensions if not (ext.cdn_url is not None and ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@'))
]
extra_js = []
if mode == "inline":
| {"golden_diff": "diff --git a/panel/io/resources.py b/panel/io/resources.py\n--- a/panel/io/resources.py\n+++ b/panel/io/resources.py\n@@ -372,7 +372,7 @@\n extensions = _bundle_extensions(None, js_resources)\n if reloading:\n extensions = [\n- ext for ext in extensions if not ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@')\n+ ext for ext in extensions if not (ext.cdn_url is not None and ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@'))\n ]\n extra_js = []\n if mode == \"inline\":\n", "issue": "Extensions with no `cdn_url` cause `pn.extension` to fail\n#### ALL software version info\r\n```\r\npanel==1.2.3\r\nbokeh==3.2.2\r\n\r\njupyterlab==4.0.6\r\njupyterlab-pygments==0.2.2\r\njupyterlab-widgets==3.0.3\r\njupyterlab_server==2.25.0\r\n```\r\nRunning on Windows 10, Python 3.9, with the crash occurring in both VS Code and standalone Jupyter Lab.\r\n\r\n#### Description of expected behavior and the observed behavior\r\nIf you load other Python modules that include a pre-compiled extension (or, seemingly, any local extension that does not have `cdn_url` set), then `pn.extension()` crashes in one of the checks that assumes that `cdn_url` is not `None`. The expected behavior is that calling `pn.extension` succeeds.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\nI could put together a MWE but this only seems to happen for pre-compiled Bokeh extension models, which is difficult to set up (has a separate typescript build step), and I don't fully understand how Bokeh registers the module.\r\n\r\nIf needed, I'll come up with a MWE, but this looks like a one line change. The basic way to reproduce the bug is:\r\n```\r\nimport cydnus.PolygonGateTool # any pre-compiled Bokeh module, this is just my local one\r\nimport panel as pn\r\npn.extension()\r\n```\r\n\r\nI edited my local Panel install to add `print(extensions)` right before [lines 373-376](https://github.com/holoviz/panel/blob/9ea8c34d2029f6ed6a486aa2360566e7d16ca405/panel/io/resources.py#L373-L376)\r\n```\r\n print(extensions)\r\n if reloading:\r\n extensions = [\r\n ext for ext in extensions if not ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@')\r\n ]\r\n```\r\nand the loaded extensions look like:\r\n```\r\n[\r\n ExtensionEmbed(\r\n artifact_path='C:\\\\local_venv_path\\\\lib\\\\site-packages\\\\panel\\\\dist\\\\panel.min.js', \r\n server_url='http://localhost:5006/static/extensions/panel/panel.min.jsv=c47f5b18b8a430e698b9fe15e51f6119984e78334bcf3f45e210d30c37ef2f9e',\r\n cdn_url='https://unpkg.com/@holoviz/[email protected]/dist/panel.min.js'), \r\n ExtensionEmbed(\r\n artifact_path='c:\\\\local_extension_path\\\\cydnus.js',\r\n server_url='http://localhost:5006/static/extensions/cydnus/cydnus.js?v=6b13789e43e5485634533de16a65d8ba9d34c4c9758588b665805435f80eb115',\r\n cdn_url=None)\r\n]\r\n```\r\n\r\nNote the **`cdn_url=None`** line on the `cydnus` local extension.\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\local_venv_path\\site-packages\\IPython\\core\\interactiveshell.py\", line 3378, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"C:\\local_temp_path\\Temp\\ipykernel_1860\\1180023464.py\", line 4, in <module>\r\n pn.extension()\r\n File \"C:\\local_venv_path\\lib\\site-packages\\pyviz_comms\\__init__.py\", line 64, in __new__\r\n return param.ParameterizedFunction.__new__(cls, *args, **kwargs)\r\n File 
\"C:\\local_venv_path\\lib\\site-packages\\param\\parameterized.py\", line 3654, in __new__\r\n return inst.__call__(*args,**params)\r\n File \"C:\\local_venv_path\\lib\\site-packages\\panel\\config.py\", line 807, in __call__\r\n load_notebook(\r\n File \"C:\\local_venv_path\\lib\\site-packages\\panel\\io\\notebook.py\", line 364, in load_notebook\r\n bundle = bundle_resources(\r\n File \"local_venv_path\\lib\\site-packages\\panel\\io\\resources.py\", line 375, in bundle_resources\r\n extensions = [\r\n File \"local_venv_path\\lib\\site-packages\\panel\\io\\resources.py\", line 377, in <listcomp>\r\n ext for ext in extensions if not (ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@'))\r\nAttributeError: 'NoneType' object has no attribute 'startswith'\r\n```\r\n- [x] I may be interested in making a pull request to address this\r\n\n", "before_files": [{"content": "\"\"\"\nPatches bokeh resources to make it easy to add external JS and CSS\nresources via the panel.config object.\n\"\"\"\nfrom __future__ import annotations\n\nimport importlib\nimport json\nimport logging\nimport mimetypes\nimport os\nimport pathlib\nimport re\nimport textwrap\n\nfrom base64 import b64encode\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING, Dict, List, Literal, TypedDict,\n)\n\nimport param\n\nfrom bokeh.embed.bundle import (\n CSS_RESOURCES as BkCSS_RESOURCES, Bundle as BkBundle, _bundle_extensions,\n _use_mathjax, bundle_models, extension_dirs,\n)\nfrom bokeh.model import Model\nfrom bokeh.models import ImportedStyleSheet\nfrom bokeh.resources import Resources as BkResources, _get_server_urls\nfrom bokeh.settings import settings as _settings\nfrom jinja2.environment import Environment\nfrom jinja2.loaders import FileSystemLoader\nfrom markupsafe import Markup\n\nfrom ..config import config, panel_extension as extension\nfrom ..util import isurl, url_path\nfrom .loading import LOADING_INDICATOR_CSS_CLASS\nfrom .state import state\n\nif TYPE_CHECKING:\n from bokeh.resources import Urls\n\n class ResourcesType(TypedDict):\n css: Dict[str, str]\n js: Dict[str, str]\n js_modules: Dict[str, str]\n raw_css: List[str]\n\nlogger = logging.getLogger(__name__)\n\nResourceAttr = Literal[\"__css__\", \"__javascript__\"]\n\nwith open(Path(__file__).parent.parent / 'package.json') as f:\n package_json = json.load(f)\n JS_VERSION = package_json['version'].split('+')[0]\n\ndef get_env():\n ''' Get the correct Jinja2 Environment, also for frozen scripts.\n '''\n local_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_templates'))\n return Environment(loader=FileSystemLoader(local_path))\n\ndef conffilter(value):\n return json.dumps(OrderedDict(value)).replace('\"', '\\'')\n\n_env = get_env()\n_env.trim_blocks = True\n_env.lstrip_blocks = True\n_env.filters['json'] = lambda obj: Markup(json.dumps(obj))\n_env.filters['conffilter'] = conffilter\n_env.filters['sorted'] = sorted\n\n# Handle serving of the panel extension before session is loaded\nRESOURCE_MODE = 'server'\nPANEL_DIR = Path(__file__).parent.parent\nDIST_DIR = PANEL_DIR / 'dist'\nBUNDLE_DIR = DIST_DIR / 'bundled'\nASSETS_DIR = PANEL_DIR / 'assets'\nINDEX_TEMPLATE = _env.get_template('convert_index.html')\nBASE_TEMPLATE = _env.get_template('base.html')\nERROR_TEMPLATE = _env.get_template('error.html')\nLOGOUT_TEMPLATE = _env.get_template('logout.html')\nBASIC_LOGIN_TEMPLATE = 
_env.get_template('basic_login.html')\nDEFAULT_TITLE = \"Panel Application\"\nJS_RESOURCES = _env.get_template('js_resources.html')\nCDN_URL = f\"https://cdn.holoviz.org/panel/{JS_VERSION}/\"\nCDN_DIST = f\"{CDN_URL}dist/\"\nDOC_DIST = \"https://panel.holoviz.org/_static/\"\nLOCAL_DIST = \"static/extensions/panel/\"\nCOMPONENT_PATH = \"components/\"\n\nBK_PREFIX_RE = re.compile('\\.bk\\.')\n\nRESOURCE_URLS = {\n 'font-awesome': {\n 'zip': 'https://use.fontawesome.com/releases/v5.15.4/fontawesome-free-5.15.4-web.zip',\n 'src': 'fontawesome-free-5.15.4-web/',\n 'exclude': ['*.svg', '*.scss', '*.less']\n },\n 'bootstrap4': {\n 'tar': 'https://registry.npmjs.org/bootstrap/-/bootstrap-4.6.1.tgz',\n 'src': 'package/dist',\n 'exclude': [],\n 'dest': ''\n },\n 'bootstrap5': {\n 'tar': 'https://registry.npmjs.org/bootstrap/-/bootstrap-5.3.0-alpha1.tgz',\n 'src': 'package/dist',\n 'exclude': [],\n 'dest': ''\n },\n 'jQuery': {\n 'tar': 'https://registry.npmjs.org/jquery/-/jquery-3.5.1.tgz',\n 'src': 'package/dist',\n 'exclude': [],\n 'dest': ''\n }\n}\n\nCSS_URLS = {\n 'font-awesome': f'{CDN_DIST}bundled/font-awesome/css/all.min.css',\n 'bootstrap4': f'{CDN_DIST}bundled/bootstrap4/css/bootstrap.min.css',\n 'bootstrap5': f'{CDN_DIST}bundled/bootstrap5/css/bootstrap.min.css'\n}\n\nJS_URLS = {\n 'jQuery': f'{CDN_DIST}bundled/jquery/jquery.slim.min.js',\n 'bootstrap4': f'{CDN_DIST}bundled/bootstrap4/js/bootstrap.bundle.min.js',\n 'bootstrap5': f'{CDN_DIST}bundled/bootstrap5/js/bootstrap.bundle.min.js'\n}\n\nextension_dirs['panel'] = str(DIST_DIR)\n\nmimetypes.add_type(\"application/javascript\", \".js\")\n\n@contextmanager\ndef set_resource_mode(mode):\n global RESOURCE_MODE\n old_resources = _settings.resources._user_value\n old_mode = RESOURCE_MODE\n _settings.resources = RESOURCE_MODE = mode\n try:\n yield\n finally:\n RESOURCE_MODE = old_mode\n _settings.resources.set_value(old_resources)\n\ndef use_cdn() -> bool:\n return _settings.resources(default=\"server\") != 'server'\n\ndef get_dist_path(cdn: bool | Literal['auto'] = 'auto') -> str:\n cdn = use_cdn() if cdn == 'auto' else cdn\n if cdn:\n dist_path = CDN_DIST\n elif state.rel_path:\n dist_path = f'{state.rel_path}/{LOCAL_DIST}'\n else:\n dist_path = f'{LOCAL_DIST}'\n return dist_path\n\ndef is_cdn_url(url) -> bool:\n return isurl(url) and url.startswith(CDN_DIST)\n\ndef process_raw_css(raw_css):\n \"\"\"\n Converts old-style Bokeh<3 compatible CSS to Bokeh 3 compatible CSS.\n \"\"\"\n return [BK_PREFIX_RE.sub('.', css) for css in raw_css]\n\n@lru_cache(maxsize=None)\ndef loading_css(loading_spinner, color, max_height):\n with open(ASSETS_DIR / f'{loading_spinner}_spinner.svg', encoding='utf-8') as f:\n svg = f.read().replace('\\n', '').format(color=color)\n b64 = b64encode(svg.encode('utf-8')).decode('utf-8')\n return textwrap.dedent(f\"\"\"\n :host(.{LOADING_INDICATOR_CSS_CLASS}.pn-{loading_spinner}):before, .pn-loading.pn-{loading_spinner}:before {{\n background-image: url(\"data:image/svg+xml;base64,{b64}\");\n background-size: auto calc(min(50%, {max_height}px));\n }}\"\"\")\n\ndef resolve_custom_path(\n obj, path: str | os.PathLike, relative: bool = False\n) -> pathlib.Path | None:\n \"\"\"\n Attempts to resolve a path relative to some component.\n\n Arguments\n ---------\n obj: type | object\n The component to resolve the path relative to.\n path: str | os.PathLike\n Absolute or relative path to a resource.\n relative: bool\n Whether to return a relative path.\n\n Returns\n -------\n path: pathlib.Path | None\n \"\"\"\n if not path:\n 
return\n if not isinstance(obj, type):\n obj = type(obj)\n try:\n mod = importlib.import_module(obj.__module__)\n module_path = Path(mod.__file__).parent\n assert module_path.exists()\n except Exception:\n return None\n path = pathlib.Path(path)\n if path.is_absolute():\n abs_path = path\n else:\n abs_path = module_path / path\n if not abs_path.is_file():\n return None\n abs_path = abs_path.resolve()\n if not relative:\n return abs_path\n return os.path.relpath(abs_path, module_path)\n\ndef component_resource_path(component, attr, path):\n \"\"\"\n Generates a canonical URL for a component resource.\n\n To be used in conjunction with the `panel.io.server.ComponentResourceHandler`\n which allows dynamically resolving resources defined on components.\n \"\"\"\n if not isinstance(component, type):\n component = type(component)\n component_path = COMPONENT_PATH\n if state.rel_path:\n component_path = f\"{state.rel_path}/{component_path}\"\n rel_path = str(resolve_custom_path(component, path, relative=True)).replace(os.path.sep, '/')\n return f'{component_path}{component.__module__}/{component.__name__}/{attr}/{rel_path}'\n\ndef patch_stylesheet(stylesheet, dist_url):\n url = stylesheet.url\n if url.startswith(CDN_DIST+dist_url) and dist_url != CDN_DIST:\n patched_url = url.replace(CDN_DIST+dist_url, dist_url) + f'?v={JS_VERSION}'\n elif url.startswith(CDN_DIST) and dist_url != CDN_DIST:\n patched_url = url.replace(CDN_DIST, dist_url) + f'?v={JS_VERSION}'\n else:\n return\n try:\n stylesheet.url = patched_url\n except Exception:\n pass\n\ndef resolve_stylesheet(cls, stylesheet: str, attribute: str | None = None):\n \"\"\"\n Resolves a stylesheet definition, e.g. originating on a component\n Reactive._stylesheets or a Design.modifiers attribute. Stylesheets\n may be defined as one of the following:\n\n - Absolute URL defined with http(s) protocol\n - A path relative to the component\n\n Arguments\n ---------\n cls: type | object\n Object or class defining the stylesheet\n stylesheet: str\n The stylesheet definition\n \"\"\"\n stylesheet = str(stylesheet)\n if not stylesheet.startswith('http') and attribute and (custom_path:= resolve_custom_path(cls, stylesheet)):\n if not state._is_pyodide and state.curdoc and state.curdoc.session_context:\n stylesheet = component_resource_path(cls, attribute, stylesheet)\n else:\n stylesheet = custom_path.read_text(encoding='utf-8')\n return stylesheet\n\ndef patch_model_css(root, dist_url):\n \"\"\"\n Temporary patch for Model.css property used by Panel to provide\n stylesheets for components.\n\n ALERT: Should find better solution before official Bokeh 3.x compatible release\n \"\"\"\n # Patch model CSS properties\n doc = root.document\n if doc:\n held = doc.callbacks.hold_value\n events = list(doc.callbacks._held_events)\n doc.hold()\n for stylesheet in root.select({'type': ImportedStyleSheet}):\n patch_stylesheet(stylesheet, dist_url)\n if doc:\n doc.callbacks._held_events = events\n if held:\n doc.callbacks._hold = held\n else:\n doc.unhold()\n\ndef global_css(name):\n if RESOURCE_MODE == 'server':\n return f'static/extensions/panel/css/{name}.css'\n else:\n return f'{CDN_DIST}css/{name}.css'\n\ndef bundled_files(model, file_type='javascript'):\n name = model.__name__.lower()\n bdir = BUNDLE_DIR / name\n shared = list((JS_URLS if file_type == 'javascript' else CSS_URLS).values())\n files = []\n for url in getattr(model, f\"__{file_type}_raw__\", []):\n if url.startswith(CDN_DIST):\n filepath = url.replace(f'{CDN_DIST}bundled/', '')\n elif 
url.startswith(config.npm_cdn):\n filepath = url.replace(config.npm_cdn, '')[1:]\n else:\n filepath = url_path(url)\n test_filepath = filepath.split('?')[0]\n if url in shared:\n prefixed = filepath\n test_path = BUNDLE_DIR / test_filepath\n elif not test_filepath.replace('/', '').startswith(f'{name}/'):\n prefixed = f'{name}/{test_filepath}'\n test_path = bdir / test_filepath\n else:\n prefixed = test_filepath\n test_path = BUNDLE_DIR / test_filepath\n if test_path.is_file():\n if RESOURCE_MODE == 'server':\n files.append(f'static/extensions/panel/bundled/{prefixed}')\n elif filepath == test_filepath:\n files.append(f'{CDN_DIST}bundled/{prefixed}')\n else:\n files.append(url)\n else:\n files.append(url)\n return files\n\ndef bundle_resources(roots, resources, notebook=False, reloading=False, enable_mathjax='auto'):\n from ..config import panel_extension as ext\n global RESOURCE_MODE\n if not isinstance(resources, Resources):\n resources = Resources.from_bokeh(resources, notebook=notebook)\n js_resources = css_resources = resources\n RESOURCE_MODE = mode = js_resources.mode if resources is not None else \"inline\"\n\n js_files = []\n js_raw = []\n css_files = []\n css_raw = []\n\n if isinstance(enable_mathjax, bool):\n use_mathjax = enable_mathjax\n elif roots:\n use_mathjax = _use_mathjax(roots) or 'mathjax' in ext._loaded_extensions\n else:\n use_mathjax = False\n\n if js_resources:\n js_resources = js_resources.clone()\n if not use_mathjax and \"bokeh-mathjax\" in js_resources.components:\n js_resources.components.remove(\"bokeh-mathjax\")\n if reloading:\n js_resources.components.clear()\n\n js_files.extend(js_resources.js_files)\n js_raw.extend(js_resources.js_raw)\n\n css_files.extend(css_resources.css_files)\n css_raw.extend(css_resources.css_raw)\n\n extensions = _bundle_extensions(None, js_resources)\n if reloading:\n extensions = [\n ext for ext in extensions if not ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@')\n ]\n extra_js = []\n if mode == \"inline\":\n js_raw.extend([ Resources._inline(bundle.artifact_path) for bundle in extensions ])\n elif mode == \"server\":\n for bundle in extensions:\n server_url = bundle.server_url\n if resources.root_url and not resources.absolute:\n server_url = server_url.replace(resources.root_url, '', 1)\n js_files.append(server_url)\n elif mode == \"cdn\":\n for bundle in extensions:\n if bundle.cdn_url is not None:\n extra_js.append(bundle.cdn_url)\n else:\n js_raw.append(Resources._inline(bundle.artifact_path))\n else:\n extra_js.extend([ bundle.artifact_path for bundle in extensions ])\n js_files += resources.adjust_paths(extra_js)\n\n ext = bundle_models(None)\n if ext is not None:\n js_raw.append(ext)\n\n hashes = js_resources.hashes if js_resources else {}\n return Bundle(\n css_files=css_files,\n css_raw=css_raw,\n hashes=hashes,\n js_files=js_files,\n js_raw=js_raw,\n js_module_exports=resources.js_module_exports,\n js_modules=resources.js_modules,\n notebook=notebook,\n )\n\n\nclass ResourceComponent:\n \"\"\"\n Mix-in class for components that define a set of resources\n that have to be resolved.\n \"\"\"\n\n _resources = {\n 'css': {},\n 'font': {},\n 'js': {},\n 'js_modules': {},\n 'raw_css': [],\n }\n\n @classmethod\n def _resolve_resource(cls, resource_type: str, resource: str, cdn: bool = False):\n dist_path = get_dist_path(cdn=cdn)\n if resource.startswith(CDN_DIST):\n resource_path = resource.replace(f'{CDN_DIST}bundled/', '')\n elif resource.startswith(config.npm_cdn):\n resource_path = 
resource.replace(config.npm_cdn, '')[1:]\n elif resource.startswith('http:'):\n resource_path = url_path(resource)\n else:\n resource_path = resource\n\n if resource_type == 'js_modules' and not (state.rel_path or cdn):\n prefixed_dist = f'./{dist_path}'\n else:\n prefixed_dist = dist_path\n\n bundlepath = BUNDLE_DIR / resource_path.replace('/', os.path.sep)\n # Windows may trigger OSError: [WinError 123]\n try:\n is_file = bundlepath.is_file()\n except Exception:\n is_file = False\n if is_file:\n return f'{prefixed_dist}bundled/{resource_path}'\n elif isurl(resource):\n return resource\n elif resolve_custom_path(cls, resource):\n return component_resource_path(\n cls, f'_resources/{resource_type}', resource\n )\n\n def resolve_resources(self, cdn: bool | Literal['auto'] = 'auto') -> ResourcesType:\n \"\"\"\n Resolves the resources required for this component.\n\n Arguments\n ---------\n cdn: bool | Literal['auto']\n Whether to load resources from CDN or local server. If set\n to 'auto' value will be automatically determine based on\n global settings.\n\n Returns\n -------\n Dictionary containing JS and CSS resources.\n \"\"\"\n cls = type(self)\n resources = {}\n for rt, res in self._resources.items():\n if not isinstance(res, dict):\n continue\n if rt == 'font':\n rt = 'css'\n res = {\n name: url if isurl(url) else f'{cls.__name__.lower()}/{url}'\n for name, url in res.items()\n }\n if rt in resources:\n resources[rt] = dict(resources[rt], **res)\n else:\n resources[rt] = res\n\n cdn = use_cdn() if cdn == 'auto' else cdn\n resource_types: ResourcesType = {\n 'js': {},\n 'js_modules': {},\n 'css': {},\n 'raw_css': []\n }\n\n for resource_type in resource_types:\n if resource_type not in resources or resource_type == 'raw_css':\n continue\n resource_files = resource_types[resource_type]\n for rname, resource in resources[resource_type].items():\n resolved_resource = self._resolve_resource(\n resource_type, resource, cdn=cdn\n )\n if resolved_resource:\n resource_files[rname] = resolved_resource\n return resource_types\n\n\nclass Resources(BkResources):\n\n def __init__(self, *args, absolute=False, notebook=False, **kwargs):\n self.absolute = absolute\n self.notebook = notebook\n super().__init__(*args, **kwargs)\n\n @classmethod\n def from_bokeh(cls, bkr, absolute=False, notebook=False):\n kwargs = {}\n if bkr.mode.startswith(\"server\"):\n kwargs['root_url'] = bkr.root_url\n\n components = bkr.components if hasattr(bkr, 'components_for') else bkr._components\n return cls(\n mode=bkr.mode, version=bkr.version, minified=bkr.minified,\n log_level=bkr.log_level, notebook=notebook,\n path_versioner=bkr.path_versioner,\n components=components, base_dir=bkr.base_dir,\n root_dir=bkr.root_dir, absolute=absolute, **kwargs\n )\n\n def _collect_external_resources(self, resource_attr: ResourceAttr) -> list[str]:\n \"\"\" Collect external resources set on resource_attr attribute of all models.\"\"\"\n external_resources: list[str] = []\n\n if state._extensions is not None:\n external_modules = {\n module: ext for ext, module in extension._imports.items()\n }\n else:\n external_modules = None\n\n for _, cls in sorted(Model.model_class_reverse_map.items(), key=lambda arg: arg[0]):\n if external_modules is not None and cls.__module__ in external_modules:\n if external_modules[cls.__module__] not in state._extensions:\n continue\n external: list[str] | str | None = getattr(cls, resource_attr, None)\n\n if isinstance(external, str):\n if external not in external_resources:\n 
external_resources.append(external)\n elif isinstance(external, list):\n for e in external:\n if e not in external_resources:\n external_resources.append(e)\n\n return external_resources\n\n def _server_urls(self) -> Urls:\n return _get_server_urls(\n self.root_url if self.absolute else '',\n False if self.dev else self.minified,\n self.path_versioner\n )\n\n def extra_resources(self, resources, resource_type):\n \"\"\"\n Adds resources for ReactiveHTML components.\n \"\"\"\n from ..reactive import ReactiveHTML\n for model in param.concrete_descendents(ReactiveHTML).values():\n if not (getattr(model, resource_type, None) and model._loaded()):\n continue\n for resource in getattr(model, resource_type, []):\n if not isurl(resource) and not resource.startswith('static/extensions'):\n resource = component_resource_path(model, resource_type, resource)\n if resource not in resources:\n resources.append(resource)\n\n def adjust_paths(self, resources):\n \"\"\"\n Computes relative and absolute paths for resources.\n \"\"\"\n new_resources = []\n cdn_base = f'{config.npm_cdn}/@holoviz/panel@{JS_VERSION}/dist/'\n for resource in resources:\n resource = resource.replace('https://unpkg.com', config.npm_cdn)\n if resource.startswith(cdn_base):\n resource = resource.replace(cdn_base, CDN_DIST)\n if self.mode == 'server':\n resource = resource.replace(CDN_DIST, LOCAL_DIST)\n if (resource.startswith(state.base_url) or resource.startswith('static/')):\n if resource.startswith(state.base_url):\n resource = resource[len(state.base_url):]\n if state.rel_path:\n resource = f'{state.rel_path}/{resource}'\n elif self.absolute and self.mode == 'server':\n resource = f'{self.root_url}{resource}'\n new_resources.append(resource)\n return new_resources\n\n def clone(self, *, components=None) -> Resources:\n \"\"\"\n Make a clone of a resources instance allowing to override its components.\n \"\"\"\n return Resources(\n mode=self.mode,\n version=self.version,\n root_dir=self.root_dir,\n dev=self.dev,\n minified=self.minified,\n log_level=self.log_level,\n root_url=self._root_url,\n path_versioner=self.path_versioner,\n components=components if components is not None else list(self.components),\n base_dir=self.base_dir,\n notebook=self.notebook,\n absolute=self.absolute\n )\n\n @property\n def dist_dir(self):\n if self.notebook and self.mode == 'server':\n dist_dir = '/panel-preview/static/extensions/panel/'\n elif self.mode == 'server':\n if state.rel_path:\n dist_dir = f'{state.rel_path}/{LOCAL_DIST}'\n else:\n dist_dir = LOCAL_DIST\n if self.absolute:\n dist_dir = f'{self.root_url}{dist_dir}'\n else:\n dist_dir = CDN_DIST\n return dist_dir\n\n @property\n def css_files(self):\n from ..config import config\n\n files = super(Resources, self).css_files\n self.extra_resources(files, '__css__')\n css_files = self.adjust_paths([\n css for css in files if self.mode != 'inline' or not is_cdn_url(css)\n ])\n if config.design:\n css_files += list(config.design._resources.get('font', {}).values())\n for cssf in config.css_files:\n if os.path.isfile(cssf) or cssf in files:\n continue\n css_files.append(cssf)\n return css_files\n\n @property\n def css_raw(self):\n from ..config import config\n raw = super(Resources, self).css_raw\n\n # Inline local dist resources\n css_files = self._collect_external_resources(\"__css__\")\n self.extra_resources(css_files, '__css__')\n raw += [\n (DIST_DIR / css.replace(CDN_DIST, '')).read_text(encoding='utf-8')\n for css in css_files if is_cdn_url(css)\n ]\n\n # Add local CSS files\n for cssf 
in config.css_files:\n if not os.path.isfile(cssf):\n continue\n css_txt = process_raw_css([Path(cssf).read_text(encoding='utf-8')])[0]\n if css_txt not in raw:\n raw.append(css_txt)\n\n # Add loading spinner\n if config.global_loading_spinner:\n loading_base = (DIST_DIR / \"css\" / \"loading.css\").read_text(encoding='utf-8')\n raw.extend([loading_base, loading_css(\n config.loading_spinner, config.loading_color, config.loading_max_height\n )])\n return raw + process_raw_css(config.raw_css) + process_raw_css(config.global_css)\n\n @property\n def js_files(self):\n from ..config import config\n\n # Gather JS files\n files = super(Resources, self).js_files\n self.extra_resources(files, '__javascript__')\n files += [js for js in config.js_files.values()]\n if config.design:\n design_js = config.design().resolve_resources(\n cdn=self.notebook or 'auto', include_theme=False\n )['js'].values()\n files += [res for res in design_js if res not in files]\n\n # Filter and adjust JS file urls\n js_files = self.adjust_paths([\n js for js in files if self.mode != 'inline' or not is_cdn_url(js)\n ])\n\n # Load requirejs last to avoid interfering with other libraries\n dist_dir = self.dist_dir\n require_index = [i for i, jsf in enumerate(js_files) if 'require' in jsf]\n if require_index:\n requirejs = js_files.pop(require_index[0])\n if any('ace' in jsf for jsf in js_files):\n js_files.append(dist_dir + 'pre_require.js')\n js_files.append(requirejs)\n if any('ace' in jsf for jsf in js_files):\n js_files.append(dist_dir + 'post_require.js')\n return js_files\n\n @property\n def js_modules(self):\n from ..config import config\n from ..reactive import ReactiveHTML\n\n modules = list(config.js_modules.values())\n self.extra_resources(modules, '__javascript_modules__')\n if config.design:\n design_resources = config.design().resolve_resources(\n cdn=self.notebook or 'auto', include_theme=False\n )\n modules += [\n res for res in design_resources['js_modules'].values()\n if res not in modules\n ]\n\n for model in param.concrete_descendents(ReactiveHTML).values():\n if not (getattr(model, '__javascript_modules__', None) and model._loaded()):\n continue\n for js_module in model.__javascript_modules__:\n if not isurl(js_module) and not js_module.startswith('static/extensions'):\n js_module = component_resource_path(model, '__javascript_modules__', js_module)\n if js_module not in modules:\n modules.append(js_module)\n\n return self.adjust_paths(modules)\n\n @property\n def js_module_exports(self):\n modules = {}\n for model in Model.model_class_reverse_map.values():\n if hasattr(model, '__javascript_module_exports__'):\n modules.update(dict(zip(model.__javascript_module_exports__, model.__javascript_modules__)))\n return modules\n\n @property\n def js_raw(self):\n raw_js = super(Resources, self).js_raw\n if not self.mode == 'inline':\n return raw_js\n\n # Inline local dist resources\n js_files = self._collect_external_resources(\"__javascript__\")\n self.extra_resources(js_files, '__javascript__')\n raw_js += [\n (DIST_DIR / js.replace(CDN_DIST, '')).read_text(encoding='utf-8')\n for js in js_files if is_cdn_url(js)\n ]\n\n # Inline config.js_files\n from ..config import config\n raw_js += [\n Path(js).read_text(encoding='utf-8') for js in config.js_files.values()\n if os.path.isfile(js)\n ]\n\n # Inline config.design JS resources\n if config.design:\n design_js = config.design().resolve_resources(\n cdn=True, include_theme=False\n )['js'].values()\n raw_js += [\n (DIST_DIR / js.replace(CDN_DIST, 
'')).read_text(encoding='utf-8')\n for js in design_js if is_cdn_url(js)\n ]\n return raw_js\n\n @property\n def render_js(self):\n return JS_RESOURCES.render(\n js_raw=self.js_raw, js_files=self.js_files,\n js_modules=self.js_modules, hashes=self.hashes,\n js_module_exports=self.js_module_exports\n )\n\n\nclass Bundle(BkBundle):\n\n def __init__(self, notebook=False, **kwargs):\n self.js_modules = kwargs.pop(\"js_modules\", [])\n self.js_module_exports = kwargs.pop(\"js_module_exports\", {})\n self.notebook = notebook\n super().__init__(**kwargs)\n\n @classmethod\n def from_bokeh(cls, bk_bundle, notebook=False):\n return cls(\n notebook=notebook,\n js_files=bk_bundle.js_files,\n js_raw=bk_bundle.js_raw,\n css_files=bk_bundle.css_files,\n css_raw=bk_bundle.css_raw,\n hashes=bk_bundle.hashes,\n )\n\n def _render_css(self) -> str:\n return BkCSS_RESOURCES.render(\n css_files=self.css_files,\n css_raw=self.css_raw\n )\n\n def _render_js(self):\n return JS_RESOURCES.render(\n js_raw=self.js_raw,\n js_files=self.js_files,\n js_modules=self.js_modules,\n js_module_exports=self.js_module_exports,\n hashes=self.hashes\n )\n", "path": "panel/io/resources.py"}], "after_files": [{"content": "\"\"\"\nPatches bokeh resources to make it easy to add external JS and CSS\nresources via the panel.config object.\n\"\"\"\nfrom __future__ import annotations\n\nimport importlib\nimport json\nimport logging\nimport mimetypes\nimport os\nimport pathlib\nimport re\nimport textwrap\n\nfrom base64 import b64encode\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING, Dict, List, Literal, TypedDict,\n)\n\nimport param\n\nfrom bokeh.embed.bundle import (\n CSS_RESOURCES as BkCSS_RESOURCES, Bundle as BkBundle, _bundle_extensions,\n _use_mathjax, bundle_models, extension_dirs,\n)\nfrom bokeh.model import Model\nfrom bokeh.models import ImportedStyleSheet\nfrom bokeh.resources import Resources as BkResources, _get_server_urls\nfrom bokeh.settings import settings as _settings\nfrom jinja2.environment import Environment\nfrom jinja2.loaders import FileSystemLoader\nfrom markupsafe import Markup\n\nfrom ..config import config, panel_extension as extension\nfrom ..util import isurl, url_path\nfrom .loading import LOADING_INDICATOR_CSS_CLASS\nfrom .state import state\n\nif TYPE_CHECKING:\n from bokeh.resources import Urls\n\n class ResourcesType(TypedDict):\n css: Dict[str, str]\n js: Dict[str, str]\n js_modules: Dict[str, str]\n raw_css: List[str]\n\nlogger = logging.getLogger(__name__)\n\nResourceAttr = Literal[\"__css__\", \"__javascript__\"]\n\nwith open(Path(__file__).parent.parent / 'package.json') as f:\n package_json = json.load(f)\n JS_VERSION = package_json['version'].split('+')[0]\n\ndef get_env():\n ''' Get the correct Jinja2 Environment, also for frozen scripts.\n '''\n local_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_templates'))\n return Environment(loader=FileSystemLoader(local_path))\n\ndef conffilter(value):\n return json.dumps(OrderedDict(value)).replace('\"', '\\'')\n\n_env = get_env()\n_env.trim_blocks = True\n_env.lstrip_blocks = True\n_env.filters['json'] = lambda obj: Markup(json.dumps(obj))\n_env.filters['conffilter'] = conffilter\n_env.filters['sorted'] = sorted\n\n# Handle serving of the panel extension before session is loaded\nRESOURCE_MODE = 'server'\nPANEL_DIR = Path(__file__).parent.parent\nDIST_DIR = PANEL_DIR / 'dist'\nBUNDLE_DIR = DIST_DIR / 
'bundled'\nASSETS_DIR = PANEL_DIR / 'assets'\nINDEX_TEMPLATE = _env.get_template('convert_index.html')\nBASE_TEMPLATE = _env.get_template('base.html')\nERROR_TEMPLATE = _env.get_template('error.html')\nLOGOUT_TEMPLATE = _env.get_template('logout.html')\nBASIC_LOGIN_TEMPLATE = _env.get_template('basic_login.html')\nDEFAULT_TITLE = \"Panel Application\"\nJS_RESOURCES = _env.get_template('js_resources.html')\nCDN_URL = f\"https://cdn.holoviz.org/panel/{JS_VERSION}/\"\nCDN_DIST = f\"{CDN_URL}dist/\"\nDOC_DIST = \"https://panel.holoviz.org/_static/\"\nLOCAL_DIST = \"static/extensions/panel/\"\nCOMPONENT_PATH = \"components/\"\n\nBK_PREFIX_RE = re.compile('\\.bk\\.')\n\nRESOURCE_URLS = {\n 'font-awesome': {\n 'zip': 'https://use.fontawesome.com/releases/v5.15.4/fontawesome-free-5.15.4-web.zip',\n 'src': 'fontawesome-free-5.15.4-web/',\n 'exclude': ['*.svg', '*.scss', '*.less']\n },\n 'bootstrap4': {\n 'tar': 'https://registry.npmjs.org/bootstrap/-/bootstrap-4.6.1.tgz',\n 'src': 'package/dist',\n 'exclude': [],\n 'dest': ''\n },\n 'bootstrap5': {\n 'tar': 'https://registry.npmjs.org/bootstrap/-/bootstrap-5.3.0-alpha1.tgz',\n 'src': 'package/dist',\n 'exclude': [],\n 'dest': ''\n },\n 'jQuery': {\n 'tar': 'https://registry.npmjs.org/jquery/-/jquery-3.5.1.tgz',\n 'src': 'package/dist',\n 'exclude': [],\n 'dest': ''\n }\n}\n\nCSS_URLS = {\n 'font-awesome': f'{CDN_DIST}bundled/font-awesome/css/all.min.css',\n 'bootstrap4': f'{CDN_DIST}bundled/bootstrap4/css/bootstrap.min.css',\n 'bootstrap5': f'{CDN_DIST}bundled/bootstrap5/css/bootstrap.min.css'\n}\n\nJS_URLS = {\n 'jQuery': f'{CDN_DIST}bundled/jquery/jquery.slim.min.js',\n 'bootstrap4': f'{CDN_DIST}bundled/bootstrap4/js/bootstrap.bundle.min.js',\n 'bootstrap5': f'{CDN_DIST}bundled/bootstrap5/js/bootstrap.bundle.min.js'\n}\n\nextension_dirs['panel'] = str(DIST_DIR)\n\nmimetypes.add_type(\"application/javascript\", \".js\")\n\n@contextmanager\ndef set_resource_mode(mode):\n global RESOURCE_MODE\n old_resources = _settings.resources._user_value\n old_mode = RESOURCE_MODE\n _settings.resources = RESOURCE_MODE = mode\n try:\n yield\n finally:\n RESOURCE_MODE = old_mode\n _settings.resources.set_value(old_resources)\n\ndef use_cdn() -> bool:\n return _settings.resources(default=\"server\") != 'server'\n\ndef get_dist_path(cdn: bool | Literal['auto'] = 'auto') -> str:\n cdn = use_cdn() if cdn == 'auto' else cdn\n if cdn:\n dist_path = CDN_DIST\n elif state.rel_path:\n dist_path = f'{state.rel_path}/{LOCAL_DIST}'\n else:\n dist_path = f'{LOCAL_DIST}'\n return dist_path\n\ndef is_cdn_url(url) -> bool:\n return isurl(url) and url.startswith(CDN_DIST)\n\ndef process_raw_css(raw_css):\n \"\"\"\n Converts old-style Bokeh<3 compatible CSS to Bokeh 3 compatible CSS.\n \"\"\"\n return [BK_PREFIX_RE.sub('.', css) for css in raw_css]\n\n@lru_cache(maxsize=None)\ndef loading_css(loading_spinner, color, max_height):\n with open(ASSETS_DIR / f'{loading_spinner}_spinner.svg', encoding='utf-8') as f:\n svg = f.read().replace('\\n', '').format(color=color)\n b64 = b64encode(svg.encode('utf-8')).decode('utf-8')\n return textwrap.dedent(f\"\"\"\n :host(.{LOADING_INDICATOR_CSS_CLASS}.pn-{loading_spinner}):before, .pn-loading.pn-{loading_spinner}:before {{\n background-image: url(\"data:image/svg+xml;base64,{b64}\");\n background-size: auto calc(min(50%, {max_height}px));\n }}\"\"\")\n\ndef resolve_custom_path(\n obj, path: str | os.PathLike, relative: bool = False\n) -> pathlib.Path | None:\n \"\"\"\n Attempts to resolve a path relative to some component.\n\n Arguments\n 
---------\n obj: type | object\n The component to resolve the path relative to.\n path: str | os.PathLike\n Absolute or relative path to a resource.\n relative: bool\n Whether to return a relative path.\n\n Returns\n -------\n path: pathlib.Path | None\n \"\"\"\n if not path:\n return\n if not isinstance(obj, type):\n obj = type(obj)\n try:\n mod = importlib.import_module(obj.__module__)\n module_path = Path(mod.__file__).parent\n assert module_path.exists()\n except Exception:\n return None\n path = pathlib.Path(path)\n if path.is_absolute():\n abs_path = path\n else:\n abs_path = module_path / path\n if not abs_path.is_file():\n return None\n abs_path = abs_path.resolve()\n if not relative:\n return abs_path\n return os.path.relpath(abs_path, module_path)\n\ndef component_resource_path(component, attr, path):\n \"\"\"\n Generates a canonical URL for a component resource.\n\n To be used in conjunction with the `panel.io.server.ComponentResourceHandler`\n which allows dynamically resolving resources defined on components.\n \"\"\"\n if not isinstance(component, type):\n component = type(component)\n component_path = COMPONENT_PATH\n if state.rel_path:\n component_path = f\"{state.rel_path}/{component_path}\"\n rel_path = str(resolve_custom_path(component, path, relative=True)).replace(os.path.sep, '/')\n return f'{component_path}{component.__module__}/{component.__name__}/{attr}/{rel_path}'\n\ndef patch_stylesheet(stylesheet, dist_url):\n url = stylesheet.url\n if url.startswith(CDN_DIST+dist_url) and dist_url != CDN_DIST:\n patched_url = url.replace(CDN_DIST+dist_url, dist_url) + f'?v={JS_VERSION}'\n elif url.startswith(CDN_DIST) and dist_url != CDN_DIST:\n patched_url = url.replace(CDN_DIST, dist_url) + f'?v={JS_VERSION}'\n else:\n return\n try:\n stylesheet.url = patched_url\n except Exception:\n pass\n\ndef resolve_stylesheet(cls, stylesheet: str, attribute: str | None = None):\n \"\"\"\n Resolves a stylesheet definition, e.g. originating on a component\n Reactive._stylesheets or a Design.modifiers attribute. 
Stylesheets\n may be defined as one of the following:\n\n - Absolute URL defined with http(s) protocol\n - A path relative to the component\n\n Arguments\n ---------\n cls: type | object\n Object or class defining the stylesheet\n stylesheet: str\n The stylesheet definition\n \"\"\"\n stylesheet = str(stylesheet)\n if not stylesheet.startswith('http') and attribute and (custom_path:= resolve_custom_path(cls, stylesheet)):\n if not state._is_pyodide and state.curdoc and state.curdoc.session_context:\n stylesheet = component_resource_path(cls, attribute, stylesheet)\n else:\n stylesheet = custom_path.read_text(encoding='utf-8')\n return stylesheet\n\ndef patch_model_css(root, dist_url):\n \"\"\"\n Temporary patch for Model.css property used by Panel to provide\n stylesheets for components.\n\n ALERT: Should find better solution before official Bokeh 3.x compatible release\n \"\"\"\n # Patch model CSS properties\n doc = root.document\n if doc:\n held = doc.callbacks.hold_value\n events = list(doc.callbacks._held_events)\n doc.hold()\n for stylesheet in root.select({'type': ImportedStyleSheet}):\n patch_stylesheet(stylesheet, dist_url)\n if doc:\n doc.callbacks._held_events = events\n if held:\n doc.callbacks._hold = held\n else:\n doc.unhold()\n\ndef global_css(name):\n if RESOURCE_MODE == 'server':\n return f'static/extensions/panel/css/{name}.css'\n else:\n return f'{CDN_DIST}css/{name}.css'\n\ndef bundled_files(model, file_type='javascript'):\n name = model.__name__.lower()\n bdir = BUNDLE_DIR / name\n shared = list((JS_URLS if file_type == 'javascript' else CSS_URLS).values())\n files = []\n for url in getattr(model, f\"__{file_type}_raw__\", []):\n if url.startswith(CDN_DIST):\n filepath = url.replace(f'{CDN_DIST}bundled/', '')\n elif url.startswith(config.npm_cdn):\n filepath = url.replace(config.npm_cdn, '')[1:]\n else:\n filepath = url_path(url)\n test_filepath = filepath.split('?')[0]\n if url in shared:\n prefixed = filepath\n test_path = BUNDLE_DIR / test_filepath\n elif not test_filepath.replace('/', '').startswith(f'{name}/'):\n prefixed = f'{name}/{test_filepath}'\n test_path = bdir / test_filepath\n else:\n prefixed = test_filepath\n test_path = BUNDLE_DIR / test_filepath\n if test_path.is_file():\n if RESOURCE_MODE == 'server':\n files.append(f'static/extensions/panel/bundled/{prefixed}')\n elif filepath == test_filepath:\n files.append(f'{CDN_DIST}bundled/{prefixed}')\n else:\n files.append(url)\n else:\n files.append(url)\n return files\n\ndef bundle_resources(roots, resources, notebook=False, reloading=False, enable_mathjax='auto'):\n from ..config import panel_extension as ext\n global RESOURCE_MODE\n if not isinstance(resources, Resources):\n resources = Resources.from_bokeh(resources, notebook=notebook)\n js_resources = css_resources = resources\n RESOURCE_MODE = mode = js_resources.mode if resources is not None else \"inline\"\n\n js_files = []\n js_raw = []\n css_files = []\n css_raw = []\n\n if isinstance(enable_mathjax, bool):\n use_mathjax = enable_mathjax\n elif roots:\n use_mathjax = _use_mathjax(roots) or 'mathjax' in ext._loaded_extensions\n else:\n use_mathjax = False\n\n if js_resources:\n js_resources = js_resources.clone()\n if not use_mathjax and \"bokeh-mathjax\" in js_resources.components:\n js_resources.components.remove(\"bokeh-mathjax\")\n if reloading:\n js_resources.components.clear()\n\n js_files.extend(js_resources.js_files)\n js_raw.extend(js_resources.js_raw)\n\n css_files.extend(css_resources.css_files)\n 
css_raw.extend(css_resources.css_raw)\n\n extensions = _bundle_extensions(None, js_resources)\n if reloading:\n extensions = [\n ext for ext in extensions if not (ext.cdn_url is not None and ext.cdn_url.startswith('https://unpkg.com/@holoviz/panel@'))\n ]\n extra_js = []\n if mode == \"inline\":\n js_raw.extend([ Resources._inline(bundle.artifact_path) for bundle in extensions ])\n elif mode == \"server\":\n for bundle in extensions:\n server_url = bundle.server_url\n if resources.root_url and not resources.absolute:\n server_url = server_url.replace(resources.root_url, '', 1)\n js_files.append(server_url)\n elif mode == \"cdn\":\n for bundle in extensions:\n if bundle.cdn_url is not None:\n extra_js.append(bundle.cdn_url)\n else:\n js_raw.append(Resources._inline(bundle.artifact_path))\n else:\n extra_js.extend([ bundle.artifact_path for bundle in extensions ])\n js_files += resources.adjust_paths(extra_js)\n\n ext = bundle_models(None)\n if ext is not None:\n js_raw.append(ext)\n\n hashes = js_resources.hashes if js_resources else {}\n return Bundle(\n css_files=css_files,\n css_raw=css_raw,\n hashes=hashes,\n js_files=js_files,\n js_raw=js_raw,\n js_module_exports=resources.js_module_exports,\n js_modules=resources.js_modules,\n notebook=notebook,\n )\n\n\nclass ResourceComponent:\n \"\"\"\n Mix-in class for components that define a set of resources\n that have to be resolved.\n \"\"\"\n\n _resources = {\n 'css': {},\n 'font': {},\n 'js': {},\n 'js_modules': {},\n 'raw_css': [],\n }\n\n @classmethod\n def _resolve_resource(cls, resource_type: str, resource: str, cdn: bool = False):\n dist_path = get_dist_path(cdn=cdn)\n if resource.startswith(CDN_DIST):\n resource_path = resource.replace(f'{CDN_DIST}bundled/', '')\n elif resource.startswith(config.npm_cdn):\n resource_path = resource.replace(config.npm_cdn, '')[1:]\n elif resource.startswith('http:'):\n resource_path = url_path(resource)\n else:\n resource_path = resource\n\n if resource_type == 'js_modules' and not (state.rel_path or cdn):\n prefixed_dist = f'./{dist_path}'\n else:\n prefixed_dist = dist_path\n\n bundlepath = BUNDLE_DIR / resource_path.replace('/', os.path.sep)\n # Windows may trigger OSError: [WinError 123]\n try:\n is_file = bundlepath.is_file()\n except Exception:\n is_file = False\n if is_file:\n return f'{prefixed_dist}bundled/{resource_path}'\n elif isurl(resource):\n return resource\n elif resolve_custom_path(cls, resource):\n return component_resource_path(\n cls, f'_resources/{resource_type}', resource\n )\n\n def resolve_resources(self, cdn: bool | Literal['auto'] = 'auto') -> ResourcesType:\n \"\"\"\n Resolves the resources required for this component.\n\n Arguments\n ---------\n cdn: bool | Literal['auto']\n Whether to load resources from CDN or local server. 
If set\n to 'auto' value will be automatically determine based on\n global settings.\n\n Returns\n -------\n Dictionary containing JS and CSS resources.\n \"\"\"\n cls = type(self)\n resources = {}\n for rt, res in self._resources.items():\n if not isinstance(res, dict):\n continue\n if rt == 'font':\n rt = 'css'\n res = {\n name: url if isurl(url) else f'{cls.__name__.lower()}/{url}'\n for name, url in res.items()\n }\n if rt in resources:\n resources[rt] = dict(resources[rt], **res)\n else:\n resources[rt] = res\n\n cdn = use_cdn() if cdn == 'auto' else cdn\n resource_types: ResourcesType = {\n 'js': {},\n 'js_modules': {},\n 'css': {},\n 'raw_css': []\n }\n\n for resource_type in resource_types:\n if resource_type not in resources or resource_type == 'raw_css':\n continue\n resource_files = resource_types[resource_type]\n for rname, resource in resources[resource_type].items():\n resolved_resource = self._resolve_resource(\n resource_type, resource, cdn=cdn\n )\n if resolved_resource:\n resource_files[rname] = resolved_resource\n return resource_types\n\n\nclass Resources(BkResources):\n\n def __init__(self, *args, absolute=False, notebook=False, **kwargs):\n self.absolute = absolute\n self.notebook = notebook\n super().__init__(*args, **kwargs)\n\n @classmethod\n def from_bokeh(cls, bkr, absolute=False, notebook=False):\n kwargs = {}\n if bkr.mode.startswith(\"server\"):\n kwargs['root_url'] = bkr.root_url\n\n components = bkr.components if hasattr(bkr, 'components_for') else bkr._components\n return cls(\n mode=bkr.mode, version=bkr.version, minified=bkr.minified,\n log_level=bkr.log_level, notebook=notebook,\n path_versioner=bkr.path_versioner,\n components=components, base_dir=bkr.base_dir,\n root_dir=bkr.root_dir, absolute=absolute, **kwargs\n )\n\n def _collect_external_resources(self, resource_attr: ResourceAttr) -> list[str]:\n \"\"\" Collect external resources set on resource_attr attribute of all models.\"\"\"\n external_resources: list[str] = []\n\n if state._extensions is not None:\n external_modules = {\n module: ext for ext, module in extension._imports.items()\n }\n else:\n external_modules = None\n\n for _, cls in sorted(Model.model_class_reverse_map.items(), key=lambda arg: arg[0]):\n if external_modules is not None and cls.__module__ in external_modules:\n if external_modules[cls.__module__] not in state._extensions:\n continue\n external: list[str] | str | None = getattr(cls, resource_attr, None)\n\n if isinstance(external, str):\n if external not in external_resources:\n external_resources.append(external)\n elif isinstance(external, list):\n for e in external:\n if e not in external_resources:\n external_resources.append(e)\n\n return external_resources\n\n def _server_urls(self) -> Urls:\n return _get_server_urls(\n self.root_url if self.absolute else '',\n False if self.dev else self.minified,\n self.path_versioner\n )\n\n def extra_resources(self, resources, resource_type):\n \"\"\"\n Adds resources for ReactiveHTML components.\n \"\"\"\n from ..reactive import ReactiveHTML\n for model in param.concrete_descendents(ReactiveHTML).values():\n if not (getattr(model, resource_type, None) and model._loaded()):\n continue\n for resource in getattr(model, resource_type, []):\n if not isurl(resource) and not resource.startswith('static/extensions'):\n resource = component_resource_path(model, resource_type, resource)\n if resource not in resources:\n resources.append(resource)\n\n def adjust_paths(self, resources):\n \"\"\"\n Computes relative and absolute paths for 
resources.\n \"\"\"\n new_resources = []\n cdn_base = f'{config.npm_cdn}/@holoviz/panel@{JS_VERSION}/dist/'\n for resource in resources:\n resource = resource.replace('https://unpkg.com', config.npm_cdn)\n if resource.startswith(cdn_base):\n resource = resource.replace(cdn_base, CDN_DIST)\n if self.mode == 'server':\n resource = resource.replace(CDN_DIST, LOCAL_DIST)\n if (resource.startswith(state.base_url) or resource.startswith('static/')):\n if resource.startswith(state.base_url):\n resource = resource[len(state.base_url):]\n if state.rel_path:\n resource = f'{state.rel_path}/{resource}'\n elif self.absolute and self.mode == 'server':\n resource = f'{self.root_url}{resource}'\n new_resources.append(resource)\n return new_resources\n\n def clone(self, *, components=None) -> Resources:\n \"\"\"\n Make a clone of a resources instance allowing to override its components.\n \"\"\"\n return Resources(\n mode=self.mode,\n version=self.version,\n root_dir=self.root_dir,\n dev=self.dev,\n minified=self.minified,\n log_level=self.log_level,\n root_url=self._root_url,\n path_versioner=self.path_versioner,\n components=components if components is not None else list(self.components),\n base_dir=self.base_dir,\n notebook=self.notebook,\n absolute=self.absolute\n )\n\n @property\n def dist_dir(self):\n if self.notebook and self.mode == 'server':\n dist_dir = '/panel-preview/static/extensions/panel/'\n elif self.mode == 'server':\n if state.rel_path:\n dist_dir = f'{state.rel_path}/{LOCAL_DIST}'\n else:\n dist_dir = LOCAL_DIST\n if self.absolute:\n dist_dir = f'{self.root_url}{dist_dir}'\n else:\n dist_dir = CDN_DIST\n return dist_dir\n\n @property\n def css_files(self):\n from ..config import config\n\n files = super(Resources, self).css_files\n self.extra_resources(files, '__css__')\n css_files = self.adjust_paths([\n css for css in files if self.mode != 'inline' or not is_cdn_url(css)\n ])\n if config.design:\n css_files += list(config.design._resources.get('font', {}).values())\n for cssf in config.css_files:\n if os.path.isfile(cssf) or cssf in files:\n continue\n css_files.append(cssf)\n return css_files\n\n @property\n def css_raw(self):\n from ..config import config\n raw = super(Resources, self).css_raw\n\n # Inline local dist resources\n css_files = self._collect_external_resources(\"__css__\")\n self.extra_resources(css_files, '__css__')\n raw += [\n (DIST_DIR / css.replace(CDN_DIST, '')).read_text(encoding='utf-8')\n for css in css_files if is_cdn_url(css)\n ]\n\n # Add local CSS files\n for cssf in config.css_files:\n if not os.path.isfile(cssf):\n continue\n css_txt = process_raw_css([Path(cssf).read_text(encoding='utf-8')])[0]\n if css_txt not in raw:\n raw.append(css_txt)\n\n # Add loading spinner\n if config.global_loading_spinner:\n loading_base = (DIST_DIR / \"css\" / \"loading.css\").read_text(encoding='utf-8')\n raw.extend([loading_base, loading_css(\n config.loading_spinner, config.loading_color, config.loading_max_height\n )])\n return raw + process_raw_css(config.raw_css) + process_raw_css(config.global_css)\n\n @property\n def js_files(self):\n from ..config import config\n\n # Gather JS files\n files = super(Resources, self).js_files\n self.extra_resources(files, '__javascript__')\n files += [js for js in config.js_files.values()]\n if config.design:\n design_js = config.design().resolve_resources(\n cdn=self.notebook or 'auto', include_theme=False\n )['js'].values()\n files += [res for res in design_js if res not in files]\n\n # Filter and adjust JS file urls\n js_files = 
self.adjust_paths([\n js for js in files if self.mode != 'inline' or not is_cdn_url(js)\n ])\n\n # Load requirejs last to avoid interfering with other libraries\n dist_dir = self.dist_dir\n require_index = [i for i, jsf in enumerate(js_files) if 'require' in jsf]\n if require_index:\n requirejs = js_files.pop(require_index[0])\n if any('ace' in jsf for jsf in js_files):\n js_files.append(dist_dir + 'pre_require.js')\n js_files.append(requirejs)\n if any('ace' in jsf for jsf in js_files):\n js_files.append(dist_dir + 'post_require.js')\n return js_files\n\n @property\n def js_modules(self):\n from ..config import config\n from ..reactive import ReactiveHTML\n\n modules = list(config.js_modules.values())\n self.extra_resources(modules, '__javascript_modules__')\n if config.design:\n design_resources = config.design().resolve_resources(\n cdn=self.notebook or 'auto', include_theme=False\n )\n modules += [\n res for res in design_resources['js_modules'].values()\n if res not in modules\n ]\n\n for model in param.concrete_descendents(ReactiveHTML).values():\n if not (getattr(model, '__javascript_modules__', None) and model._loaded()):\n continue\n for js_module in model.__javascript_modules__:\n if not isurl(js_module) and not js_module.startswith('static/extensions'):\n js_module = component_resource_path(model, '__javascript_modules__', js_module)\n if js_module not in modules:\n modules.append(js_module)\n\n return self.adjust_paths(modules)\n\n @property\n def js_module_exports(self):\n modules = {}\n for model in Model.model_class_reverse_map.values():\n if hasattr(model, '__javascript_module_exports__'):\n modules.update(dict(zip(model.__javascript_module_exports__, model.__javascript_modules__)))\n return modules\n\n @property\n def js_raw(self):\n raw_js = super(Resources, self).js_raw\n if not self.mode == 'inline':\n return raw_js\n\n # Inline local dist resources\n js_files = self._collect_external_resources(\"__javascript__\")\n self.extra_resources(js_files, '__javascript__')\n raw_js += [\n (DIST_DIR / js.replace(CDN_DIST, '')).read_text(encoding='utf-8')\n for js in js_files if is_cdn_url(js)\n ]\n\n # Inline config.js_files\n from ..config import config\n raw_js += [\n Path(js).read_text(encoding='utf-8') for js in config.js_files.values()\n if os.path.isfile(js)\n ]\n\n # Inline config.design JS resources\n if config.design:\n design_js = config.design().resolve_resources(\n cdn=True, include_theme=False\n )['js'].values()\n raw_js += [\n (DIST_DIR / js.replace(CDN_DIST, '')).read_text(encoding='utf-8')\n for js in design_js if is_cdn_url(js)\n ]\n return raw_js\n\n @property\n def render_js(self):\n return JS_RESOURCES.render(\n js_raw=self.js_raw, js_files=self.js_files,\n js_modules=self.js_modules, hashes=self.hashes,\n js_module_exports=self.js_module_exports\n )\n\n\nclass Bundle(BkBundle):\n\n def __init__(self, notebook=False, **kwargs):\n self.js_modules = kwargs.pop(\"js_modules\", [])\n self.js_module_exports = kwargs.pop(\"js_module_exports\", {})\n self.notebook = notebook\n super().__init__(**kwargs)\n\n @classmethod\n def from_bokeh(cls, bk_bundle, notebook=False):\n return cls(\n notebook=notebook,\n js_files=bk_bundle.js_files,\n js_raw=bk_bundle.js_raw,\n css_files=bk_bundle.css_files,\n css_raw=bk_bundle.css_raw,\n hashes=bk_bundle.hashes,\n )\n\n def _render_css(self) -> str:\n return BkCSS_RESOURCES.render(\n css_files=self.css_files,\n css_raw=self.css_raw\n )\n\n def _render_js(self):\n return JS_RESOURCES.render(\n js_raw=self.js_raw,\n 
js_files=self.js_files,\n js_modules=self.js_modules,\n js_module_exports=self.js_module_exports,\n hashes=self.hashes\n )\n", "path": "panel/io/resources.py"}]} |
gh_patches_debug_1388 | rasdani/github-patches | git_diff | saleor__saleor-3981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Catch all exceptions when decoding JWT token in TokenVerify mutation
Currently, the `TokenVerify` mutation returns an error if the token is malformed or was created with a different secret key. Instead, it should always return `null` if the token is not valid.
--- END ISSUE ---
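For context, the change the issue asks for amounts to catching the token-decoding errors inside the verification mutation and returning `None` instead of letting them propagate. A minimal sketch of that pattern, assuming `graphql_jwt`'s `Verify` base class and its `JSONWebTokenError` exception (both already imported in the file below); this illustrates the approach, not necessarily the exact patch:

```python
from graphql_jwt import Verify
from graphql_jwt.exceptions import JSONWebTokenError


class VerifyToken(Verify):
    """Confirm whether a token is valid; return null instead of an error."""

    # The real mutation also exposes a `user` field resolved from the token
    # payload; it is omitted here to keep the sketch self-contained.

    @classmethod
    def mutate(cls, root, info, token, **kwargs):
        try:
            # graphql_jwt performs the actual signature/expiry check here.
            return super().mutate(root, info, token, **kwargs)
        except JSONWebTokenError:
            # Malformed token or wrong secret key: treat as "not valid".
            return None
```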
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/core/mutations.py`
Content:
```
1 from itertools import chain
2 from textwrap import dedent
3 from typing import Tuple
4
5 import graphene
6 from django.contrib.auth import get_user_model
7 from django.core.exceptions import (
8 NON_FIELD_ERRORS, ImproperlyConfigured, ValidationError)
9 from django.db.models.fields.files import FileField
10 from graphene.types.mutation import MutationOptions
11 from graphene_django.registry import get_global_registry
12 from graphql.error import GraphQLError
13 from graphql_jwt import ObtainJSONWebToken, Verify
14 from graphql_jwt.exceptions import JSONWebTokenError, PermissionDenied
15
16 from ...account import models
17 from ..account.types import User
18 from ..utils import get_nodes
19 from .types import Error, Upload
20 from .utils import snake_to_camel_case
21
22 registry = get_global_registry()
23
24
25 def get_model_name(model):
26 """Return name of the model with first letter lowercase."""
27 model_name = model.__name__
28 return model_name[:1].lower() + model_name[1:]
29
30
31 def get_output_fields(model, return_field_name):
32 """Return mutation output field for model instance."""
33 model_type = registry.get_type_for_model(model)
34 if not model_type:
35 raise ImproperlyConfigured(
36 'Unable to find type for model %s in graphene registry' %
37 model.__name__)
38 fields = {return_field_name: graphene.Field(model_type)}
39 return fields
40
41
42 def validation_error_to_error_type(validation_error: ValidationError) -> list:
43 """Convert a ValidationError into a list of Error types."""
44 err_list = []
45 if hasattr(validation_error, 'error_dict'):
46 # convert field errors
47 for field, field_errors in validation_error.message_dict.items():
48 for err in field_errors:
49 field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(
50 field)
51 err_list.append(Error(field=field, message=err))
52 else:
53 # convert non-field errors
54 for err in validation_error.error_list:
55 err_list.append(Error(message=err.message))
56 return err_list
57
58
59 class ModelMutationOptions(MutationOptions):
60 exclude = None
61 model = None
62 return_field_name = None
63
64
65 class BaseMutation(graphene.Mutation):
66 errors = graphene.List(
67 graphene.NonNull(Error),
68 description='List of errors that occurred executing the mutation.')
69
70 class Meta:
71 abstract = True
72
73 @classmethod
74 def __init_subclass_with_meta__(
75 cls, description=None, permissions: Tuple = None,
76 _meta=None, **options):
77
78 if not _meta:
79 _meta = MutationOptions(cls)
80
81 if not description:
82 raise ImproperlyConfigured('No description provided in Meta')
83 description = dedent(description)
84
85 if isinstance(permissions, str):
86 permissions = (permissions, )
87
88 if permissions and not isinstance(permissions, tuple):
89 raise ImproperlyConfigured(
90 'Permissions should be a tuple or a string in Meta')
91
92 _meta.permissions = permissions
93 super().__init_subclass_with_meta__(
94 description=description, _meta=_meta, **options)
95
96 @classmethod
97 def _update_mutation_arguments_and_fields(cls, arguments, fields):
98 cls._meta.arguments.update(arguments)
99 cls._meta.fields.update(fields)
100
101 @classmethod
102 def get_node_or_error(cls, info, node_id, field='id', only_type=None):
103 if not node_id:
104 return None
105
106 try:
107 node = graphene.Node.get_node_from_global_id(
108 info, node_id, only_type)
109 except (AssertionError, GraphQLError) as e:
110 raise ValidationError({field: str(e)})
111 else:
112 if node is None:
113 raise ValidationError({
114 field: "Couldn't resolve to a node: %s" % node_id})
115 return node
116
117 @classmethod
118 def get_nodes_or_error(cls, ids, field, only_type=None):
119 try:
120 instances = get_nodes(ids, only_type)
121 except GraphQLError as e:
122 raise ValidationError({field: str(e)})
123 return instances
124
125 @classmethod
126 def clean_instance(cls, instance):
127 """Clean the instance that was created using the input data.
128
129 Once an instance is created, this method runs `full_clean()` to perform
130 model validation.
131 """
132 try:
133 instance.full_clean()
134 except ValidationError as error:
135 if hasattr(cls._meta, 'exclude'):
136 # Ignore validation errors for fields that are specified as
137 # excluded.
138 new_error_dict = {}
139 for field, errors in error.error_dict.items():
140 if field not in cls._meta.exclude:
141 new_error_dict[field] = errors
142 error.error_dict = new_error_dict
143
144 if error.error_dict:
145 raise error
146
147 @classmethod
148 def construct_instance(cls, instance, cleaned_data):
149 """Fill instance fields with cleaned data.
150
151 The `instance` argument is either an empty instance of a already
152 existing one which was fetched from the database. `cleaned_data` is
153 data to be set in instance fields. Returns `instance` with filled
154 fields, but not saved to the database.
155 """
156 from django.db import models
157 opts = instance._meta
158
159 for f in opts.fields:
160 if any([not f.editable, isinstance(f, models.AutoField),
161 f.name not in cleaned_data]):
162 continue
163 data = cleaned_data[f.name]
164 if data is None:
165 # We want to reset the file field value when None was passed
166 # in the input, but `FileField.save_form_data` ignores None
167 # values. In that case we manually pass False which clears
168 # the file.
169 if isinstance(f, FileField):
170 data = False
171 if not f.null:
172 data = f._get_default()
173 f.save_form_data(instance, data)
174 return instance
175
176 @classmethod
177 def check_permissions(cls, user):
178 """Determine whether user has rights to perform this mutation.
179
180 Default implementation assumes that user is allowed to perform any
181 mutation. By overriding this method or defining required permissions
182 in the meta-class, you can restrict access to it.
183
184 The `user` parameter is the User instance associated with the request.
185 """
186 if cls._meta.permissions:
187 return user.has_perms(cls._meta.permissions)
188 return True
189
190 @classmethod
191 def mutate(cls, root, info, **data):
192 if not cls.check_permissions(info.context.user):
193 raise PermissionDenied()
194
195 try:
196 response = cls.perform_mutation(root, info, **data)
197 if response.errors is None:
198 response.errors = []
199 return response
200 except ValidationError as e:
201 errors = validation_error_to_error_type(e)
202 return cls(errors=errors)
203
204 @classmethod
205 def perform_mutation(cls, root, info, **data):
206 pass
207
208
209 class ModelMutation(BaseMutation):
210 class Meta:
211 abstract = True
212
213 @classmethod
214 def __init_subclass_with_meta__(
215 cls,
216 arguments=None,
217 model=None,
218 exclude=None,
219 return_field_name=None,
220 _meta=None,
221 **options):
222 if not model:
223 raise ImproperlyConfigured('model is required for ModelMutation')
224 if not _meta:
225 _meta = ModelMutationOptions(cls)
226
227 if exclude is None:
228 exclude = []
229
230 if not return_field_name:
231 return_field_name = get_model_name(model)
232 if arguments is None:
233 arguments = {}
234 fields = get_output_fields(model, return_field_name)
235
236 _meta.model = model
237 _meta.return_field_name = return_field_name
238 _meta.exclude = exclude
239 super().__init_subclass_with_meta__(_meta=_meta, **options)
240 cls._update_mutation_arguments_and_fields(
241 arguments=arguments, fields=fields)
242
243 @classmethod
244 def clean_input(cls, info, instance, data):
245 """Clean input data received from mutation arguments.
246
247 Fields containing IDs or lists of IDs are automatically resolved into
248 model instances. `instance` argument is the model instance the mutation
249 is operating on (before setting the input data). `input` is raw input
250 data the mutation receives.
251
252 Override this method to provide custom transformations of incoming
253 data.
254 """
255
256 def is_list_of_ids(field):
257 return (
258 isinstance(field.type, graphene.List)
259 and field.type.of_type == graphene.ID)
260
261 def is_id_field(field):
262 return (
263 field.type == graphene.ID
264 or isinstance(field.type, graphene.NonNull)
265 and field.type.of_type == graphene.ID)
266
267 def is_upload_field(field):
268 if hasattr(field.type, 'of_type'):
269 return field.type.of_type == Upload
270 return field.type == Upload
271
272 input_cls = getattr(cls.Arguments, 'input')
273 cleaned_input = {}
274
275 for field_name, field_item in input_cls._meta.fields.items():
276 if field_name in data:
277 value = data[field_name]
278
279 # handle list of IDs field
280 if value is not None and is_list_of_ids(field_item):
281 instances = cls.get_nodes_or_error(
282 value, field_name) if value else []
283 cleaned_input[field_name] = instances
284
285 # handle ID field
286 elif value is not None and is_id_field(field_item):
287 instance = cls.get_node_or_error(info, value, field_name)
288 cleaned_input[field_name] = instance
289
290 # handle uploaded files
291 elif value is not None and is_upload_field(field_item):
292 value = info.context.FILES.get(value)
293 cleaned_input[field_name] = value
294
295 # handle other fields
296 else:
297 cleaned_input[field_name] = value
298 return cleaned_input
299
300 @classmethod
301 def _save_m2m(cls, info, instance, cleaned_data):
302 opts = instance._meta
303 for f in chain(opts.many_to_many, opts.private_fields):
304 if not hasattr(f, 'save_form_data'):
305 continue
306 if f.name in cleaned_data and cleaned_data[f.name] is not None:
307 f.save_form_data(instance, cleaned_data[f.name])
308
309 @classmethod
310 def success_response(cls, instance):
311 """Return a success response."""
312 return cls(**{cls._meta.return_field_name: instance, 'errors': []})
313
314 @classmethod
315 def save(cls, info, instance, cleaned_input):
316 instance.save()
317
318 @classmethod
319 def get_instance(cls, info, **data):
320 object_id = data.get('id')
321 if object_id:
322 model_type = registry.get_type_for_model(cls._meta.model)
323 instance = cls.get_node_or_error(
324 info, object_id, only_type=model_type)
325 else:
326 instance = cls._meta.model()
327 return instance
328
329 @classmethod
330 def perform_mutation(cls, _root, info, **data):
331 """Perform model mutation.
332
333 Depending on the input data, `mutate` either creates a new instance or
334 updates an existing one. If `id` argument is present, it is assumed
335 that this is an "update" mutation. Otherwise, a new instance is
336 created based on the model associated with this mutation.
337 """
338 instance = cls.get_instance(info, **data)
339 data = data.get('input')
340 cleaned_input = cls.clean_input(info, instance, data)
341 instance = cls.construct_instance(instance, cleaned_input)
342 cls.clean_instance(instance)
343 cls.save(info, instance, cleaned_input)
344 cls._save_m2m(info, instance, cleaned_input)
345 return cls.success_response(instance)
346
347
348 class ModelDeleteMutation(ModelMutation):
349 class Meta:
350 abstract = True
351
352 @classmethod
353 def clean_instance(cls, info, instance):
354 """Perform additional logic before deleting the model instance.
355
356 Override this method to raise custom validation error and abort
357 the deletion process.
358 """
359
360 @classmethod
361 def perform_mutation(cls, _root, info, **data):
362 """Perform a mutation that deletes a model instance."""
363 if not cls.check_permissions(info.context.user):
364 raise PermissionDenied()
365
366 node_id = data.get('id')
367 model_type = registry.get_type_for_model(cls._meta.model)
368 instance = cls.get_node_or_error(info, node_id, only_type=model_type)
369
370 if instance:
371 cls.clean_instance(info, instance)
372
373 db_id = instance.id
374 instance.delete()
375
376 # After the instance is deleted, set its ID to the original database's
377 # ID so that the success response contains ID of the deleted object.
378 instance.id = db_id
379 return cls.success_response(instance)
380
381
382 class BaseBulkMutation(BaseMutation):
383 count = graphene.Int(
384 required=True, description='Returns how many objects were affected.')
385
386 class Meta:
387 abstract = True
388
389 @classmethod
390 def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):
391 if not model:
392 raise ImproperlyConfigured('model is required for bulk mutation')
393 if not _meta:
394 _meta = ModelMutationOptions(cls)
395 _meta.model = model
396
397 super().__init_subclass_with_meta__(_meta=_meta, **kwargs)
398
399 @classmethod
400 def clean_instance(cls, info, instance):
401 """Perform additional logic.
402
403 Override this method to raise custom validation error and prevent
404 bulk action on the instance.
405 """
406
407 @classmethod
408 def bulk_action(cls, queryset, **kwargs):
409 """Implement action performed on queryset."""
410 raise NotImplementedError
411
412 @classmethod
413 def perform_mutation(cls, _root, info, ids, **data):
414 """Perform a mutation that deletes a list of model instances."""
415 clean_instance_ids, errors = [], {}
416 instance_model = cls._meta.model
417 model_type = registry.get_type_for_model(instance_model)
418 instances = cls.get_nodes_or_error(ids, 'id', model_type)
419 for instance, node_id in zip(instances, ids):
420 instance_errors = []
421
422 # catch individual validation errors to raise them later as
423 # a single error
424 try:
425 cls.clean_instance(info, instance)
426 except ValidationError as e:
427 msg = '. '.join(e.messages)
428 instance_errors.append(msg)
429
430 if not instance_errors:
431 clean_instance_ids.append(instance.pk)
432 else:
433 instance_errors_msg = '. '.join(instance_errors)
434 ValidationError({
435 node_id: instance_errors_msg}).update_error_dict(errors)
436
437 if errors:
438 errors = ValidationError(errors)
439 count = len(clean_instance_ids)
440 if count:
441 qs = instance_model.objects.filter(pk__in=clean_instance_ids)
442 cls.bulk_action(queryset=qs, **data)
443 return count, errors
444
445 @classmethod
446 def mutate(cls, root, info, **data):
447 if not cls.check_permissions(info.context.user):
448 raise PermissionDenied()
449
450 count, errors = cls.perform_mutation(root, info, **data)
451 if errors:
452 errors = validation_error_to_error_type(errors)
453 return cls(errors=errors, count=count)
454
455
456 class ModelBulkDeleteMutation(BaseBulkMutation):
457 class Meta:
458 abstract = True
459
460 @classmethod
461 def bulk_action(cls, queryset):
462 queryset.delete()
463
464
465 class CreateToken(ObtainJSONWebToken):
466 """Mutation that authenticates a user and returns token and user data.
467
468 It overrides the default graphql_jwt.ObtainJSONWebToken to wrap potential
469 authentication errors in our Error type, which is consistent to how rest of
470 the mutation works.
471 """
472
473 errors = graphene.List(Error, required=True)
474 user = graphene.Field(User)
475
476 @classmethod
477 def mutate(cls, root, info, **kwargs):
478 try:
479 result = super().mutate(root, info, **kwargs)
480 except JSONWebTokenError as e:
481 return CreateToken(errors=[Error(message=str(e))])
482 else:
483 return result
484
485 @classmethod
486 def resolve(cls, root, info, **kwargs):
487 return cls(user=info.context.user, errors=[])
488
489
490 class VerifyToken(Verify):
491 """Mutation that confirm if token is valid and also return user data."""
492
493 user = graphene.Field(User)
494
495 def resolve_user(self, _info, **_kwargs):
496 username_field = get_user_model().USERNAME_FIELD
497 kwargs = {username_field: self.payload.get(username_field)}
498 return models.User.objects.get(**kwargs)
499
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/graphql/core/mutations.py b/saleor/graphql/core/mutations.py
--- a/saleor/graphql/core/mutations.py
+++ b/saleor/graphql/core/mutations.py
@@ -496,3 +496,10 @@
username_field = get_user_model().USERNAME_FIELD
kwargs = {username_field: self.payload.get(username_field)}
return models.User.objects.get(**kwargs)
+
+ @classmethod
+ def mutate(cls, root, info, token, **kwargs):
+ try:
+ return super().mutate(root, info, token, **kwargs)
+ except JSONWebTokenError:
+ return None
| {"golden_diff": "diff --git a/saleor/graphql/core/mutations.py b/saleor/graphql/core/mutations.py\n--- a/saleor/graphql/core/mutations.py\n+++ b/saleor/graphql/core/mutations.py\n@@ -496,3 +496,10 @@\n username_field = get_user_model().USERNAME_FIELD\n kwargs = {username_field: self.payload.get(username_field)}\n return models.User.objects.get(**kwargs)\n+\n+ @classmethod\n+ def mutate(cls, root, info, token, **kwargs):\n+ try:\n+ return super().mutate(root, info, token, **kwargs)\n+ except JSONWebTokenError:\n+ return None\n", "issue": "Catch all exceptions when decoding JWT token in TokenVerify mutation\nCurrently, `TokenVerify` mutation returns an error if the token is malformed or is created with the different secret key. Instead, it should always return `null` if the token is not valid.\n", "before_files": [{"content": "from itertools import chain\nfrom textwrap import dedent\nfrom typing import Tuple\n\nimport graphene\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import (\n NON_FIELD_ERRORS, ImproperlyConfigured, ValidationError)\nfrom django.db.models.fields.files import FileField\nfrom graphene.types.mutation import MutationOptions\nfrom graphene_django.registry import get_global_registry\nfrom graphql.error import GraphQLError\nfrom graphql_jwt import ObtainJSONWebToken, Verify\nfrom graphql_jwt.exceptions import JSONWebTokenError, PermissionDenied\n\nfrom ...account import models\nfrom ..account.types import User\nfrom ..utils import get_nodes\nfrom .types import Error, Upload\nfrom .utils import snake_to_camel_case\n\nregistry = get_global_registry()\n\n\ndef get_model_name(model):\n \"\"\"Return name of the model with first letter lowercase.\"\"\"\n model_name = model.__name__\n return model_name[:1].lower() + model_name[1:]\n\n\ndef get_output_fields(model, return_field_name):\n \"\"\"Return mutation output field for model instance.\"\"\"\n model_type = registry.get_type_for_model(model)\n if not model_type:\n raise ImproperlyConfigured(\n 'Unable to find type for model %s in graphene registry' %\n model.__name__)\n fields = {return_field_name: graphene.Field(model_type)}\n return fields\n\n\ndef validation_error_to_error_type(validation_error: ValidationError) -> list:\n \"\"\"Convert a ValidationError into a list of Error types.\"\"\"\n err_list = []\n if hasattr(validation_error, 'error_dict'):\n # convert field errors\n for field, field_errors in validation_error.message_dict.items():\n for err in field_errors:\n field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(\n field)\n err_list.append(Error(field=field, message=err))\n else:\n # convert non-field errors\n for err in validation_error.error_list:\n err_list.append(Error(message=err.message))\n return err_list\n\n\nclass ModelMutationOptions(MutationOptions):\n exclude = None\n model = None\n return_field_name = None\n\n\nclass BaseMutation(graphene.Mutation):\n errors = graphene.List(\n graphene.NonNull(Error),\n description='List of errors that occurred executing the mutation.')\n\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(\n cls, description=None, permissions: Tuple = None,\n _meta=None, **options):\n\n if not _meta:\n _meta = MutationOptions(cls)\n\n if not description:\n raise ImproperlyConfigured('No description provided in Meta')\n description = dedent(description)\n\n if isinstance(permissions, str):\n permissions = (permissions, )\n\n if permissions and not isinstance(permissions, tuple):\n raise ImproperlyConfigured(\n 
'Permissions should be a tuple or a string in Meta')\n\n _meta.permissions = permissions\n super().__init_subclass_with_meta__(\n description=description, _meta=_meta, **options)\n\n @classmethod\n def _update_mutation_arguments_and_fields(cls, arguments, fields):\n cls._meta.arguments.update(arguments)\n cls._meta.fields.update(fields)\n\n @classmethod\n def get_node_or_error(cls, info, node_id, field='id', only_type=None):\n if not node_id:\n return None\n\n try:\n node = graphene.Node.get_node_from_global_id(\n info, node_id, only_type)\n except (AssertionError, GraphQLError) as e:\n raise ValidationError({field: str(e)})\n else:\n if node is None:\n raise ValidationError({\n field: \"Couldn't resolve to a node: %s\" % node_id})\n return node\n\n @classmethod\n def get_nodes_or_error(cls, ids, field, only_type=None):\n try:\n instances = get_nodes(ids, only_type)\n except GraphQLError as e:\n raise ValidationError({field: str(e)})\n return instances\n\n @classmethod\n def clean_instance(cls, instance):\n \"\"\"Clean the instance that was created using the input data.\n\n Once an instance is created, this method runs `full_clean()` to perform\n model validation.\n \"\"\"\n try:\n instance.full_clean()\n except ValidationError as error:\n if hasattr(cls._meta, 'exclude'):\n # Ignore validation errors for fields that are specified as\n # excluded.\n new_error_dict = {}\n for field, errors in error.error_dict.items():\n if field not in cls._meta.exclude:\n new_error_dict[field] = errors\n error.error_dict = new_error_dict\n\n if error.error_dict:\n raise error\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n \"\"\"Fill instance fields with cleaned data.\n\n The `instance` argument is either an empty instance of a already\n existing one which was fetched from the database. `cleaned_data` is\n data to be set in instance fields. Returns `instance` with filled\n fields, but not saved to the database.\n \"\"\"\n from django.db import models\n opts = instance._meta\n\n for f in opts.fields:\n if any([not f.editable, isinstance(f, models.AutoField),\n f.name not in cleaned_data]):\n continue\n data = cleaned_data[f.name]\n if data is None:\n # We want to reset the file field value when None was passed\n # in the input, but `FileField.save_form_data` ignores None\n # values. In that case we manually pass False which clears\n # the file.\n if isinstance(f, FileField):\n data = False\n if not f.null:\n data = f._get_default()\n f.save_form_data(instance, data)\n return instance\n\n @classmethod\n def check_permissions(cls, user):\n \"\"\"Determine whether user has rights to perform this mutation.\n\n Default implementation assumes that user is allowed to perform any\n mutation. 
By overriding this method or defining required permissions\n in the meta-class, you can restrict access to it.\n\n The `user` parameter is the User instance associated with the request.\n \"\"\"\n if cls._meta.permissions:\n return user.has_perms(cls._meta.permissions)\n return True\n\n @classmethod\n def mutate(cls, root, info, **data):\n if not cls.check_permissions(info.context.user):\n raise PermissionDenied()\n\n try:\n response = cls.perform_mutation(root, info, **data)\n if response.errors is None:\n response.errors = []\n return response\n except ValidationError as e:\n errors = validation_error_to_error_type(e)\n return cls(errors=errors)\n\n @classmethod\n def perform_mutation(cls, root, info, **data):\n pass\n\n\nclass ModelMutation(BaseMutation):\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(\n cls,\n arguments=None,\n model=None,\n exclude=None,\n return_field_name=None,\n _meta=None,\n **options):\n if not model:\n raise ImproperlyConfigured('model is required for ModelMutation')\n if not _meta:\n _meta = ModelMutationOptions(cls)\n\n if exclude is None:\n exclude = []\n\n if not return_field_name:\n return_field_name = get_model_name(model)\n if arguments is None:\n arguments = {}\n fields = get_output_fields(model, return_field_name)\n\n _meta.model = model\n _meta.return_field_name = return_field_name\n _meta.exclude = exclude\n super().__init_subclass_with_meta__(_meta=_meta, **options)\n cls._update_mutation_arguments_and_fields(\n arguments=arguments, fields=fields)\n\n @classmethod\n def clean_input(cls, info, instance, data):\n \"\"\"Clean input data received from mutation arguments.\n\n Fields containing IDs or lists of IDs are automatically resolved into\n model instances. `instance` argument is the model instance the mutation\n is operating on (before setting the input data). 
`input` is raw input\n data the mutation receives.\n\n Override this method to provide custom transformations of incoming\n data.\n \"\"\"\n\n def is_list_of_ids(field):\n return (\n isinstance(field.type, graphene.List)\n and field.type.of_type == graphene.ID)\n\n def is_id_field(field):\n return (\n field.type == graphene.ID\n or isinstance(field.type, graphene.NonNull)\n and field.type.of_type == graphene.ID)\n\n def is_upload_field(field):\n if hasattr(field.type, 'of_type'):\n return field.type.of_type == Upload\n return field.type == Upload\n\n input_cls = getattr(cls.Arguments, 'input')\n cleaned_input = {}\n\n for field_name, field_item in input_cls._meta.fields.items():\n if field_name in data:\n value = data[field_name]\n\n # handle list of IDs field\n if value is not None and is_list_of_ids(field_item):\n instances = cls.get_nodes_or_error(\n value, field_name) if value else []\n cleaned_input[field_name] = instances\n\n # handle ID field\n elif value is not None and is_id_field(field_item):\n instance = cls.get_node_or_error(info, value, field_name)\n cleaned_input[field_name] = instance\n\n # handle uploaded files\n elif value is not None and is_upload_field(field_item):\n value = info.context.FILES.get(value)\n cleaned_input[field_name] = value\n\n # handle other fields\n else:\n cleaned_input[field_name] = value\n return cleaned_input\n\n @classmethod\n def _save_m2m(cls, info, instance, cleaned_data):\n opts = instance._meta\n for f in chain(opts.many_to_many, opts.private_fields):\n if not hasattr(f, 'save_form_data'):\n continue\n if f.name in cleaned_data and cleaned_data[f.name] is not None:\n f.save_form_data(instance, cleaned_data[f.name])\n\n @classmethod\n def success_response(cls, instance):\n \"\"\"Return a success response.\"\"\"\n return cls(**{cls._meta.return_field_name: instance, 'errors': []})\n\n @classmethod\n def save(cls, info, instance, cleaned_input):\n instance.save()\n\n @classmethod\n def get_instance(cls, info, **data):\n object_id = data.get('id')\n if object_id:\n model_type = registry.get_type_for_model(cls._meta.model)\n instance = cls.get_node_or_error(\n info, object_id, only_type=model_type)\n else:\n instance = cls._meta.model()\n return instance\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n \"\"\"Perform model mutation.\n\n Depending on the input data, `mutate` either creates a new instance or\n updates an existing one. If `id` argument is present, it is assumed\n that this is an \"update\" mutation. 
Otherwise, a new instance is\n created based on the model associated with this mutation.\n \"\"\"\n instance = cls.get_instance(info, **data)\n data = data.get('input')\n cleaned_input = cls.clean_input(info, instance, data)\n instance = cls.construct_instance(instance, cleaned_input)\n cls.clean_instance(instance)\n cls.save(info, instance, cleaned_input)\n cls._save_m2m(info, instance, cleaned_input)\n return cls.success_response(instance)\n\n\nclass ModelDeleteMutation(ModelMutation):\n class Meta:\n abstract = True\n\n @classmethod\n def clean_instance(cls, info, instance):\n \"\"\"Perform additional logic before deleting the model instance.\n\n Override this method to raise custom validation error and abort\n the deletion process.\n \"\"\"\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n \"\"\"Perform a mutation that deletes a model instance.\"\"\"\n if not cls.check_permissions(info.context.user):\n raise PermissionDenied()\n\n node_id = data.get('id')\n model_type = registry.get_type_for_model(cls._meta.model)\n instance = cls.get_node_or_error(info, node_id, only_type=model_type)\n\n if instance:\n cls.clean_instance(info, instance)\n\n db_id = instance.id\n instance.delete()\n\n # After the instance is deleted, set its ID to the original database's\n # ID so that the success response contains ID of the deleted object.\n instance.id = db_id\n return cls.success_response(instance)\n\n\nclass BaseBulkMutation(BaseMutation):\n count = graphene.Int(\n required=True, description='Returns how many objects were affected.')\n\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):\n if not model:\n raise ImproperlyConfigured('model is required for bulk mutation')\n if not _meta:\n _meta = ModelMutationOptions(cls)\n _meta.model = model\n\n super().__init_subclass_with_meta__(_meta=_meta, **kwargs)\n\n @classmethod\n def clean_instance(cls, info, instance):\n \"\"\"Perform additional logic.\n\n Override this method to raise custom validation error and prevent\n bulk action on the instance.\n \"\"\"\n\n @classmethod\n def bulk_action(cls, queryset, **kwargs):\n \"\"\"Implement action performed on queryset.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def perform_mutation(cls, _root, info, ids, **data):\n \"\"\"Perform a mutation that deletes a list of model instances.\"\"\"\n clean_instance_ids, errors = [], {}\n instance_model = cls._meta.model\n model_type = registry.get_type_for_model(instance_model)\n instances = cls.get_nodes_or_error(ids, 'id', model_type)\n for instance, node_id in zip(instances, ids):\n instance_errors = []\n\n # catch individual validation errors to raise them later as\n # a single error\n try:\n cls.clean_instance(info, instance)\n except ValidationError as e:\n msg = '. '.join(e.messages)\n instance_errors.append(msg)\n\n if not instance_errors:\n clean_instance_ids.append(instance.pk)\n else:\n instance_errors_msg = '. 
'.join(instance_errors)\n ValidationError({\n node_id: instance_errors_msg}).update_error_dict(errors)\n\n if errors:\n errors = ValidationError(errors)\n count = len(clean_instance_ids)\n if count:\n qs = instance_model.objects.filter(pk__in=clean_instance_ids)\n cls.bulk_action(queryset=qs, **data)\n return count, errors\n\n @classmethod\n def mutate(cls, root, info, **data):\n if not cls.check_permissions(info.context.user):\n raise PermissionDenied()\n\n count, errors = cls.perform_mutation(root, info, **data)\n if errors:\n errors = validation_error_to_error_type(errors)\n return cls(errors=errors, count=count)\n\n\nclass ModelBulkDeleteMutation(BaseBulkMutation):\n class Meta:\n abstract = True\n\n @classmethod\n def bulk_action(cls, queryset):\n queryset.delete()\n\n\nclass CreateToken(ObtainJSONWebToken):\n \"\"\"Mutation that authenticates a user and returns token and user data.\n\n It overrides the default graphql_jwt.ObtainJSONWebToken to wrap potential\n authentication errors in our Error type, which is consistent to how rest of\n the mutation works.\n \"\"\"\n\n errors = graphene.List(Error, required=True)\n user = graphene.Field(User)\n\n @classmethod\n def mutate(cls, root, info, **kwargs):\n try:\n result = super().mutate(root, info, **kwargs)\n except JSONWebTokenError as e:\n return CreateToken(errors=[Error(message=str(e))])\n else:\n return result\n\n @classmethod\n def resolve(cls, root, info, **kwargs):\n return cls(user=info.context.user, errors=[])\n\n\nclass VerifyToken(Verify):\n \"\"\"Mutation that confirm if token is valid and also return user data.\"\"\"\n\n user = graphene.Field(User)\n\n def resolve_user(self, _info, **_kwargs):\n username_field = get_user_model().USERNAME_FIELD\n kwargs = {username_field: self.payload.get(username_field)}\n return models.User.objects.get(**kwargs)\n", "path": "saleor/graphql/core/mutations.py"}], "after_files": [{"content": "from itertools import chain\nfrom textwrap import dedent\nfrom typing import Tuple\n\nimport graphene\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import (\n NON_FIELD_ERRORS, ImproperlyConfigured, ValidationError)\nfrom django.db.models.fields.files import FileField\nfrom graphene.types.mutation import MutationOptions\nfrom graphene_django.registry import get_global_registry\nfrom graphql.error import GraphQLError\nfrom graphql_jwt import ObtainJSONWebToken, Verify\nfrom graphql_jwt.exceptions import JSONWebTokenError, PermissionDenied\n\nfrom ...account import models\nfrom ..account.types import User\nfrom ..utils import get_nodes\nfrom .types import Error, Upload\nfrom .utils import snake_to_camel_case\n\nregistry = get_global_registry()\n\n\ndef get_model_name(model):\n \"\"\"Return name of the model with first letter lowercase.\"\"\"\n model_name = model.__name__\n return model_name[:1].lower() + model_name[1:]\n\n\ndef get_output_fields(model, return_field_name):\n \"\"\"Return mutation output field for model instance.\"\"\"\n model_type = registry.get_type_for_model(model)\n if not model_type:\n raise ImproperlyConfigured(\n 'Unable to find type for model %s in graphene registry' %\n model.__name__)\n fields = {return_field_name: graphene.Field(model_type)}\n return fields\n\n\ndef validation_error_to_error_type(validation_error: ValidationError) -> list:\n \"\"\"Convert a ValidationError into a list of Error types.\"\"\"\n err_list = []\n if hasattr(validation_error, 'error_dict'):\n # convert field errors\n for field, field_errors in 
validation_error.message_dict.items():\n for err in field_errors:\n field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(\n field)\n err_list.append(Error(field=field, message=err))\n else:\n # convert non-field errors\n for err in validation_error.error_list:\n err_list.append(Error(message=err.message))\n return err_list\n\n\nclass ModelMutationOptions(MutationOptions):\n exclude = None\n model = None\n return_field_name = None\n\n\nclass BaseMutation(graphene.Mutation):\n errors = graphene.List(\n graphene.NonNull(Error),\n description='List of errors that occurred executing the mutation.')\n\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(\n cls, description=None, permissions: Tuple = None,\n _meta=None, **options):\n\n if not _meta:\n _meta = MutationOptions(cls)\n\n if not description:\n raise ImproperlyConfigured('No description provided in Meta')\n description = dedent(description)\n\n if isinstance(permissions, str):\n permissions = (permissions, )\n\n if permissions and not isinstance(permissions, tuple):\n raise ImproperlyConfigured(\n 'Permissions should be a tuple or a string in Meta')\n\n _meta.permissions = permissions\n super().__init_subclass_with_meta__(\n description=description, _meta=_meta, **options)\n\n @classmethod\n def _update_mutation_arguments_and_fields(cls, arguments, fields):\n cls._meta.arguments.update(arguments)\n cls._meta.fields.update(fields)\n\n @classmethod\n def get_node_or_error(cls, info, node_id, field='id', only_type=None):\n if not node_id:\n return None\n\n try:\n node = graphene.Node.get_node_from_global_id(\n info, node_id, only_type)\n except (AssertionError, GraphQLError) as e:\n raise ValidationError({field: str(e)})\n else:\n if node is None:\n raise ValidationError({\n field: \"Couldn't resolve to a node: %s\" % node_id})\n return node\n\n @classmethod\n def get_nodes_or_error(cls, ids, field, only_type=None):\n try:\n instances = get_nodes(ids, only_type)\n except GraphQLError as e:\n raise ValidationError({field: str(e)})\n return instances\n\n @classmethod\n def clean_instance(cls, instance):\n \"\"\"Clean the instance that was created using the input data.\n\n Once an instance is created, this method runs `full_clean()` to perform\n model validation.\n \"\"\"\n try:\n instance.full_clean()\n except ValidationError as error:\n if hasattr(cls._meta, 'exclude'):\n # Ignore validation errors for fields that are specified as\n # excluded.\n new_error_dict = {}\n for field, errors in error.error_dict.items():\n if field not in cls._meta.exclude:\n new_error_dict[field] = errors\n error.error_dict = new_error_dict\n\n if error.error_dict:\n raise error\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n \"\"\"Fill instance fields with cleaned data.\n\n The `instance` argument is either an empty instance of a already\n existing one which was fetched from the database. `cleaned_data` is\n data to be set in instance fields. Returns `instance` with filled\n fields, but not saved to the database.\n \"\"\"\n from django.db import models\n opts = instance._meta\n\n for f in opts.fields:\n if any([not f.editable, isinstance(f, models.AutoField),\n f.name not in cleaned_data]):\n continue\n data = cleaned_data[f.name]\n if data is None:\n # We want to reset the file field value when None was passed\n # in the input, but `FileField.save_form_data` ignores None\n # values. 
In that case we manually pass False which clears\n # the file.\n if isinstance(f, FileField):\n data = False\n if not f.null:\n data = f._get_default()\n f.save_form_data(instance, data)\n return instance\n\n @classmethod\n def check_permissions(cls, user):\n \"\"\"Determine whether user has rights to perform this mutation.\n\n Default implementation assumes that user is allowed to perform any\n mutation. By overriding this method or defining required permissions\n in the meta-class, you can restrict access to it.\n\n The `user` parameter is the User instance associated with the request.\n \"\"\"\n if cls._meta.permissions:\n return user.has_perms(cls._meta.permissions)\n return True\n\n @classmethod\n def mutate(cls, root, info, **data):\n if not cls.check_permissions(info.context.user):\n raise PermissionDenied()\n\n try:\n response = cls.perform_mutation(root, info, **data)\n if response.errors is None:\n response.errors = []\n return response\n except ValidationError as e:\n errors = validation_error_to_error_type(e)\n return cls(errors=errors)\n\n @classmethod\n def perform_mutation(cls, root, info, **data):\n pass\n\n\nclass ModelMutation(BaseMutation):\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(\n cls,\n arguments=None,\n model=None,\n exclude=None,\n return_field_name=None,\n _meta=None,\n **options):\n if not model:\n raise ImproperlyConfigured('model is required for ModelMutation')\n if not _meta:\n _meta = ModelMutationOptions(cls)\n\n if exclude is None:\n exclude = []\n\n if not return_field_name:\n return_field_name = get_model_name(model)\n if arguments is None:\n arguments = {}\n fields = get_output_fields(model, return_field_name)\n\n _meta.model = model\n _meta.return_field_name = return_field_name\n _meta.exclude = exclude\n super().__init_subclass_with_meta__(_meta=_meta, **options)\n cls._update_mutation_arguments_and_fields(\n arguments=arguments, fields=fields)\n\n @classmethod\n def clean_input(cls, info, instance, data):\n \"\"\"Clean input data received from mutation arguments.\n\n Fields containing IDs or lists of IDs are automatically resolved into\n model instances. `instance` argument is the model instance the mutation\n is operating on (before setting the input data). 
`input` is raw input\n data the mutation receives.\n\n Override this method to provide custom transformations of incoming\n data.\n \"\"\"\n\n def is_list_of_ids(field):\n return (\n isinstance(field.type, graphene.List)\n and field.type.of_type == graphene.ID)\n\n def is_id_field(field):\n return (\n field.type == graphene.ID\n or isinstance(field.type, graphene.NonNull)\n and field.type.of_type == graphene.ID)\n\n def is_upload_field(field):\n if hasattr(field.type, 'of_type'):\n return field.type.of_type == Upload\n return field.type == Upload\n\n input_cls = getattr(cls.Arguments, 'input')\n cleaned_input = {}\n\n for field_name, field_item in input_cls._meta.fields.items():\n if field_name in data:\n value = data[field_name]\n\n # handle list of IDs field\n if value is not None and is_list_of_ids(field_item):\n instances = cls.get_nodes_or_error(\n value, field_name) if value else []\n cleaned_input[field_name] = instances\n\n # handle ID field\n elif value is not None and is_id_field(field_item):\n instance = cls.get_node_or_error(info, value, field_name)\n cleaned_input[field_name] = instance\n\n # handle uploaded files\n elif value is not None and is_upload_field(field_item):\n value = info.context.FILES.get(value)\n cleaned_input[field_name] = value\n\n # handle other fields\n else:\n cleaned_input[field_name] = value\n return cleaned_input\n\n @classmethod\n def _save_m2m(cls, info, instance, cleaned_data):\n opts = instance._meta\n for f in chain(opts.many_to_many, opts.private_fields):\n if not hasattr(f, 'save_form_data'):\n continue\n if f.name in cleaned_data and cleaned_data[f.name] is not None:\n f.save_form_data(instance, cleaned_data[f.name])\n\n @classmethod\n def success_response(cls, instance):\n \"\"\"Return a success response.\"\"\"\n return cls(**{cls._meta.return_field_name: instance, 'errors': []})\n\n @classmethod\n def save(cls, info, instance, cleaned_input):\n instance.save()\n\n @classmethod\n def get_instance(cls, info, **data):\n object_id = data.get('id')\n if object_id:\n model_type = registry.get_type_for_model(cls._meta.model)\n instance = cls.get_node_or_error(\n info, object_id, only_type=model_type)\n else:\n instance = cls._meta.model()\n return instance\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n \"\"\"Perform model mutation.\n\n Depending on the input data, `mutate` either creates a new instance or\n updates an existing one. If `id` argument is present, it is assumed\n that this is an \"update\" mutation. 
Otherwise, a new instance is\n created based on the model associated with this mutation.\n \"\"\"\n instance = cls.get_instance(info, **data)\n data = data.get('input')\n cleaned_input = cls.clean_input(info, instance, data)\n instance = cls.construct_instance(instance, cleaned_input)\n cls.clean_instance(instance)\n cls.save(info, instance, cleaned_input)\n cls._save_m2m(info, instance, cleaned_input)\n return cls.success_response(instance)\n\n\nclass ModelDeleteMutation(ModelMutation):\n class Meta:\n abstract = True\n\n @classmethod\n def clean_instance(cls, info, instance):\n \"\"\"Perform additional logic before deleting the model instance.\n\n Override this method to raise custom validation error and abort\n the deletion process.\n \"\"\"\n\n @classmethod\n def perform_mutation(cls, _root, info, **data):\n \"\"\"Perform a mutation that deletes a model instance.\"\"\"\n if not cls.check_permissions(info.context.user):\n raise PermissionDenied()\n\n node_id = data.get('id')\n model_type = registry.get_type_for_model(cls._meta.model)\n instance = cls.get_node_or_error(info, node_id, only_type=model_type)\n\n if instance:\n cls.clean_instance(info, instance)\n\n db_id = instance.id\n instance.delete()\n\n # After the instance is deleted, set its ID to the original database's\n # ID so that the success response contains ID of the deleted object.\n instance.id = db_id\n return cls.success_response(instance)\n\n\nclass BaseBulkMutation(BaseMutation):\n count = graphene.Int(\n required=True, description='Returns how many objects were affected.')\n\n class Meta:\n abstract = True\n\n @classmethod\n def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):\n if not model:\n raise ImproperlyConfigured('model is required for bulk mutation')\n if not _meta:\n _meta = ModelMutationOptions(cls)\n _meta.model = model\n\n super().__init_subclass_with_meta__(_meta=_meta, **kwargs)\n\n @classmethod\n def clean_instance(cls, info, instance):\n \"\"\"Perform additional logic.\n\n Override this method to raise custom validation error and prevent\n bulk action on the instance.\n \"\"\"\n\n @classmethod\n def bulk_action(cls, queryset, **kwargs):\n \"\"\"Implement action performed on queryset.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def perform_mutation(cls, _root, info, ids, **data):\n \"\"\"Perform a mutation that deletes a list of model instances.\"\"\"\n clean_instance_ids, errors = [], {}\n instance_model = cls._meta.model\n model_type = registry.get_type_for_model(instance_model)\n instances = cls.get_nodes_or_error(ids, 'id', model_type)\n for instance, node_id in zip(instances, ids):\n instance_errors = []\n\n # catch individual validation errors to raise them later as\n # a single error\n try:\n cls.clean_instance(info, instance)\n except ValidationError as e:\n msg = '. '.join(e.messages)\n instance_errors.append(msg)\n\n if not instance_errors:\n clean_instance_ids.append(instance.pk)\n else:\n instance_errors_msg = '. 
'.join(instance_errors)\n ValidationError({\n node_id: instance_errors_msg}).update_error_dict(errors)\n\n if errors:\n errors = ValidationError(errors)\n count = len(clean_instance_ids)\n if count:\n qs = instance_model.objects.filter(pk__in=clean_instance_ids)\n cls.bulk_action(queryset=qs, **data)\n return count, errors\n\n @classmethod\n def mutate(cls, root, info, **data):\n if not cls.check_permissions(info.context.user):\n raise PermissionDenied()\n\n count, errors = cls.perform_mutation(root, info, **data)\n if errors:\n errors = validation_error_to_error_type(errors)\n return cls(errors=errors, count=count)\n\n\nclass ModelBulkDeleteMutation(BaseBulkMutation):\n class Meta:\n abstract = True\n\n @classmethod\n def bulk_action(cls, queryset):\n queryset.delete()\n\n\nclass CreateToken(ObtainJSONWebToken):\n \"\"\"Mutation that authenticates a user and returns token and user data.\n\n It overrides the default graphql_jwt.ObtainJSONWebToken to wrap potential\n authentication errors in our Error type, which is consistent to how rest of\n the mutation works.\n \"\"\"\n\n errors = graphene.List(Error, required=True)\n user = graphene.Field(User)\n\n @classmethod\n def mutate(cls, root, info, **kwargs):\n try:\n result = super().mutate(root, info, **kwargs)\n except JSONWebTokenError as e:\n return CreateToken(errors=[Error(message=str(e))])\n else:\n return result\n\n @classmethod\n def resolve(cls, root, info, **kwargs):\n return cls(user=info.context.user, errors=[])\n\n\nclass VerifyToken(Verify):\n \"\"\"Mutation that confirm if token is valid and also return user data.\"\"\"\n\n user = graphene.Field(User)\n\n def resolve_user(self, _info, **_kwargs):\n username_field = get_user_model().USERNAME_FIELD\n kwargs = {username_field: self.payload.get(username_field)}\n return models.User.objects.get(**kwargs)\n\n @classmethod\n def mutate(cls, root, info, token, **kwargs):\n try:\n return super().mutate(root, info, token, **kwargs)\n except JSONWebTokenError:\n return None\n", "path": "saleor/graphql/core/mutations.py"}]} |
gh_patches_debug_1389 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-828 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug in GeneralizedWassersteinDiceLoss
In **GeneralizedWassersteinDiceLoss**, `forward()` is missing the following cast:
if not (target.type() in [torch.LongTensor, torch.cuda.LongTensor]):
    target = target.long()
`wasserstein_distance_map` expects `flat_target` to be a long integer tensor, so without this cast the loss raises an error. Please fix.
--- END ISSUE ---
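For context, the guard the issue asks for is a cast of the ground-truth tensor to a long integer type at the top of `forward()`, before it is flattened and handed to `wasserstein_distance_map`. A minimal, hypothetical sketch of such a cast (the helper name is made up for illustration; the check mirrors the one suggested in the issue):

```python
import torch


def ensure_long_target(target: torch.Tensor) -> torch.Tensor:
    """Cast a segmentation target to int64 (LongTensor).

    The Wasserstein distance map indexes the class-distance matrix with the
    flattened target, which requires a long integer tensor.
    """
    if target.dtype != torch.int64:
        target = target.long()
    return target


# Inside GeneralizedWassersteinDiceLoss.forward(), before flattening the target:
# target = ensure_long_target(target)
```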
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/losses/dice.py`
Content:
```
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import warnings
13 from typing import Callable, Optional, Union
14
15 import numpy as np
16 import torch
17 import torch.nn.functional as F
18 from torch.nn.modules.loss import _Loss
19
20 from monai.networks import one_hot
21 from monai.utils import LossReduction, Weight
22
23
24 class DiceLoss(_Loss):
25 """
26 Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.
27 Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
28 Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
29 while the same axis of `target` can be 1 or N (one-hot format). The `smooth` parameter is a value added to the
30 intersection and union components of the inter-over-union calculation to smooth results and prevent divide by 0,
31 this value should be small. The `include_background` class attribute can be set to False for an instance of
32 DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background.
33 If the non-background segmentations are small compared to the total image size they can get overwhelmed by
34 the signal from the background so excluding it in such cases helps convergence.
35
36 Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks forVolumetric Medical Image Segmentation, 3DV, 2016.
37
38 """
39
40 def __init__(
41 self,
42 include_background: bool = True,
43 to_onehot_y: bool = False,
44 sigmoid: bool = False,
45 softmax: bool = False,
46 other_act: Optional[Callable] = None,
47 squared_pred: bool = False,
48 jaccard: bool = False,
49 reduction: Union[LossReduction, str] = LossReduction.MEAN,
50 ) -> None:
51 """
52 Args:
53 include_background: if False channel index 0 (background category) is excluded from the calculation.
54 to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
55 sigmoid: if True, apply a sigmoid function to the prediction.
56 softmax: if True, apply a softmax function to the prediction.
57 other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute
58 other activation layers, Defaults to ``None``. for example:
59 `other_act = torch.tanh`.
60 squared_pred: use squared versions of targets and predictions in the denominator or not.
61 jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
62 reduction: {``"none"``, ``"mean"``, ``"sum"``}
63 Specifies the reduction to apply to the output. Defaults to ``"mean"``.
64
65 - ``"none"``: no reduction will be applied.
66 - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
67 - ``"sum"``: the output will be summed.
68
69 Raises:
70 TypeError: When ``other_act`` is not an ``Optional[Callable]``.
71 ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
72 Incompatible values.
73
74 """
75 super().__init__(reduction=LossReduction(reduction).value)
76 if other_act is not None and not callable(other_act):
77 raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
78 if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
79 raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
80 self.include_background = include_background
81 self.to_onehot_y = to_onehot_y
82 self.sigmoid = sigmoid
83 self.softmax = softmax
84 self.other_act = other_act
85 self.squared_pred = squared_pred
86 self.jaccard = jaccard
87
88 def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5) -> torch.Tensor:
89 """
90 Args:
91 input: the shape should be BNH[WD].
92 target: the shape should be BNH[WD].
93 smooth: a small constant to avoid nan.
94
95 Raises:
96 ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
97
98 """
99 if self.sigmoid:
100 input = torch.sigmoid(input)
101
102 n_pred_ch = input.shape[1]
103 if self.softmax:
104 if n_pred_ch == 1:
105 warnings.warn("single channel prediction, `softmax=True` ignored.")
106 else:
107 input = torch.softmax(input, 1)
108
109 if self.other_act is not None:
110 input = self.other_act(input)
111
112 if self.to_onehot_y:
113 if n_pred_ch == 1:
114 warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
115 else:
116 target = one_hot(target, num_classes=n_pred_ch)
117
118 if not self.include_background:
119 if n_pred_ch == 1:
120 warnings.warn("single channel prediction, `include_background=False` ignored.")
121 else:
122 # if skipping background, removing first channel
123 target = target[:, 1:]
124 input = input[:, 1:]
125
126 assert (
127 target.shape == input.shape
128 ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})"
129
130 # reducing only spatial dimensions (not batch nor channels)
131 reduce_axis = list(range(2, len(input.shape)))
132 intersection = torch.sum(target * input, dim=reduce_axis)
133
134 if self.squared_pred:
135 target = torch.pow(target, 2)
136 input = torch.pow(input, 2)
137
138 ground_o = torch.sum(target, dim=reduce_axis)
139 pred_o = torch.sum(input, dim=reduce_axis)
140
141 denominator = ground_o + pred_o
142
143 if self.jaccard:
144 denominator = 2.0 * (denominator - intersection)
145
146 f: torch.Tensor = 1.0 - (2.0 * intersection + smooth) / (denominator + smooth)
147
148 if self.reduction == LossReduction.MEAN.value:
149 f = torch.mean(f) # the batch and channel average
150 elif self.reduction == LossReduction.SUM.value:
151 f = torch.sum(f) # sum over the batch and channel dims
152 elif self.reduction == LossReduction.NONE.value:
153 pass # returns [N, n_classes] losses
154 else:
155 raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
156
157 return f
158
159
160 class MaskedDiceLoss(DiceLoss):
161 """
162 Add an additional `masking` process before `DiceLoss`, accept a binary mask ([0, 1]) indicating a region,
163 `input` and `target` will be masked by the region: region with mask `1` will keep the original value,
164 region with `0` mask will be converted to `0`. Then feed `input` and `target` to normal `DiceLoss` computation.
165 This has the effect of ensuring only the masked region contributes to the loss computation and
166 hence gradient calculation.
167
168 """
169
170 def forward(
171 self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5, mask: Optional[torch.Tensor] = None
172 ):
173 """
174 Args:
175 input: the shape should be BNH[WD].
176 target: the shape should be BNH[WD].
177 smooth: a small constant to avoid nan.
178 mask: the shape should B1H[WD] or 11H[WD].
179 """
180 if mask is not None:
181 # checking if mask is of proper shape
182 assert input.dim() == mask.dim(), f"dim of input ({input.shape}) is different from mask ({mask.shape})"
183 assert (
184 input.shape[0] == mask.shape[0] or mask.shape[0] == 1
185 ), f" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})"
186
187 if target.dim() > 1:
188 assert mask.shape[1] == 1, f"mask ({mask.shape}) must have only 1 channel"
189 assert (
190 input.shape[2:] == mask.shape[2:]
191 ), f"spatial size of input ({input.shape}) is different from mask ({mask.shape})"
192
193 input = input * mask
194 target = target * mask
195 else:
196 warnings.warn("no mask value specified for the MaskedDiceLoss.")
197
198 return super().forward(input=input, target=target, smooth=smooth)
199
200
201 class GeneralizedDiceLoss(_Loss):
202 """
203 Compute the generalised Dice loss defined in:
204
205 Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
206 loss function for highly unbalanced segmentations. DLMIA 2017.
207
208 Adapted from:
209 https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
210 """
211
212 def __init__(
213 self,
214 include_background: bool = True,
215 to_onehot_y: bool = False,
216 sigmoid: bool = False,
217 softmax: bool = False,
218 other_act: Optional[Callable] = None,
219 w_type: Union[Weight, str] = Weight.SQUARE,
220 reduction: Union[LossReduction, str] = LossReduction.MEAN,
221 ) -> None:
222 """
223 Args:
224 include_background: If False channel index 0 (background category) is excluded from the calculation.
225 to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
226 sigmoid: If True, apply a sigmoid function to the prediction.
227 softmax: If True, apply a softmax function to the prediction.
228 other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute
229 other activation layers, Defaults to ``None``. for example:
230 `other_act = torch.tanh`.
231 squared_pred: use squared versions of targets and predictions in the denominator or not.
232 w_type: {``"square"``, ``"simple"``, ``"uniform"``}
233 Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
234 reduction: {``"none"``, ``"mean"``, ``"sum"``}
235 Specifies the reduction to apply to the output. Defaults to ``"mean"``.
236
237 - ``"none"``: no reduction will be applied.
238 - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
239 - ``"sum"``: the output will be summed.
240
241 Raises:
242 TypeError: When ``other_act`` is not an ``Optional[Callable]``.
243 ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
244 Incompatible values.
245
246 """
247 super().__init__(reduction=LossReduction(reduction).value)
248 if other_act is not None and not callable(other_act):
249 raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
250 if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
251 raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
252 self.include_background = include_background
253 self.to_onehot_y = to_onehot_y
254 self.sigmoid = sigmoid
255 self.softmax = softmax
256 self.other_act = other_act
257
258 w_type = Weight(w_type)
259 self.w_func: Callable = torch.ones_like
260 if w_type == Weight.SIMPLE:
261 self.w_func = torch.reciprocal
262 elif w_type == Weight.SQUARE:
263 self.w_func = lambda x: torch.reciprocal(x * x)
264
265 def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):
266 """
267 Args:
268 input: the shape should be BNH[WD].
269 target: the shape should be BNH[WD].
270 smooth: a small constant to avoid nan.
271
272 Raises:
273 ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
274
275 """
276 if self.sigmoid:
277 input = torch.sigmoid(input)
278 n_pred_ch = input.shape[1]
279 if self.softmax:
280 if n_pred_ch == 1:
281 warnings.warn("single channel prediction, `softmax=True` ignored.")
282 else:
283 input = torch.softmax(input, 1)
284
285 if self.other_act is not None:
286 input = self.other_act(input)
287
288 if self.to_onehot_y:
289 if n_pred_ch == 1:
290 warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
291 else:
292 target = one_hot(target, num_classes=n_pred_ch)
293
294 if not self.include_background:
295 if n_pred_ch == 1:
296 warnings.warn("single channel prediction, `include_background=False` ignored.")
297 else:
298 # if skipping background, removing first channel
299 target = target[:, 1:]
300 input = input[:, 1:]
301
302 assert (
303 target.shape == input.shape
304 ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})"
305
306 # reducing only spatial dimensions (not batch nor channels)
307 reduce_axis = list(range(2, len(input.shape)))
308 intersection = torch.sum(target * input, reduce_axis)
309
310 ground_o = torch.sum(target, reduce_axis)
311 pred_o = torch.sum(input, reduce_axis)
312
313 denominator = ground_o + pred_o
314
315 w = self.w_func(ground_o.float())
316 for b in w:
317 infs = torch.isinf(b)
318 b[infs] = 0.0
319 b[infs] = torch.max(b)
320
321 f = 1.0 - (2.0 * (intersection * w).sum(1) + smooth) / ((denominator * w).sum(1) + smooth)
322
323 if self.reduction == LossReduction.MEAN.value:
324 f = torch.mean(f) # the batch and channel average
325 elif self.reduction == LossReduction.SUM.value:
326 f = torch.sum(f) # sum over the batch and channel dims
327 elif self.reduction == LossReduction.NONE.value:
328 pass # returns [N, n_classes] losses
329 else:
330 raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
331
332 return f
333
334
335 class GeneralizedWassersteinDiceLoss(_Loss):
336 """
337 Generalized Wasserstein Dice Loss [1] in PyTorch.
338 Compared to [1] we used a weighting method similar to the one
339 used in the generalized Dice Loss [2].
340
341 References:
342 ===========
343 [1] "Generalised Wasserstein Dice Score for Imbalanced Multi-class
344 Segmentation using Holistic Convolutional Networks",
345 Fidon L. et al. MICCAI BrainLes 2017.
346 [2] "Generalised dice overlap as a deep learning loss function
347 for highly unbalanced segmentations",
348 Sudre C., et al. MICCAI DLMIA 2017.
349
350 wasserstein_distance_map:
351 Compute the voxel-wise Wasserstein distance (eq. 6 in [1]) between the
352 flattened prediction and the flattened labels (ground_truth) with respect
353 to the distance matrix on the label space M.
354 References:
355 [1] "Generalised Wasserstein Dice Score for Imbalanced Multi-class
356 Segmentation using Holistic Convolutional Networks",
357 Fidon L. et al. MICCAI BrainLes 2017
358
359 compute_weights_generalized_true_positives:
360 Compute the weights \alpha_l of eq. 9 in [1] but using the weighting
361 method proposed in the generalized Dice Loss [2].
362 References:
363 [1] "Generalised Wasserstein Dice Score for Imbalanced Multi-class
364 Segmentation using Holistic Convolutional Networks",
365 Fidon L. et al. MICCAI BrainLes 2017
366 [2] "Generalised dice overlap as a deep learning loss function
367 for highly unbalanced segmentations." Sudre C., et al.
368 MICCAI DLMIA 2017.
369 """
370
371 def __init__(self, dist_matrix, reduction: Union[LossReduction, str] = LossReduction.MEAN):
372 """
373 Args:
374 dist_matrix: 2d tensor or 2d numpy array; matrix of distances
375 between the classes. It must have dimension C x C where C is the
376 number of classes.
377 reduction: str; reduction mode.
378
379 Raises:
380 ValueError: When ``dist_matrix`` is not a square matrix.
381
382 """
383 super(GeneralizedWassersteinDiceLoss, self).__init__(reduction=LossReduction(reduction).value)
384
385 if dist_matrix.shape[0] != dist_matrix.shape[1]:
386 raise ValueError(f"dist_matrix must be C x C, got {dist_matrix.shape[0]} x {dist_matrix.shape[1]}.")
387
388 self.m = dist_matrix
389 if isinstance(self.m, np.ndarray):
390 self.m = torch.from_numpy(self.m)
391 if torch.max(self.m) != 1:
392 self.m = self.m / torch.max(self.m)
393 self.num_classes = self.m.size(0)
394
395 def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):
396 """
397 Args:
398 input: the shape should be BNH[WD].
399 target: the shape should be BNH[WD].
400 smooth: a small constant to avoid nan.
401
402 """
403 # Aggregate spatial dimensions
404 flat_input = input.view(input.size(0), input.size(1), -1)
405 flat_target = target.view(target.size(0), -1)
406
407 # Apply the softmax to the input scores map
408 probs = F.softmax(flat_input, dim=1)
409
410 # Compute the Wasserstein distance map
411 wass_dist_map = self.wasserstein_distance_map(probs, flat_target)
412
413 # Compute the generalised number of true positives
414 alpha = self.compute_weights_generalized_true_positives(flat_target)
415 true_pos = self.compute_generalized_true_positive(alpha, flat_target, wass_dist_map)
416 denom = self.compute_denominator(alpha, flat_target, wass_dist_map)
417
418 # Compute and return the final loss
419 wass_dice = (2.0 * true_pos + smooth) / (denom + smooth)
420 wass_dice_loss = 1.0 - wass_dice
421 return wass_dice_loss.mean()
422
423 def wasserstein_distance_map(self, flat_proba: torch.Tensor, flat_target: torch.Tensor):
424 """
425 Args:
426 flat_proba: the probabilities of input(predicted) tensor.
427 flat_target: the target tensor.
428 """
429 # Turn the distance matrix to a map of identical matrix
430 m = torch.clone(self.m).to(flat_proba.device)
431 m_extended = torch.unsqueeze(m, dim=0)
432 m_extended = torch.unsqueeze(m_extended, dim=3)
433 m_extended = m_extended.expand((flat_proba.size(0), m_extended.size(1), m_extended.size(2), flat_proba.size(2)))
434
435 # Expand the feature dimensions of the target
436 flat_target_extended = torch.unsqueeze(flat_target, dim=1)
437 flat_target_extended = flat_target_extended.expand(
438 (flat_target.size(0), m_extended.size(1), flat_target.size(1))
439 )
440 flat_target_extended = torch.unsqueeze(flat_target_extended, dim=1)
441
442 # Extract the vector of class distances for the ground-truth label at each voxel
443 m_extended = torch.gather(m_extended, dim=1, index=flat_target_extended)
444 m_extended = torch.squeeze(m_extended, dim=1)
445
446 # Compute the wasserstein distance map
447 wasserstein_map = m_extended * flat_proba
448
449 # Sum over the classes
450 wasserstein_map = torch.sum(wasserstein_map, dim=1)
451 return wasserstein_map
452
453 def compute_generalized_true_positive(
454 self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map
455 ):
456 """
457 Args:
458 alpha: generalised number of true positives of target class.
459 flat_target: the target tensor.
460 wasserstein_distance_map: the map obtained from the above function.
461 """
462 # Extend alpha to a map and select value at each voxel according to flat_target
463 alpha_extended = torch.unsqueeze(alpha, dim=2)
464 alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))
465 flat_target_extended = torch.unsqueeze(flat_target, dim=1)
466 alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)
467
468 # Compute the generalized true positive as in eq. 9
469 generalized_true_pos = torch.sum(alpha_extended * (1.0 - wasserstein_distance_map), dim=[1, 2],)
470 return generalized_true_pos
471
472 def compute_denominator(self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map):
473 """
474 Args:
475 alpha: generalised number of true positives of target class.
476 flat_target: the target tensor.
477 wasserstein_distance_map: the map obtained from the above function.
478 """
479 # Extend alpha to a map and select value at each voxel according to flat_target
480 alpha_extended = torch.unsqueeze(alpha, dim=2)
481 alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))
482 flat_target_extended = torch.unsqueeze(flat_target, dim=1)
483 alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)
484
485 # Compute the generalized true positive as in eq. 9
486 generalized_true_pos = torch.sum(alpha_extended * (2.0 - wasserstein_distance_map), dim=[1, 2],)
487 return generalized_true_pos
488
489 def compute_weights_generalized_true_positives(self, flat_target: torch.Tensor):
490 """
491 Args:
492 flat_target: the target tensor.
493 """
494 one_hot = F.one_hot(flat_target, num_classes=self.num_classes).permute(0, 2, 1).float()
495 volumes = torch.sum(one_hot, dim=2)
496 alpha = 1.0 / (volumes + 1.0)
497 return alpha
498
499
500 dice = Dice = DiceLoss
501 generalized_dice = GeneralizedDiceLoss
502 generalized_wasserstein_dice = GeneralizedWassersteinDiceLoss
503
```
--- END FILES ---
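For context on the reported failure: `F.one_hot` and `torch.gather` only accept integer (int64) index tensors, so passing a floating-point `target` into `GeneralizedWassersteinDiceLoss.forward` raises a runtime error before any loss is computed, which is why the golden diff below casts with `.long()`. A minimal standalone sketch of the type requirement (illustrative, not taken from the file above):

```python
import torch
import torch.nn.functional as F

target = torch.zeros(2, 16)                            # float labels, as many pipelines produce
# F.one_hot(target, num_classes=3)                     # fails: one_hot needs an index (int64) tensor
one_hot_ok = F.one_hot(target.long(), num_classes=3)   # casting to long satisfies the requirement
print(one_hot_ok.shape)                                # torch.Size([2, 16, 3])
```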
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/monai/losses/dice.py b/monai/losses/dice.py
--- a/monai/losses/dice.py
+++ b/monai/losses/dice.py
@@ -402,7 +402,7 @@
"""
# Aggregate spatial dimensions
flat_input = input.view(input.size(0), input.size(1), -1)
- flat_target = target.view(target.size(0), -1)
+ flat_target = target.view(target.size(0), -1).long()
# Apply the softmax to the input scores map
probs = F.softmax(flat_input, dim=1)
| {"golden_diff": "diff --git a/monai/losses/dice.py b/monai/losses/dice.py\n--- a/monai/losses/dice.py\n+++ b/monai/losses/dice.py\n@@ -402,7 +402,7 @@\n \"\"\"\n # Aggregate spatial dimensions\n flat_input = input.view(input.size(0), input.size(1), -1)\n- flat_target = target.view(target.size(0), -1)\n+ flat_target = target.view(target.size(0), -1).long()\n \n # Apply the softmax to the input scores map\n probs = F.softmax(flat_input, dim=1)\n", "issue": "bug in GeneralizedWassersteinDiceLoss\nin **GeneralizedWassersteinDiceLoss** the forward() is missing : \r\n\r\nif not(target.type() in [torch.LongTensor, torch.cuda.LongTensor]):\r\n target = target.long()\r\n\r\nsince the wasserstein_distance_map expects flat_target to be a long integer type tensor\r\n\r\nplease fix. otherwise it throws an error\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import Callable, Optional, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.networks import one_hot\nfrom monai.utils import LossReduction, Weight\n\n\nclass DiceLoss(_Loss):\n \"\"\"\n Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.\n Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).\n Axis N of `input` is expected to have logit predictions for each class rather than being image channels,\n while the same axis of `target` can be 1 or N (one-hot format). The `smooth` parameter is a value added to the\n intersection and union components of the inter-over-union calculation to smooth results and prevent divide by 0,\n this value should be small. The `include_background` class attribute can be set to False for an instance of\n DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background.\n If the non-background segmentations are small compared to the total image size they can get overwhelmed by\n the signal from the background so excluding it in such cases helps convergence.\n\n Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks forVolumetric Medical Image Segmentation, 3DV, 2016.\n\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n sigmoid: bool = False,\n softmax: bool = False,\n other_act: Optional[Callable] = None,\n squared_pred: bool = False,\n jaccard: bool = False,\n reduction: Union[LossReduction, str] = LossReduction.MEAN,\n ) -> None:\n \"\"\"\n Args:\n include_background: if False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y: whether to convert `y` into the one-hot format. 
Defaults to False.\n sigmoid: if True, apply a sigmoid function to the prediction.\n softmax: if True, apply a softmax function to the prediction.\n other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute\n other activation layers, Defaults to ``None``. for example:\n `other_act = torch.tanh`.\n squared_pred: use squared versions of targets and predictions in the denominator or not.\n jaccard: compute Jaccard Index (soft IoU) instead of dice or not.\n reduction: {``\"none\"``, ``\"mean\"``, ``\"sum\"``}\n Specifies the reduction to apply to the output. Defaults to ``\"mean\"``.\n\n - ``\"none\"``: no reduction will be applied.\n - ``\"mean\"``: the sum of the output will be divided by the number of elements in the output.\n - ``\"sum\"``: the output will be summed.\n\n Raises:\n TypeError: When ``other_act`` is not an ``Optional[Callable]``.\n ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].\n Incompatible values.\n\n \"\"\"\n super().__init__(reduction=LossReduction(reduction).value)\n if other_act is not None and not callable(other_act):\n raise TypeError(f\"other_act must be None or callable but is {type(other_act).__name__}.\")\n if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:\n raise ValueError(\"Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].\")\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n self.sigmoid = sigmoid\n self.softmax = softmax\n self.other_act = other_act\n self.squared_pred = squared_pred\n self.jaccard = jaccard\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"mean\", \"sum\", \"none\"].\n\n \"\"\"\n if self.sigmoid:\n input = torch.sigmoid(input)\n\n n_pred_ch = input.shape[1]\n if self.softmax:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `softmax=True` ignored.\")\n else:\n input = torch.softmax(input, 1)\n\n if self.other_act is not None:\n input = self.other_act(input)\n\n if self.to_onehot_y:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n else:\n target = one_hot(target, num_classes=n_pred_ch)\n\n if not self.include_background:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n\n assert (\n target.shape == input.shape\n ), f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\"\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis = list(range(2, len(input.shape)))\n intersection = torch.sum(target * input, dim=reduce_axis)\n\n if self.squared_pred:\n target = torch.pow(target, 2)\n input = torch.pow(input, 2)\n\n ground_o = torch.sum(target, dim=reduce_axis)\n pred_o = torch.sum(input, dim=reduce_axis)\n\n denominator = ground_o + pred_o\n\n if self.jaccard:\n denominator = 2.0 * (denominator - intersection)\n\n f: torch.Tensor = 1.0 - (2.0 * intersection + smooth) / (denominator + smooth)\n\n if self.reduction == LossReduction.MEAN.value:\n f = torch.mean(f) # the batch and channel average\n elif self.reduction == LossReduction.SUM.value:\n f = 
torch.sum(f) # sum over the batch and channel dims\n elif self.reduction == LossReduction.NONE.value:\n pass # returns [N, n_classes] losses\n else:\n raise ValueError(f'Unsupported reduction: {self.reduction}, available options are [\"mean\", \"sum\", \"none\"].')\n\n return f\n\n\nclass MaskedDiceLoss(DiceLoss):\n \"\"\"\n Add an additional `masking` process before `DiceLoss`, accept a binary mask ([0, 1]) indicating a region,\n `input` and `target` will be masked by the region: region with mask `1` will keep the original value,\n region with `0` mask will be converted to `0`. Then feed `input` and `target` to normal `DiceLoss` computation.\n This has the effect of ensuring only the masked region contributes to the loss computation and\n hence gradient calculation.\n\n \"\"\"\n\n def forward(\n self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5, mask: Optional[torch.Tensor] = None\n ):\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n mask: the shape should B1H[WD] or 11H[WD].\n \"\"\"\n if mask is not None:\n # checking if mask is of proper shape\n assert input.dim() == mask.dim(), f\"dim of input ({input.shape}) is different from mask ({mask.shape})\"\n assert (\n input.shape[0] == mask.shape[0] or mask.shape[0] == 1\n ), f\" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})\"\n\n if target.dim() > 1:\n assert mask.shape[1] == 1, f\"mask ({mask.shape}) must have only 1 channel\"\n assert (\n input.shape[2:] == mask.shape[2:]\n ), f\"spatial size of input ({input.shape}) is different from mask ({mask.shape})\"\n\n input = input * mask\n target = target * mask\n else:\n warnings.warn(\"no mask value specified for the MaskedDiceLoss.\")\n\n return super().forward(input=input, target=target, smooth=smooth)\n\n\nclass GeneralizedDiceLoss(_Loss):\n \"\"\"\n Compute the generalised Dice loss defined in:\n\n Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning\n loss function for highly unbalanced segmentations. DLMIA 2017.\n\n Adapted from:\n https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n sigmoid: bool = False,\n softmax: bool = False,\n other_act: Optional[Callable] = None,\n w_type: Union[Weight, str] = Weight.SQUARE,\n reduction: Union[LossReduction, str] = LossReduction.MEAN,\n ) -> None:\n \"\"\"\n Args:\n include_background: If False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.\n sigmoid: If True, apply a sigmoid function to the prediction.\n softmax: If True, apply a softmax function to the prediction.\n other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute\n other activation layers, Defaults to ``None``. for example:\n `other_act = torch.tanh`.\n squared_pred: use squared versions of targets and predictions in the denominator or not.\n w_type: {``\"square\"``, ``\"simple\"``, ``\"uniform\"``}\n Type of function to transform ground truth volume to a weight factor. Defaults to ``\"square\"``.\n reduction: {``\"none\"``, ``\"mean\"``, ``\"sum\"``}\n Specifies the reduction to apply to the output. 
Defaults to ``\"mean\"``.\n\n - ``\"none\"``: no reduction will be applied.\n - ``\"mean\"``: the sum of the output will be divided by the number of elements in the output.\n - ``\"sum\"``: the output will be summed.\n\n Raises:\n TypeError: When ``other_act`` is not an ``Optional[Callable]``.\n ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].\n Incompatible values.\n\n \"\"\"\n super().__init__(reduction=LossReduction(reduction).value)\n if other_act is not None and not callable(other_act):\n raise TypeError(f\"other_act must be None or callable but is {type(other_act).__name__}.\")\n if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:\n raise ValueError(\"Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].\")\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n self.sigmoid = sigmoid\n self.softmax = softmax\n self.other_act = other_act\n\n w_type = Weight(w_type)\n self.w_func: Callable = torch.ones_like\n if w_type == Weight.SIMPLE:\n self.w_func = torch.reciprocal\n elif w_type == Weight.SQUARE:\n self.w_func = lambda x: torch.reciprocal(x * x)\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"mean\", \"sum\", \"none\"].\n\n \"\"\"\n if self.sigmoid:\n input = torch.sigmoid(input)\n n_pred_ch = input.shape[1]\n if self.softmax:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `softmax=True` ignored.\")\n else:\n input = torch.softmax(input, 1)\n\n if self.other_act is not None:\n input = self.other_act(input)\n\n if self.to_onehot_y:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n else:\n target = one_hot(target, num_classes=n_pred_ch)\n\n if not self.include_background:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n\n assert (\n target.shape == input.shape\n ), f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\"\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis = list(range(2, len(input.shape)))\n intersection = torch.sum(target * input, reduce_axis)\n\n ground_o = torch.sum(target, reduce_axis)\n pred_o = torch.sum(input, reduce_axis)\n\n denominator = ground_o + pred_o\n\n w = self.w_func(ground_o.float())\n for b in w:\n infs = torch.isinf(b)\n b[infs] = 0.0\n b[infs] = torch.max(b)\n\n f = 1.0 - (2.0 * (intersection * w).sum(1) + smooth) / ((denominator * w).sum(1) + smooth)\n\n if self.reduction == LossReduction.MEAN.value:\n f = torch.mean(f) # the batch and channel average\n elif self.reduction == LossReduction.SUM.value:\n f = torch.sum(f) # sum over the batch and channel dims\n elif self.reduction == LossReduction.NONE.value:\n pass # returns [N, n_classes] losses\n else:\n raise ValueError(f'Unsupported reduction: {self.reduction}, available options are [\"mean\", \"sum\", \"none\"].')\n\n return f\n\n\nclass GeneralizedWassersteinDiceLoss(_Loss):\n \"\"\"\n Generalized Wasserstein Dice Loss [1] in PyTorch.\n Compared to [1] we used a weighting method similar to the one\n used in the generalized Dice Loss [2].\n\n 
References:\n ===========\n [1] \"Generalised Wasserstein Dice Score for Imbalanced Multi-class\n Segmentation using Holistic Convolutional Networks\",\n Fidon L. et al. MICCAI BrainLes 2017.\n [2] \"Generalised dice overlap as a deep learning loss function\n for highly unbalanced segmentations\",\n Sudre C., et al. MICCAI DLMIA 2017.\n\n wasserstein_distance_map:\n Compute the voxel-wise Wasserstein distance (eq. 6 in [1]) between the\n flattened prediction and the flattened labels (ground_truth) with respect\n to the distance matrix on the label space M.\n References:\n [1] \"Generalised Wasserstein Dice Score for Imbalanced Multi-class\n Segmentation using Holistic Convolutional Networks\",\n Fidon L. et al. MICCAI BrainLes 2017\n\n compute_weights_generalized_true_positives:\n Compute the weights \\alpha_l of eq. 9 in [1] but using the weighting\n method proposed in the generalized Dice Loss [2].\n References:\n [1] \"Generalised Wasserstein Dice Score for Imbalanced Multi-class\n Segmentation using Holistic Convolutional Networks\",\n Fidon L. et al. MICCAI BrainLes 2017\n [2] \"Generalised dice overlap as a deep learning loss function\n for highly unbalanced segmentations.\" Sudre C., et al.\n MICCAI DLMIA 2017.\n \"\"\"\n\n def __init__(self, dist_matrix, reduction: Union[LossReduction, str] = LossReduction.MEAN):\n \"\"\"\n Args:\n dist_matrix: 2d tensor or 2d numpy array; matrix of distances\n between the classes. It must have dimension C x C where C is the\n number of classes.\n reduction: str; reduction mode.\n\n Raises:\n ValueError: When ``dist_matrix`` is not a square matrix.\n\n \"\"\"\n super(GeneralizedWassersteinDiceLoss, self).__init__(reduction=LossReduction(reduction).value)\n\n if dist_matrix.shape[0] != dist_matrix.shape[1]:\n raise ValueError(f\"dist_matrix must be C x C, got {dist_matrix.shape[0]} x {dist_matrix.shape[1]}.\")\n\n self.m = dist_matrix\n if isinstance(self.m, np.ndarray):\n self.m = torch.from_numpy(self.m)\n if torch.max(self.m) != 1:\n self.m = self.m / torch.max(self.m)\n self.num_classes = self.m.size(0)\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n\n \"\"\"\n # Aggregate spatial dimensions\n flat_input = input.view(input.size(0), input.size(1), -1)\n flat_target = target.view(target.size(0), -1)\n\n # Apply the softmax to the input scores map\n probs = F.softmax(flat_input, dim=1)\n\n # Compute the Wasserstein distance map\n wass_dist_map = self.wasserstein_distance_map(probs, flat_target)\n\n # Compute the generalised number of true positives\n alpha = self.compute_weights_generalized_true_positives(flat_target)\n true_pos = self.compute_generalized_true_positive(alpha, flat_target, wass_dist_map)\n denom = self.compute_denominator(alpha, flat_target, wass_dist_map)\n\n # Compute and return the final loss\n wass_dice = (2.0 * true_pos + smooth) / (denom + smooth)\n wass_dice_loss = 1.0 - wass_dice\n return wass_dice_loss.mean()\n\n def wasserstein_distance_map(self, flat_proba: torch.Tensor, flat_target: torch.Tensor):\n \"\"\"\n Args:\n flat_proba: the probabilities of input(predicted) tensor.\n flat_target: the target tensor.\n \"\"\"\n # Turn the distance matrix to a map of identical matrix\n m = torch.clone(self.m).to(flat_proba.device)\n m_extended = torch.unsqueeze(m, dim=0)\n m_extended = torch.unsqueeze(m_extended, dim=3)\n m_extended = 
m_extended.expand((flat_proba.size(0), m_extended.size(1), m_extended.size(2), flat_proba.size(2)))\n\n # Expand the feature dimensions of the target\n flat_target_extended = torch.unsqueeze(flat_target, dim=1)\n flat_target_extended = flat_target_extended.expand(\n (flat_target.size(0), m_extended.size(1), flat_target.size(1))\n )\n flat_target_extended = torch.unsqueeze(flat_target_extended, dim=1)\n\n # Extract the vector of class distances for the ground-truth label at each voxel\n m_extended = torch.gather(m_extended, dim=1, index=flat_target_extended)\n m_extended = torch.squeeze(m_extended, dim=1)\n\n # Compute the wasserstein distance map\n wasserstein_map = m_extended * flat_proba\n\n # Sum over the classes\n wasserstein_map = torch.sum(wasserstein_map, dim=1)\n return wasserstein_map\n\n def compute_generalized_true_positive(\n self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map\n ):\n \"\"\"\n Args:\n alpha: generalised number of true positives of target class.\n flat_target: the target tensor.\n wasserstein_distance_map: the map obtained from the above function.\n \"\"\"\n # Extend alpha to a map and select value at each voxel according to flat_target\n alpha_extended = torch.unsqueeze(alpha, dim=2)\n alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))\n flat_target_extended = torch.unsqueeze(flat_target, dim=1)\n alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)\n\n # Compute the generalized true positive as in eq. 9\n generalized_true_pos = torch.sum(alpha_extended * (1.0 - wasserstein_distance_map), dim=[1, 2],)\n return generalized_true_pos\n\n def compute_denominator(self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map):\n \"\"\"\n Args:\n alpha: generalised number of true positives of target class.\n flat_target: the target tensor.\n wasserstein_distance_map: the map obtained from the above function.\n \"\"\"\n # Extend alpha to a map and select value at each voxel according to flat_target\n alpha_extended = torch.unsqueeze(alpha, dim=2)\n alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))\n flat_target_extended = torch.unsqueeze(flat_target, dim=1)\n alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)\n\n # Compute the generalized true positive as in eq. 
9\n generalized_true_pos = torch.sum(alpha_extended * (2.0 - wasserstein_distance_map), dim=[1, 2],)\n return generalized_true_pos\n\n def compute_weights_generalized_true_positives(self, flat_target: torch.Tensor):\n \"\"\"\n Args:\n flat_target: the target tensor.\n \"\"\"\n one_hot = F.one_hot(flat_target, num_classes=self.num_classes).permute(0, 2, 1).float()\n volumes = torch.sum(one_hot, dim=2)\n alpha = 1.0 / (volumes + 1.0)\n return alpha\n\n\ndice = Dice = DiceLoss\ngeneralized_dice = GeneralizedDiceLoss\ngeneralized_wasserstein_dice = GeneralizedWassersteinDiceLoss\n", "path": "monai/losses/dice.py"}], "after_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import Callable, Optional, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.networks import one_hot\nfrom monai.utils import LossReduction, Weight\n\n\nclass DiceLoss(_Loss):\n \"\"\"\n Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.\n Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).\n Axis N of `input` is expected to have logit predictions for each class rather than being image channels,\n while the same axis of `target` can be 1 or N (one-hot format). The `smooth` parameter is a value added to the\n intersection and union components of the inter-over-union calculation to smooth results and prevent divide by 0,\n this value should be small. The `include_background` class attribute can be set to False for an instance of\n DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background.\n If the non-background segmentations are small compared to the total image size they can get overwhelmed by\n the signal from the background so excluding it in such cases helps convergence.\n\n Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks forVolumetric Medical Image Segmentation, 3DV, 2016.\n\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n sigmoid: bool = False,\n softmax: bool = False,\n other_act: Optional[Callable] = None,\n squared_pred: bool = False,\n jaccard: bool = False,\n reduction: Union[LossReduction, str] = LossReduction.MEAN,\n ) -> None:\n \"\"\"\n Args:\n include_background: if False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.\n sigmoid: if True, apply a sigmoid function to the prediction.\n softmax: if True, apply a softmax function to the prediction.\n other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute\n other activation layers, Defaults to ``None``. 
for example:\n `other_act = torch.tanh`.\n squared_pred: use squared versions of targets and predictions in the denominator or not.\n jaccard: compute Jaccard Index (soft IoU) instead of dice or not.\n reduction: {``\"none\"``, ``\"mean\"``, ``\"sum\"``}\n Specifies the reduction to apply to the output. Defaults to ``\"mean\"``.\n\n - ``\"none\"``: no reduction will be applied.\n - ``\"mean\"``: the sum of the output will be divided by the number of elements in the output.\n - ``\"sum\"``: the output will be summed.\n\n Raises:\n TypeError: When ``other_act`` is not an ``Optional[Callable]``.\n ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].\n Incompatible values.\n\n \"\"\"\n super().__init__(reduction=LossReduction(reduction).value)\n if other_act is not None and not callable(other_act):\n raise TypeError(f\"other_act must be None or callable but is {type(other_act).__name__}.\")\n if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:\n raise ValueError(\"Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].\")\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n self.sigmoid = sigmoid\n self.softmax = softmax\n self.other_act = other_act\n self.squared_pred = squared_pred\n self.jaccard = jaccard\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"mean\", \"sum\", \"none\"].\n\n \"\"\"\n if self.sigmoid:\n input = torch.sigmoid(input)\n\n n_pred_ch = input.shape[1]\n if self.softmax:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `softmax=True` ignored.\")\n else:\n input = torch.softmax(input, 1)\n\n if self.other_act is not None:\n input = self.other_act(input)\n\n if self.to_onehot_y:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n else:\n target = one_hot(target, num_classes=n_pred_ch)\n\n if not self.include_background:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n\n assert (\n target.shape == input.shape\n ), f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\"\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis = list(range(2, len(input.shape)))\n intersection = torch.sum(target * input, dim=reduce_axis)\n\n if self.squared_pred:\n target = torch.pow(target, 2)\n input = torch.pow(input, 2)\n\n ground_o = torch.sum(target, dim=reduce_axis)\n pred_o = torch.sum(input, dim=reduce_axis)\n\n denominator = ground_o + pred_o\n\n if self.jaccard:\n denominator = 2.0 * (denominator - intersection)\n\n f: torch.Tensor = 1.0 - (2.0 * intersection + smooth) / (denominator + smooth)\n\n if self.reduction == LossReduction.MEAN.value:\n f = torch.mean(f) # the batch and channel average\n elif self.reduction == LossReduction.SUM.value:\n f = torch.sum(f) # sum over the batch and channel dims\n elif self.reduction == LossReduction.NONE.value:\n pass # returns [N, n_classes] losses\n else:\n raise ValueError(f'Unsupported reduction: {self.reduction}, available options are [\"mean\", \"sum\", \"none\"].')\n\n return f\n\n\nclass 
MaskedDiceLoss(DiceLoss):\n \"\"\"\n Add an additional `masking` process before `DiceLoss`, accept a binary mask ([0, 1]) indicating a region,\n `input` and `target` will be masked by the region: region with mask `1` will keep the original value,\n region with `0` mask will be converted to `0`. Then feed `input` and `target` to normal `DiceLoss` computation.\n This has the effect of ensuring only the masked region contributes to the loss computation and\n hence gradient calculation.\n\n \"\"\"\n\n def forward(\n self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5, mask: Optional[torch.Tensor] = None\n ):\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n mask: the shape should B1H[WD] or 11H[WD].\n \"\"\"\n if mask is not None:\n # checking if mask is of proper shape\n assert input.dim() == mask.dim(), f\"dim of input ({input.shape}) is different from mask ({mask.shape})\"\n assert (\n input.shape[0] == mask.shape[0] or mask.shape[0] == 1\n ), f\" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})\"\n\n if target.dim() > 1:\n assert mask.shape[1] == 1, f\"mask ({mask.shape}) must have only 1 channel\"\n assert (\n input.shape[2:] == mask.shape[2:]\n ), f\"spatial size of input ({input.shape}) is different from mask ({mask.shape})\"\n\n input = input * mask\n target = target * mask\n else:\n warnings.warn(\"no mask value specified for the MaskedDiceLoss.\")\n\n return super().forward(input=input, target=target, smooth=smooth)\n\n\nclass GeneralizedDiceLoss(_Loss):\n \"\"\"\n Compute the generalised Dice loss defined in:\n\n Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning\n loss function for highly unbalanced segmentations. DLMIA 2017.\n\n Adapted from:\n https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n sigmoid: bool = False,\n softmax: bool = False,\n other_act: Optional[Callable] = None,\n w_type: Union[Weight, str] = Weight.SQUARE,\n reduction: Union[LossReduction, str] = LossReduction.MEAN,\n ) -> None:\n \"\"\"\n Args:\n include_background: If False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.\n sigmoid: If True, apply a sigmoid function to the prediction.\n softmax: If True, apply a softmax function to the prediction.\n other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute\n other activation layers, Defaults to ``None``. for example:\n `other_act = torch.tanh`.\n squared_pred: use squared versions of targets and predictions in the denominator or not.\n w_type: {``\"square\"``, ``\"simple\"``, ``\"uniform\"``}\n Type of function to transform ground truth volume to a weight factor. Defaults to ``\"square\"``.\n reduction: {``\"none\"``, ``\"mean\"``, ``\"sum\"``}\n Specifies the reduction to apply to the output. 
Defaults to ``\"mean\"``.\n\n - ``\"none\"``: no reduction will be applied.\n - ``\"mean\"``: the sum of the output will be divided by the number of elements in the output.\n - ``\"sum\"``: the output will be summed.\n\n Raises:\n TypeError: When ``other_act`` is not an ``Optional[Callable]``.\n ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].\n Incompatible values.\n\n \"\"\"\n super().__init__(reduction=LossReduction(reduction).value)\n if other_act is not None and not callable(other_act):\n raise TypeError(f\"other_act must be None or callable but is {type(other_act).__name__}.\")\n if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:\n raise ValueError(\"Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].\")\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n self.sigmoid = sigmoid\n self.softmax = softmax\n self.other_act = other_act\n\n w_type = Weight(w_type)\n self.w_func: Callable = torch.ones_like\n if w_type == Weight.SIMPLE:\n self.w_func = torch.reciprocal\n elif w_type == Weight.SQUARE:\n self.w_func = lambda x: torch.reciprocal(x * x)\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"mean\", \"sum\", \"none\"].\n\n \"\"\"\n if self.sigmoid:\n input = torch.sigmoid(input)\n n_pred_ch = input.shape[1]\n if self.softmax:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `softmax=True` ignored.\")\n else:\n input = torch.softmax(input, 1)\n\n if self.other_act is not None:\n input = self.other_act(input)\n\n if self.to_onehot_y:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n else:\n target = one_hot(target, num_classes=n_pred_ch)\n\n if not self.include_background:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n\n assert (\n target.shape == input.shape\n ), f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\"\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis = list(range(2, len(input.shape)))\n intersection = torch.sum(target * input, reduce_axis)\n\n ground_o = torch.sum(target, reduce_axis)\n pred_o = torch.sum(input, reduce_axis)\n\n denominator = ground_o + pred_o\n\n w = self.w_func(ground_o.float())\n for b in w:\n infs = torch.isinf(b)\n b[infs] = 0.0\n b[infs] = torch.max(b)\n\n f = 1.0 - (2.0 * (intersection * w).sum(1) + smooth) / ((denominator * w).sum(1) + smooth)\n\n if self.reduction == LossReduction.MEAN.value:\n f = torch.mean(f) # the batch and channel average\n elif self.reduction == LossReduction.SUM.value:\n f = torch.sum(f) # sum over the batch and channel dims\n elif self.reduction == LossReduction.NONE.value:\n pass # returns [N, n_classes] losses\n else:\n raise ValueError(f'Unsupported reduction: {self.reduction}, available options are [\"mean\", \"sum\", \"none\"].')\n\n return f\n\n\nclass GeneralizedWassersteinDiceLoss(_Loss):\n \"\"\"\n Generalized Wasserstein Dice Loss [1] in PyTorch.\n Compared to [1] we used a weighting method similar to the one\n used in the generalized Dice Loss [2].\n\n 
References:\n ===========\n [1] \"Generalised Wasserstein Dice Score for Imbalanced Multi-class\n Segmentation using Holistic Convolutional Networks\",\n Fidon L. et al. MICCAI BrainLes 2017.\n [2] \"Generalised dice overlap as a deep learning loss function\n for highly unbalanced segmentations\",\n Sudre C., et al. MICCAI DLMIA 2017.\n\n wasserstein_distance_map:\n Compute the voxel-wise Wasserstein distance (eq. 6 in [1]) between the\n flattened prediction and the flattened labels (ground_truth) with respect\n to the distance matrix on the label space M.\n References:\n [1] \"Generalised Wasserstein Dice Score for Imbalanced Multi-class\n Segmentation using Holistic Convolutional Networks\",\n Fidon L. et al. MICCAI BrainLes 2017\n\n compute_weights_generalized_true_positives:\n Compute the weights \\alpha_l of eq. 9 in [1] but using the weighting\n method proposed in the generalized Dice Loss [2].\n References:\n [1] \"Generalised Wasserstein Dice Score for Imbalanced Multi-class\n Segmentation using Holistic Convolutional Networks\",\n Fidon L. et al. MICCAI BrainLes 2017\n [2] \"Generalised dice overlap as a deep learning loss function\n for highly unbalanced segmentations.\" Sudre C., et al.\n MICCAI DLMIA 2017.\n \"\"\"\n\n def __init__(self, dist_matrix, reduction: Union[LossReduction, str] = LossReduction.MEAN):\n \"\"\"\n Args:\n dist_matrix: 2d tensor or 2d numpy array; matrix of distances\n between the classes. It must have dimension C x C where C is the\n number of classes.\n reduction: str; reduction mode.\n\n Raises:\n ValueError: When ``dist_matrix`` is not a square matrix.\n\n \"\"\"\n super(GeneralizedWassersteinDiceLoss, self).__init__(reduction=LossReduction(reduction).value)\n\n if dist_matrix.shape[0] != dist_matrix.shape[1]:\n raise ValueError(f\"dist_matrix must be C x C, got {dist_matrix.shape[0]} x {dist_matrix.shape[1]}.\")\n\n self.m = dist_matrix\n if isinstance(self.m, np.ndarray):\n self.m = torch.from_numpy(self.m)\n if torch.max(self.m) != 1:\n self.m = self.m / torch.max(self.m)\n self.num_classes = self.m.size(0)\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n smooth: a small constant to avoid nan.\n\n \"\"\"\n # Aggregate spatial dimensions\n flat_input = input.view(input.size(0), input.size(1), -1)\n flat_target = target.view(target.size(0), -1).long()\n\n # Apply the softmax to the input scores map\n probs = F.softmax(flat_input, dim=1)\n\n # Compute the Wasserstein distance map\n wass_dist_map = self.wasserstein_distance_map(probs, flat_target)\n\n # Compute the generalised number of true positives\n alpha = self.compute_weights_generalized_true_positives(flat_target)\n true_pos = self.compute_generalized_true_positive(alpha, flat_target, wass_dist_map)\n denom = self.compute_denominator(alpha, flat_target, wass_dist_map)\n\n # Compute and return the final loss\n wass_dice = (2.0 * true_pos + smooth) / (denom + smooth)\n wass_dice_loss = 1.0 - wass_dice\n return wass_dice_loss.mean()\n\n def wasserstein_distance_map(self, flat_proba: torch.Tensor, flat_target: torch.Tensor):\n \"\"\"\n Args:\n flat_proba: the probabilities of input(predicted) tensor.\n flat_target: the target tensor.\n \"\"\"\n # Turn the distance matrix to a map of identical matrix\n m = torch.clone(self.m).to(flat_proba.device)\n m_extended = torch.unsqueeze(m, dim=0)\n m_extended = torch.unsqueeze(m_extended, dim=3)\n m_extended = 
m_extended.expand((flat_proba.size(0), m_extended.size(1), m_extended.size(2), flat_proba.size(2)))\n\n # Expand the feature dimensions of the target\n flat_target_extended = torch.unsqueeze(flat_target, dim=1)\n flat_target_extended = flat_target_extended.expand(\n (flat_target.size(0), m_extended.size(1), flat_target.size(1))\n )\n flat_target_extended = torch.unsqueeze(flat_target_extended, dim=1)\n\n # Extract the vector of class distances for the ground-truth label at each voxel\n m_extended = torch.gather(m_extended, dim=1, index=flat_target_extended)\n m_extended = torch.squeeze(m_extended, dim=1)\n\n # Compute the wasserstein distance map\n wasserstein_map = m_extended * flat_proba\n\n # Sum over the classes\n wasserstein_map = torch.sum(wasserstein_map, dim=1)\n return wasserstein_map\n\n def compute_generalized_true_positive(\n self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map\n ):\n \"\"\"\n Args:\n alpha: generalised number of true positives of target class.\n flat_target: the target tensor.\n wasserstein_distance_map: the map obtained from the above function.\n \"\"\"\n # Extend alpha to a map and select value at each voxel according to flat_target\n alpha_extended = torch.unsqueeze(alpha, dim=2)\n alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))\n flat_target_extended = torch.unsqueeze(flat_target, dim=1)\n alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)\n\n # Compute the generalized true positive as in eq. 9\n generalized_true_pos = torch.sum(alpha_extended * (1.0 - wasserstein_distance_map), dim=[1, 2],)\n return generalized_true_pos\n\n def compute_denominator(self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map):\n \"\"\"\n Args:\n alpha: generalised number of true positives of target class.\n flat_target: the target tensor.\n wasserstein_distance_map: the map obtained from the above function.\n \"\"\"\n # Extend alpha to a map and select value at each voxel according to flat_target\n alpha_extended = torch.unsqueeze(alpha, dim=2)\n alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))\n flat_target_extended = torch.unsqueeze(flat_target, dim=1)\n alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)\n\n # Compute the generalized true positive as in eq. 9\n generalized_true_pos = torch.sum(alpha_extended * (2.0 - wasserstein_distance_map), dim=[1, 2],)\n return generalized_true_pos\n\n def compute_weights_generalized_true_positives(self, flat_target: torch.Tensor):\n \"\"\"\n Args:\n flat_target: the target tensor.\n \"\"\"\n one_hot = F.one_hot(flat_target, num_classes=self.num_classes).permute(0, 2, 1).float()\n volumes = torch.sum(one_hot, dim=2)\n alpha = 1.0 / (volumes + 1.0)\n return alpha\n\n\ndice = Dice = DiceLoss\ngeneralized_dice = GeneralizedDiceLoss\ngeneralized_wasserstein_dice = GeneralizedWassersteinDiceLoss\n", "path": "monai/losses/dice.py"}]} |
gh_patches_debug_1390 | rasdani/github-patches | git_diff | pypa__pipenv-5529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should `pipenv lock` be generating a marker of `null` when `extras` is used?
### Issue description
A `Pipfile.lock` is being generated with a `markers` value of `null`. When the lockfile is passed to other tools that parse it, such as `pipfile2req`, they error on that entry. I took a quick look at PEP 508 and it says nothing about a `null` marker; it suggests a marker should only ever be a string.
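For illustration, this is the kind of guard a consumer of the lockfile currently needs (a hypothetical reader, mirroring the entries shown in the steps below):

```python
import json

# Hypothetical lockfile reader; tools like pipfile2req assume "markers" is a
# PEP 508 string, so an explicit null has to be filtered out by hand.
with open("Pipfile.lock") as f:
    lock = json.load(f)

for name, entry in lock.get("default", {}).items():
    requirement = f"{name}{entry.get('version', '')}"
    marker = entry.get("markers")
    if marker:                      # skips both a missing key and null/None
        requirement += f"; {marker}"
    print(requirement)
```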
### Expected result
I'm guessing that if the resolved marker is `null`, the `markers` key should simply not be included.
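In other words, the expected entry would look like the non-extras case — this is a mock-up of the desired output, not something `pipenv lock` currently produces:
```
"pydantic": {
    "extras": [
        "email"
    ],
    "hashes": [
        ... snipped
    ],
    "index": "pypi",
    "version": "==1.10.2"
},
```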
### Actual result
`pipenv lock` succeeds, but the locked entry for the package that uses `extras` contains `"markers": null` (see the steps to replicate below).
### Steps to replicate
```
➜ test git:(main) ✗ cat Pipfile
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
pydantic = "==1.10.2"
email-validator = "==1.3.0"
```
```
➜ test git:(main) ✗ pipenv lock
Locking [packages] dependencies...
Building requirements...
Resolving dependencies...
✔ Success!
Locking [dev-packages] dependencies...
Updated Pipfile.lock (d5f483e04b72426b3574dfadf29d845164fc106b68e966b5d0bf515817ff3cec)!
```
```
"pydantic": {
...snipped
],
"index": "pypi",
"version": "==1.10.2"
},
```
Looks great. Now let's specify `{extras}`
```
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
pydantic = {extras = ["email"],version = "==1.10.2"}
```
```
➜ test git:(main) ✗ pipenv lock
Locking [packages] dependencies...
Building requirements...
Resolving dependencies...
✔ Success!
Locking [dev-packages] dependencies...
Updated Pipfile.lock (b7af3ff13b8fd9cd4e13a037c9d0a95bdad213656719e0e6d330e0ab5d8b93b7)!
```
```
"pydantic": {
"extras": [
"email"
],
"hashes": [
... snipped
],
"index": "pypi",
"markers": null,
"version": "==1.10.2"
},
```
We now have `"markers": null` in the output.
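A minimal sketch of the behaviour being asked for — dropping the key when there is no marker to record — written as a post-processing step over a lock section (the function and usage are illustrative, not pipenv's internal API):

```python
def drop_null_markers(section: dict) -> dict:
    """Return a copy of a lockfile section with empty/None "markers" keys omitted."""
    cleaned = {}
    for name, entry in section.items():
        entry = dict(entry)
        if not entry.get("markers"):   # None, "" or missing
            entry.pop("markers", None)
        cleaned[name] = entry
    return cleaned

# Example: lock["default"] = drop_null_markers(lock["default"])
```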
---
<details><summary>$ pipenv --support</summary>
Pipenv version: `'2022.11.11'`
Pipenv location: `'/Users/andy/Library/Python/3.9/lib/python/site-packages/pipenv'`
Python location: `'/Library/Developer/CommandLineTools/usr/bin/python3'`
OS Name: `'posix'`
User pip version: `'22.3'`
user Python installations found:
- `3.9.6`: `/usr/bin/python3`
PEP 508 Information:
```
{'implementation_name': 'cpython',
'implementation_version': '3.9.6',
'os_name': 'posix',
'platform_machine': 'arm64',
'platform_python_implementation': 'CPython',
'platform_release': '21.6.0',
'platform_system': 'Darwin',
'platform_version': 'Darwin Kernel Version 21.6.0: Mon Aug 22 20:19:52 PDT '
'2022; root:xnu-8020.140.49~2/RELEASE_ARM64_T6000',
'python_full_version': '3.9.6',
'python_version': '3.9',
'sys_platform': 'darwin'}
```
System environment variables:
- `TERM_SESSION_ID`
- `SSH_AUTH_SOCK`
- `LC_TERMINAL_VERSION`
- `COLORFGBG`
- `ITERM_PROFILE`
- `XPC_FLAGS`
- `LANG`
- `PWD`
- `SHELL`
- `__CFBundleIdentifier`
- `TERM_PROGRAM_VERSION`
- `TERM_PROGRAM`
- `PATH`
- `LC_TERMINAL`
- `COLORTERM`
- `COMMAND_MODE`
- `TERM`
- `HOME`
- `TMPDIR`
- `USER`
- `XPC_SERVICE_NAME`
- `LOGNAME`
- `ITERM_SESSION_ID`
- `__CF_USER_TEXT_ENCODING`
- `SHLVL`
- `OLDPWD`
- `HOMEBREW_PREFIX`
- `HOMEBREW_CELLAR`
- `HOMEBREW_REPOSITORY`
- `MANPATH`
- `INFOPATH`
- `ZSH`
- `PAGER`
- `LESS`
- `LSCOLORS`
- `GITHUB_TOKEN`
- `VERCEL_TOKEN`
- `PNPM_HOME`
- `NVM_DIR`
- `NVM_CD_FLAGS`
- `NVM_BIN`
- `NVM_INC`
- `_`
- `PIP_DISABLE_PIP_VERSION_CHECK`
- `PIP_PYTHON_PATH`
- `PYTHONDONTWRITEBYTECODE`
- `PYTHONFINDER_IGNORE_UNSUPPORTED`
Pipenv–specific environment variables:
Debug–specific environment variables:
- `PATH`: `/Users/andy/.nvm/versions/node/v19.1.0/bin:/Users/andy/Library/Python/3.9/bin:/Users/andy/Library/pnpm:/opt/homebrew/bin:/opt/homebrew/sbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin`
- `SHELL`: `/bin/zsh`
- `LANG`: `en_CA.UTF-8`
- `PWD`: `/Users/andy/c/pipenv/test`
---------------------------
Contents of `Pipfile` ('/Users/andy/c/pipenv/test/Pipfile'):
```toml
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
pydantic = "==1.10.2"
email-validator = "==1.3.0"
```
Contents of `Pipfile.lock` ('/Users/andy/c/pipenv/test/Pipfile.lock'):
```json
{
"_meta": {
"hash": {
"sha256": "d5f483e04b72426b3574dfadf29d845164fc106b68e966b5d0bf515817ff3cec"
},
"pipfile-spec": 6,
"requires": {},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"dnspython": {
"hashes": [
"sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e",
"sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f"
],
"markers": "python_version >= '3.6' and python_version < '4.0'",
"version": "==2.2.1"
},
"email-validator": {
"hashes": [
"sha256:553a66f8be2ec2dea641ae1d3f29017ab89e9d603d4a25cdaac39eefa283d769",
"sha256:816073f2a7cffef786b29928f58ec16cdac42710a53bb18aa94317e3e145ec5c"
],
"index": "pypi",
"version": "==1.3.0"
},
"idna": {
"hashes": [
"sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
"sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
],
"markers": "python_version >= '3.5'",
"version": "==3.4"
},
"pydantic": {
"hashes": [
"sha256:05e00dbebbe810b33c7a7362f231893183bcc4251f3f2ff991c31d5c08240c42",
"sha256:06094d18dd5e6f2bbf93efa54991c3240964bb663b87729ac340eb5014310624",
"sha256:0b959f4d8211fc964772b595ebb25f7652da3f22322c007b6fed26846a40685e",
"sha256:19b3b9ccf97af2b7519c42032441a891a5e05c68368f40865a90eb88833c2559",
"sha256:1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709",
"sha256:1ee433e274268a4b0c8fde7ad9d58ecba12b069a033ecc4645bb6303c062d2e9",
"sha256:216f3bcbf19c726b1cc22b099dd409aa371f55c08800bcea4c44c8f74b73478d",
"sha256:2d0567e60eb01bccda3a4df01df677adf6b437958d35c12a3ac3e0f078b0ee52",
"sha256:2e05aed07fa02231dbf03d0adb1be1d79cabb09025dd45aa094aa8b4e7b9dcda",
"sha256:352aedb1d71b8b0736c6d56ad2bd34c6982720644b0624462059ab29bd6e5912",
"sha256:355639d9afc76bcb9b0c3000ddcd08472ae75318a6eb67a15866b87e2efa168c",
"sha256:37c90345ec7dd2f1bcef82ce49b6235b40f282b94d3eec47e801baf864d15525",
"sha256:4b8795290deaae348c4eba0cebb196e1c6b98bdbe7f50b2d0d9a4a99716342fe",
"sha256:5760e164b807a48a8f25f8aa1a6d857e6ce62e7ec83ea5d5c5a802eac81bad41",
"sha256:6eb843dcc411b6a2237a694f5e1d649fc66c6064d02b204a7e9d194dff81eb4b",
"sha256:7b5ba54d026c2bd2cb769d3468885f23f43710f651688e91f5fb1edcf0ee9283",
"sha256:7c2abc4393dea97a4ccbb4ec7d8658d4e22c4765b7b9b9445588f16c71ad9965",
"sha256:81a7b66c3f499108b448f3f004801fcd7d7165fb4200acb03f1c2402da73ce4c",
"sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410",
"sha256:9300fcbebf85f6339a02c6994b2eb3ff1b9c8c14f502058b5bf349d42447dcf5",
"sha256:9cabf4a7f05a776e7793e72793cd92cc865ea0e83a819f9ae4ecccb1b8aa6116",
"sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98",
"sha256:a4c805731c33a8db4b6ace45ce440c4ef5336e712508b4d9e1aafa617dc9907f",
"sha256:ae544c47bec47a86bc7d350f965d8b15540e27e5aa4f55170ac6a75e5f73b644",
"sha256:b97890e56a694486f772d36efd2ba31612739bc6f3caeee50e9e7e3ebd2fdd13",
"sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd",
"sha256:bedf309630209e78582ffacda64a21f96f3ed2e51fbf3962d4d488e503420254",
"sha256:c1ba1afb396148bbc70e9eaa8c06c1716fdddabaf86e7027c5988bae2a829ab6",
"sha256:c33602f93bfb67779f9c507e4d69451664524389546bacfe1bee13cae6dc7488",
"sha256:c4aac8e7103bf598373208f6299fa9a5cfd1fc571f2d40bf1dd1955a63d6eeb5",
"sha256:c6f981882aea41e021f72779ce2a4e87267458cc4d39ea990729e21ef18f0f8c",
"sha256:cc78cc83110d2f275ec1970e7a831f4e371ee92405332ebfe9860a715f8336e1",
"sha256:d49f3db871575e0426b12e2f32fdb25e579dea16486a26e5a0474af87cb1ab0a",
"sha256:dd3f9a40c16daf323cf913593083698caee97df2804aa36c4b3175d5ac1b92a2",
"sha256:e0bedafe4bc165ad0a56ac0bd7695df25c50f76961da29c050712596cf092d6d",
"sha256:e9069e1b01525a96e6ff49e25876d90d5a563bc31c658289a8772ae186552236"
],
"index": "pypi",
"version": "==1.10.2"
},
"typing-extensions": {
"hashes": [
"sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa",
"sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"
],
"markers": "python_version >= '3.7'",
"version": "==4.4.0"
}
},
"develop": {}
}
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/utils/dependencies.py`
Content:
```
1 import os
2 from contextlib import contextmanager
3 from typing import Mapping, Sequence
4
5 from pipenv.patched.pip._vendor.packaging.markers import Marker
6 from pipenv.patched.pip._vendor.packaging.version import parse
7 from pipenv.vendor.requirementslib.models.requirements import (
8 InstallRequirement,
9 Requirement,
10 )
11
12 from .constants import SCHEME_LIST, VCS_LIST
13 from .shell import temp_path
14
15
16 def python_version(path_to_python):
17 from pipenv.vendor.pythonfinder.utils import get_python_version
18
19 if not path_to_python:
20 return None
21 try:
22 version = get_python_version(path_to_python)
23 except Exception:
24 return None
25 return version
26
27
28 def clean_pkg_version(version):
29 """Uses pip to prepare a package version string, from our internal version."""
30 return pep440_version(str(version).replace("==", ""))
31
32
33 def get_lockfile_section_using_pipfile_category(category):
34 if category == "dev-packages":
35 lockfile_section = "develop"
36 elif category == "packages":
37 lockfile_section = "default"
38 else:
39 lockfile_section = category
40 return lockfile_section
41
42
43 def get_pipfile_category_using_lockfile_section(category):
44 if category == "develop":
45 lockfile_section = "dev-packages"
46 elif category == "default":
47 lockfile_section = "packages"
48 else:
49 lockfile_section = category
50 return lockfile_section
51
52
53 class HackedPythonVersion:
54 """A Beautiful hack, which allows us to tell pip which version of Python we're using."""
55
56 def __init__(self, python_version, python_path):
57 self.python_version = python_version
58 self.python_path = python_path
59
60 def __enter__(self):
61 # Only inject when the value is valid
62 if self.python_version:
63 os.environ["PIPENV_REQUESTED_PYTHON_VERSION"] = str(self.python_version)
64 if self.python_path:
65 os.environ["PIP_PYTHON_PATH"] = str(self.python_path)
66
67 def __exit__(self, *args):
68 # Restore original Python version information.
69 try:
70 del os.environ["PIPENV_REQUESTED_PYTHON_VERSION"]
71 except KeyError:
72 pass
73
74
75 def get_canonical_names(packages):
76 """Canonicalize a list of packages and return a set of canonical names"""
77 from pipenv.patched.pip._vendor.packaging.utils import canonicalize_name
78
79 if not isinstance(packages, Sequence):
80 if not isinstance(packages, str):
81 return packages
82 packages = [packages]
83 return {canonicalize_name(pkg) for pkg in packages if pkg}
84
85
86 def pep440_version(version):
87 """Normalize version to PEP 440 standards"""
88 return str(parse(version))
89
90
91 def pep423_name(name):
92 """Normalize package name to PEP 423 style standard."""
93 name = name.lower()
94 if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
95 return name.replace("_", "-")
96
97 else:
98 return name
99
100
101 def get_vcs_deps(project=None, dev=False, pypi_mirror=None, packages=None, reqs=None):
102 from pipenv.vendor.requirementslib.models.requirements import Requirement
103
104 section = "vcs_dev_packages" if dev else "vcs_packages"
105 if reqs is None:
106 reqs = []
107 lockfile = {}
108 if not reqs:
109 if not project and not packages:
110 raise ValueError(
111 "Must supply either a project or a pipfile section to lock vcs dependencies."
112 )
113 if not packages:
114 try:
115 packages = getattr(project, section)
116 except AttributeError:
117 return [], []
118 reqs = [Requirement.from_pipfile(name, entry) for name, entry in packages.items()]
119 result = []
120 for requirement in reqs:
121 name = requirement.normalized_name
122 commit_hash = None
123 if requirement.is_vcs:
124 try:
125 with temp_path(), locked_repository(requirement) as repo:
126 from pipenv.vendor.requirementslib.models.requirements import (
127 Requirement,
128 )
129
130 # from distutils.sysconfig import get_python_lib
131 # sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)]
132 commit_hash = repo.get_commit_hash()
133 name = requirement.normalized_name
134 lockfile[name] = requirement.pipfile_entry[1]
135 lockfile[name]["ref"] = commit_hash
136 result.append(requirement)
137 except OSError:
138 continue
139 return result, lockfile
140
141
142 def translate_markers(pipfile_entry):
143 """Take a pipfile entry and normalize its markers
144
145 Provide a pipfile entry which may have 'markers' as a key or it may have
146 any valid key from `packaging.markers.marker_context.keys()` and standardize
147 the format into {'markers': 'key == "some_value"'}.
148
149 :param pipfile_entry: A dictionariy of keys and values representing a pipfile entry
150 :type pipfile_entry: dict
151 :returns: A normalized dictionary with cleaned marker entries
152 """
153 if not isinstance(pipfile_entry, Mapping):
154 raise TypeError("Entry is not a pipfile formatted mapping.")
155 from pipenv.patched.pip._vendor.packaging.markers import default_environment
156
157 allowed_marker_keys = ["markers"] + list(default_environment().keys())
158 provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, "keys") else []
159 pipfile_markers = set(provided_keys) & set(allowed_marker_keys)
160 new_pipfile = dict(pipfile_entry).copy()
161 marker_set = set()
162 if "markers" in new_pipfile:
163 marker_str = new_pipfile.pop("markers")
164 if marker_str:
165 marker = str(Marker(marker_str))
166 if "extra" not in marker:
167 marker_set.add(marker)
168 for m in pipfile_markers:
169 entry = f"{pipfile_entry[m]}"
170 if m != "markers":
171 marker_set.add(str(Marker(f"{m} {entry}")))
172 new_pipfile.pop(m)
173 if marker_set:
174 new_pipfile["markers"] = str(
175 Marker(
176 " or ".join(
177 f"{s}" if " and " in s else s
178 for s in sorted(dict.fromkeys(marker_set))
179 )
180 )
181 ).replace('"', "'")
182 return new_pipfile
183
184
185 def clean_resolved_dep(dep, is_top_level=False, pipfile_entry=None):
186 from pipenv.vendor.requirementslib.utils import is_vcs
187
188 name = pep423_name(dep["name"])
189 lockfile = {}
190 # We use this to determine if there are any markers on top level packages
191 # So we can make sure those win out during resolution if the packages reoccur
192 if "version" in dep and dep["version"] and not dep.get("editable", False):
193 version = "{}".format(dep["version"])
194 if not version.startswith("=="):
195 version = f"=={version}"
196 lockfile["version"] = version
197 if is_vcs(dep):
198 ref = dep.get("ref", None)
199 if ref is not None:
200 lockfile["ref"] = ref
201 vcs_type = next(iter(k for k in dep.keys() if k in VCS_LIST), None)
202 if vcs_type:
203 lockfile[vcs_type] = dep[vcs_type]
204 if "subdirectory" in dep:
205 lockfile["subdirectory"] = dep["subdirectory"]
206 for key in ["hashes", "index", "extras", "editable"]:
207 if key in dep:
208 lockfile[key] = dep[key]
209 # In case we lock a uri or a file when the user supplied a path
210 # remove the uri or file keys from the entry and keep the path
211 preferred_file_keys = ["path", "file"]
212 dependency_file_key = next(iter(k for k in preferred_file_keys if k in dep), None)
213 if dependency_file_key:
214 lockfile[dependency_file_key] = dep[dependency_file_key]
215 # Pipfile entry overrides path/file from resolver
216 if pipfile_entry and isinstance(pipfile_entry, dict):
217 for k in preferred_file_keys:
218 if k in pipfile_entry.keys():
219 lockfile[k] = pipfile_entry[k]
220 break
221 # If a package is **PRESENT** in the pipfile but has no markers, make sure we
222 # **NEVER** include markers in the lockfile
223 if "markers" in dep and dep.get("markers", "").strip():
224 # First, handle the case where there is no top level dependency in the pipfile
225 if not is_top_level:
226 translated = translate_markers(dep).get("markers", "").strip()
227 if translated:
228 try:
229 lockfile["markers"] = translated
230 except TypeError:
231 pass
232 # otherwise make sure we are prioritizing whatever the pipfile says about the markers
233 # If the pipfile says nothing, then we should put nothing in the lockfile
234 else:
235 try:
236 pipfile_entry = translate_markers(pipfile_entry)
237 lockfile["markers"] = pipfile_entry.get("markers")
238 except TypeError:
239 pass
240 return {name: lockfile}
241
242
243 def is_star(val):
244 return isinstance(val, str) and val == "*"
245
246
247 def is_pinned(val):
248 if isinstance(val, Mapping):
249 val = val.get("version")
250 return isinstance(val, str) and val.startswith("==")
251
252
253 def is_pinned_requirement(ireq):
254 """
255 Returns whether an InstallRequirement is a "pinned" requirement.
256 """
257 if ireq.editable:
258 return False
259
260 if ireq.req is None or len(ireq.specifier) != 1:
261 return False
262
263 spec = next(iter(ireq.specifier))
264 return spec.operator in {"==", "==="} and not spec.version.endswith(".*")
265
266
267 def convert_deps_to_pip(
268 deps,
269 project=None,
270 include_index=True,
271 include_hashes=True,
272 include_markers=True,
273 ):
274 """ "Converts a Pipfile-formatted dependency to a pip-formatted one."""
275 dependencies = []
276 for dep_name, dep in deps.items():
277 if project:
278 project.clear_pipfile_cache()
279 indexes = []
280 if project:
281 indexes = project.pipfile_sources()
282 new_dep = Requirement.from_pipfile(dep_name, dep)
283 if new_dep.index:
284 include_index = True
285 sources = indexes if include_index else None
286 req = new_dep.as_line(
287 sources=sources,
288 include_hashes=include_hashes,
289 include_markers=include_markers,
290 ).strip()
291 dependencies.append(req)
292 return dependencies
293
294
295 def get_constraints_from_deps(deps):
296 """Get contraints from Pipfile-formatted dependency"""
297
298 def is_constraints(dep: InstallRequirement) -> bool:
299 return dep.name and not dep.editable and not dep.extras
300
301 constraints = []
302 for dep_name, dep in deps.items():
303 new_dep = Requirement.from_pipfile(dep_name, dep)
304 if new_dep.is_named and is_constraints(new_dep.as_ireq()):
305 c = new_dep.as_line().strip()
306 constraints.append(c)
307 return constraints
308
309
310 def prepare_constraint_file(
311 constraints,
312 directory=None,
313 sources=None,
314 pip_args=None,
315 ):
316 from pipenv.vendor.vistir.path import (
317 create_tracked_tempdir,
318 create_tracked_tempfile,
319 )
320
321 if not directory:
322 directory = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-")
323
324 constraints_file = create_tracked_tempfile(
325 mode="w",
326 prefix="pipenv-",
327 suffix="-constraints.txt",
328 dir=directory,
329 delete=False,
330 )
331
332 if sources and pip_args:
333 skip_args = ("build-isolation", "use-pep517", "cache-dir")
334 args_to_add = [
335 arg for arg in pip_args if not any(bad_arg in arg for bad_arg in skip_args)
336 ]
337 requirementstxt_sources = " ".join(args_to_add) if args_to_add else ""
338 requirementstxt_sources = requirementstxt_sources.replace(" --", "\n--")
339 constraints_file.write(f"{requirementstxt_sources}\n")
340
341 constraints_file.write("\n".join([c for c in constraints]))
342 constraints_file.close()
343 return constraints_file.name
344
345
346 def is_required_version(version, specified_version):
347 """Check to see if there's a hard requirement for version
348 number provided in the Pipfile.
349 """
350 # Certain packages may be defined with multiple values.
351 if isinstance(specified_version, dict):
352 specified_version = specified_version.get("version", "")
353 if specified_version.startswith("=="):
354 return version.strip() == specified_version.split("==")[1].strip()
355
356 return True
357
358
359 def is_editable(pipfile_entry):
360 if hasattr(pipfile_entry, "get"):
361 return pipfile_entry.get("editable", False) or any(
362 pipfile_entry.get(key) for key in ("file", "path") + VCS_LIST
363 )
364 return False
365
366
367 @contextmanager
368 def locked_repository(requirement):
369 from pipenv.vendor.vistir.path import create_tracked_tempdir
370
371 if not requirement.is_vcs:
372 return
373 src_dir = create_tracked_tempdir(prefix="pipenv-", suffix="-src")
374 with requirement.req.locked_vcs_repo(src_dir=src_dir) as repo:
375 yield repo
376
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pipenv/utils/dependencies.py b/pipenv/utils/dependencies.py
--- a/pipenv/utils/dependencies.py
+++ b/pipenv/utils/dependencies.py
@@ -234,7 +234,8 @@
else:
try:
pipfile_entry = translate_markers(pipfile_entry)
- lockfile["markers"] = pipfile_entry.get("markers")
+ if pipfile_entry.get("markers"):
+ lockfile["markers"] = pipfile_entry.get("markers")
except TypeError:
pass
return {name: lockfile}
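A minimal sketch of what the guard in the patch above changes, for quick reference. The `pipfile_entry` dict is a made-up stand-in and the surrounding `clean_resolved_dep()` machinery is assumed rather than reproduced:
```python
import json

# Hypothetical top-level entry after translate_markers(): extras were
# requested but no markers were ever set, so .get("markers") returns None.
pipfile_entry = {"extras": ["email"], "version": "==1.10.2"}

lockfile = {}
markers = pipfile_entry.get("markers")
if markers:  # the patched guard skips None and empty strings
    lockfile["markers"] = markers

# Without the guard the entry would carry {"markers": None}, which
# json.dumps() writes out as "markers": null -- the value reported above.
print(json.dumps(lockfile, indent=4, sort_keys=True))
```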
| {"golden_diff": "diff --git a/pipenv/utils/dependencies.py b/pipenv/utils/dependencies.py\n--- a/pipenv/utils/dependencies.py\n+++ b/pipenv/utils/dependencies.py\n@@ -234,7 +234,8 @@\n else:\n try:\n pipfile_entry = translate_markers(pipfile_entry)\n- lockfile[\"markers\"] = pipfile_entry.get(\"markers\")\n+ if pipfile_entry.get(\"markers\"):\n+ lockfile[\"markers\"] = pipfile_entry.get(\"markers\")\n except TypeError:\n pass\n return {name: lockfile}\n", "issue": "Should `pipenv lock` be generating a marker of `null` when `extras` used?\n### Issue description\r\n\r\nA `Pipfile.lock` is being generated with a `marker` of `null`. When being passed to other tools to parse that lockfile, such as `pipfile2req` its erroring on that entry. I took a quick look at PEP 508 and it didn't say anything about a `marker` of `null` but did suggest it should only be a string.\r\n\r\n### Expected result\r\n\r\nI'm guessing, if the result is `null` just don't include markers.\r\n\r\n### Actual result\r\n\r\nWhen possible, provide the verbose output (`--verbose`), especially for locking and dependencies resolving issues.\r\n\r\n### Steps to replicate\r\n\r\n```\r\n\u279c test git:(main) \u2717 cat Pipfile\r\n[[source]]\r\nurl = \"https://pypi.org/simple\"\r\nverify_ssl = true\r\nname = \"pypi\"\r\n\r\n[packages]\r\npydantic = \"==1.10.2\"\r\nemail-validator = \"==1.3.0\"\r\n```\r\n\r\n```\r\n\u279c test git:(main) \u2717 pipenv lock\r\nLocking [packages] dependencies...\r\nBuilding requirements...\r\nResolving dependencies...\r\n\u2714 Success!\r\nLocking [dev-packages] dependencies...\r\nUpdated Pipfile.lock (d5f483e04b72426b3574dfadf29d845164fc106b68e966b5d0bf515817ff3cec)!\r\n```\r\n\r\n```\r\n\"pydantic\": {\r\n ...snipped\r\n ],\r\n \"index\": \"pypi\",\r\n \"version\": \"==1.10.2\"\r\n },\r\n```\r\n\r\nLooks great. Now let's specify `{extras}`\r\n\r\n```\r\n[[source]]\r\nurl = \"https://pypi.org/simple\"\r\nverify_ssl = true\r\nname = \"pypi\"\r\n\r\n[packages]\r\npydantic = {extras = [\"email\"],version = \"==1.10.2\"}\r\n```\r\n\r\n```\r\n\u279c test git:(main) \u2717 pipenv lock\r\nLocking [packages] dependencies...\r\nBuilding requirements...\r\nResolving dependencies...\r\n\u2714 Success!\r\nLocking [dev-packages] dependencies...\r\nUpdated Pipfile.lock (b7af3ff13b8fd9cd4e13a037c9d0a95bdad213656719e0e6d330e0ab5d8b93b7)!\r\n```\r\n\r\n```\r\n\"pydantic\": {\r\n \"extras\": [\r\n \"email\"\r\n ],\r\n \"hashes\": [\r\n... 
snipped\r\n ],\r\n \"index\": \"pypi\",\r\n \"markers\": null,\r\n \"version\": \"==1.10.2\"\r\n },\r\n```\r\n\r\nWe now have a `markers` `null` in the output.\r\n\r\n\r\n---\r\n\r\n<details><summary>$ pipenv --support</summary>\r\n\r\nPipenv version: `'2022.11.11'`\r\n\r\nPipenv location: `'/Users/andy/Library/Python/3.9/lib/python/site-packages/pipenv'`\r\n\r\nPython location: `'/Library/Developer/CommandLineTools/usr/bin/python3'`\r\n\r\nOS Name: `'posix'`\r\n\r\nUser pip version: `'22.3'`\r\n\r\nuser Python installations found:\r\n\r\n - `3.9.6`: `/usr/bin/python3`\r\n\r\nPEP 508 Information:\r\n\r\n```\r\n{'implementation_name': 'cpython',\r\n 'implementation_version': '3.9.6',\r\n 'os_name': 'posix',\r\n 'platform_machine': 'arm64',\r\n 'platform_python_implementation': 'CPython',\r\n 'platform_release': '21.6.0',\r\n 'platform_system': 'Darwin',\r\n 'platform_version': 'Darwin Kernel Version 21.6.0: Mon Aug 22 20:19:52 PDT '\r\n '2022; root:xnu-8020.140.49~2/RELEASE_ARM64_T6000',\r\n 'python_full_version': '3.9.6',\r\n 'python_version': '3.9',\r\n 'sys_platform': 'darwin'}\r\n```\r\n\r\nSystem environment variables:\r\n\r\n - `TERM_SESSION_ID`\r\n - `SSH_AUTH_SOCK`\r\n - `LC_TERMINAL_VERSION`\r\n - `COLORFGBG`\r\n - `ITERM_PROFILE`\r\n - `XPC_FLAGS`\r\n - `LANG`\r\n - `PWD`\r\n - `SHELL`\r\n - `__CFBundleIdentifier`\r\n - `TERM_PROGRAM_VERSION`\r\n - `TERM_PROGRAM`\r\n - `PATH`\r\n - `LC_TERMINAL`\r\n - `COLORTERM`\r\n - `COMMAND_MODE`\r\n - `TERM`\r\n - `HOME`\r\n - `TMPDIR`\r\n - `USER`\r\n - `XPC_SERVICE_NAME`\r\n - `LOGNAME`\r\n - `ITERM_SESSION_ID`\r\n - `__CF_USER_TEXT_ENCODING`\r\n - `SHLVL`\r\n - `OLDPWD`\r\n - `HOMEBREW_PREFIX`\r\n - `HOMEBREW_CELLAR`\r\n - `HOMEBREW_REPOSITORY`\r\n - `MANPATH`\r\n - `INFOPATH`\r\n - `ZSH`\r\n - `PAGER`\r\n - `LESS`\r\n - `LSCOLORS`\r\n - `GITHUB_TOKEN`\r\n - `VERCEL_TOKEN`\r\n - `PNPM_HOME`\r\n - `NVM_DIR`\r\n - `NVM_CD_FLAGS`\r\n - `NVM_BIN`\r\n - `NVM_INC`\r\n - `_`\r\n - `PIP_DISABLE_PIP_VERSION_CHECK`\r\n - `PIP_PYTHON_PATH`\r\n - `PYTHONDONTWRITEBYTECODE`\r\n - `PYTHONFINDER_IGNORE_UNSUPPORTED`\r\n\r\nPipenv\u2013specific environment variables:\r\n\r\n\r\nDebug\u2013specific environment variables:\r\n\r\n - `PATH`: `/Users/andy/.nvm/versions/node/v19.1.0/bin:/Users/andy/Library/Python/3.9/bin:/Users/andy/Library/pnpm:/opt/homebrew/bin:/opt/homebrew/sbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin`\r\n - `SHELL`: `/bin/zsh`\r\n - `LANG`: `en_CA.UTF-8`\r\n - `PWD`: `/Users/andy/c/pipenv/test`\r\n\r\n\r\n---------------------------\r\n\r\nContents of `Pipfile` ('/Users/andy/c/pipenv/test/Pipfile'):\r\n\r\n```toml\r\n[[source]]\r\nurl = \"https://pypi.org/simple\"\r\nverify_ssl = true\r\nname = \"pypi\"\r\n\r\n[packages]\r\npydantic = \"==1.10.2\"\r\nemail-validator = \"==1.3.0\"\r\n\r\n```\r\n\r\n\r\nContents of `Pipfile.lock` ('/Users/andy/c/pipenv/test/Pipfile.lock'):\r\n\r\n```json\r\n{\r\n \"_meta\": {\r\n \"hash\": {\r\n \"sha256\": \"d5f483e04b72426b3574dfadf29d845164fc106b68e966b5d0bf515817ff3cec\"\r\n },\r\n \"pipfile-spec\": 6,\r\n \"requires\": {},\r\n \"sources\": [\r\n {\r\n \"name\": \"pypi\",\r\n \"url\": \"https://pypi.org/simple\",\r\n \"verify_ssl\": true\r\n }\r\n ]\r\n },\r\n \"default\": {\r\n \"dnspython\": {\r\n \"hashes\": [\r\n \"sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e\",\r\n \"sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f\"\r\n ],\r\n \"markers\": \"python_version >= '3.6' and python_version < '4.0'\",\r\n \"version\": \"==2.2.1\"\r\n },\r\n 
\"email-validator\": {\r\n \"hashes\": [\r\n \"sha256:553a66f8be2ec2dea641ae1d3f29017ab89e9d603d4a25cdaac39eefa283d769\",\r\n \"sha256:816073f2a7cffef786b29928f58ec16cdac42710a53bb18aa94317e3e145ec5c\"\r\n ],\r\n \"index\": \"pypi\",\r\n \"version\": \"==1.3.0\"\r\n },\r\n \"idna\": {\r\n \"hashes\": [\r\n \"sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4\",\r\n \"sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2\"\r\n ],\r\n \"markers\": \"python_version >= '3.5'\",\r\n \"version\": \"==3.4\"\r\n },\r\n \"pydantic\": {\r\n \"hashes\": [\r\n \"sha256:05e00dbebbe810b33c7a7362f231893183bcc4251f3f2ff991c31d5c08240c42\",\r\n \"sha256:06094d18dd5e6f2bbf93efa54991c3240964bb663b87729ac340eb5014310624\",\r\n \"sha256:0b959f4d8211fc964772b595ebb25f7652da3f22322c007b6fed26846a40685e\",\r\n \"sha256:19b3b9ccf97af2b7519c42032441a891a5e05c68368f40865a90eb88833c2559\",\r\n \"sha256:1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709\",\r\n \"sha256:1ee433e274268a4b0c8fde7ad9d58ecba12b069a033ecc4645bb6303c062d2e9\",\r\n \"sha256:216f3bcbf19c726b1cc22b099dd409aa371f55c08800bcea4c44c8f74b73478d\",\r\n \"sha256:2d0567e60eb01bccda3a4df01df677adf6b437958d35c12a3ac3e0f078b0ee52\",\r\n \"sha256:2e05aed07fa02231dbf03d0adb1be1d79cabb09025dd45aa094aa8b4e7b9dcda\",\r\n \"sha256:352aedb1d71b8b0736c6d56ad2bd34c6982720644b0624462059ab29bd6e5912\",\r\n \"sha256:355639d9afc76bcb9b0c3000ddcd08472ae75318a6eb67a15866b87e2efa168c\",\r\n \"sha256:37c90345ec7dd2f1bcef82ce49b6235b40f282b94d3eec47e801baf864d15525\",\r\n \"sha256:4b8795290deaae348c4eba0cebb196e1c6b98bdbe7f50b2d0d9a4a99716342fe\",\r\n \"sha256:5760e164b807a48a8f25f8aa1a6d857e6ce62e7ec83ea5d5c5a802eac81bad41\",\r\n \"sha256:6eb843dcc411b6a2237a694f5e1d649fc66c6064d02b204a7e9d194dff81eb4b\",\r\n \"sha256:7b5ba54d026c2bd2cb769d3468885f23f43710f651688e91f5fb1edcf0ee9283\",\r\n \"sha256:7c2abc4393dea97a4ccbb4ec7d8658d4e22c4765b7b9b9445588f16c71ad9965\",\r\n \"sha256:81a7b66c3f499108b448f3f004801fcd7d7165fb4200acb03f1c2402da73ce4c\",\r\n \"sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410\",\r\n \"sha256:9300fcbebf85f6339a02c6994b2eb3ff1b9c8c14f502058b5bf349d42447dcf5\",\r\n \"sha256:9cabf4a7f05a776e7793e72793cd92cc865ea0e83a819f9ae4ecccb1b8aa6116\",\r\n \"sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98\",\r\n \"sha256:a4c805731c33a8db4b6ace45ce440c4ef5336e712508b4d9e1aafa617dc9907f\",\r\n \"sha256:ae544c47bec47a86bc7d350f965d8b15540e27e5aa4f55170ac6a75e5f73b644\",\r\n \"sha256:b97890e56a694486f772d36efd2ba31612739bc6f3caeee50e9e7e3ebd2fdd13\",\r\n \"sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd\",\r\n \"sha256:bedf309630209e78582ffacda64a21f96f3ed2e51fbf3962d4d488e503420254\",\r\n \"sha256:c1ba1afb396148bbc70e9eaa8c06c1716fdddabaf86e7027c5988bae2a829ab6\",\r\n \"sha256:c33602f93bfb67779f9c507e4d69451664524389546bacfe1bee13cae6dc7488\",\r\n \"sha256:c4aac8e7103bf598373208f6299fa9a5cfd1fc571f2d40bf1dd1955a63d6eeb5\",\r\n \"sha256:c6f981882aea41e021f72779ce2a4e87267458cc4d39ea990729e21ef18f0f8c\",\r\n \"sha256:cc78cc83110d2f275ec1970e7a831f4e371ee92405332ebfe9860a715f8336e1\",\r\n \"sha256:d49f3db871575e0426b12e2f32fdb25e579dea16486a26e5a0474af87cb1ab0a\",\r\n \"sha256:dd3f9a40c16daf323cf913593083698caee97df2804aa36c4b3175d5ac1b92a2\",\r\n \"sha256:e0bedafe4bc165ad0a56ac0bd7695df25c50f76961da29c050712596cf092d6d\",\r\n \"sha256:e9069e1b01525a96e6ff49e25876d90d5a563bc31c658289a8772ae186552236\"\r\n ],\r\n \"index\": \"pypi\",\r\n \"version\": 
\"==1.10.2\"\r\n },\r\n \"typing-extensions\": {\r\n \"hashes\": [\r\n \"sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa\",\r\n \"sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e\"\r\n ],\r\n \"markers\": \"python_version >= '3.7'\",\r\n \"version\": \"==4.4.0\"\r\n }\r\n },\r\n \"develop\": {}\r\n}\r\n\r\n```\r\n</details>\r\n\n", "before_files": [{"content": "import os\nfrom contextlib import contextmanager\nfrom typing import Mapping, Sequence\n\nfrom pipenv.patched.pip._vendor.packaging.markers import Marker\nfrom pipenv.patched.pip._vendor.packaging.version import parse\nfrom pipenv.vendor.requirementslib.models.requirements import (\n InstallRequirement,\n Requirement,\n)\n\nfrom .constants import SCHEME_LIST, VCS_LIST\nfrom .shell import temp_path\n\n\ndef python_version(path_to_python):\n from pipenv.vendor.pythonfinder.utils import get_python_version\n\n if not path_to_python:\n return None\n try:\n version = get_python_version(path_to_python)\n except Exception:\n return None\n return version\n\n\ndef clean_pkg_version(version):\n \"\"\"Uses pip to prepare a package version string, from our internal version.\"\"\"\n return pep440_version(str(version).replace(\"==\", \"\"))\n\n\ndef get_lockfile_section_using_pipfile_category(category):\n if category == \"dev-packages\":\n lockfile_section = \"develop\"\n elif category == \"packages\":\n lockfile_section = \"default\"\n else:\n lockfile_section = category\n return lockfile_section\n\n\ndef get_pipfile_category_using_lockfile_section(category):\n if category == \"develop\":\n lockfile_section = \"dev-packages\"\n elif category == \"default\":\n lockfile_section = \"packages\"\n else:\n lockfile_section = category\n return lockfile_section\n\n\nclass HackedPythonVersion:\n \"\"\"A Beautiful hack, which allows us to tell pip which version of Python we're using.\"\"\"\n\n def __init__(self, python_version, python_path):\n self.python_version = python_version\n self.python_path = python_path\n\n def __enter__(self):\n # Only inject when the value is valid\n if self.python_version:\n os.environ[\"PIPENV_REQUESTED_PYTHON_VERSION\"] = str(self.python_version)\n if self.python_path:\n os.environ[\"PIP_PYTHON_PATH\"] = str(self.python_path)\n\n def __exit__(self, *args):\n # Restore original Python version information.\n try:\n del os.environ[\"PIPENV_REQUESTED_PYTHON_VERSION\"]\n except KeyError:\n pass\n\n\ndef get_canonical_names(packages):\n \"\"\"Canonicalize a list of packages and return a set of canonical names\"\"\"\n from pipenv.patched.pip._vendor.packaging.utils import canonicalize_name\n\n if not isinstance(packages, Sequence):\n if not isinstance(packages, str):\n return packages\n packages = [packages]\n return {canonicalize_name(pkg) for pkg in packages if pkg}\n\n\ndef pep440_version(version):\n \"\"\"Normalize version to PEP 440 standards\"\"\"\n return str(parse(version))\n\n\ndef pep423_name(name):\n \"\"\"Normalize package name to PEP 423 style standard.\"\"\"\n name = name.lower()\n if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):\n return name.replace(\"_\", \"-\")\n\n else:\n return name\n\n\ndef get_vcs_deps(project=None, dev=False, pypi_mirror=None, packages=None, reqs=None):\n from pipenv.vendor.requirementslib.models.requirements import Requirement\n\n section = \"vcs_dev_packages\" if dev else \"vcs_packages\"\n if reqs is None:\n reqs = []\n lockfile = {}\n if not reqs:\n if not project and not packages:\n raise ValueError(\n \"Must supply either a project 
or a pipfile section to lock vcs dependencies.\"\n )\n if not packages:\n try:\n packages = getattr(project, section)\n except AttributeError:\n return [], []\n reqs = [Requirement.from_pipfile(name, entry) for name, entry in packages.items()]\n result = []\n for requirement in reqs:\n name = requirement.normalized_name\n commit_hash = None\n if requirement.is_vcs:\n try:\n with temp_path(), locked_repository(requirement) as repo:\n from pipenv.vendor.requirementslib.models.requirements import (\n Requirement,\n )\n\n # from distutils.sysconfig import get_python_lib\n # sys.path = [repo.checkout_directory, \"\", \".\", get_python_lib(plat_specific=0)]\n commit_hash = repo.get_commit_hash()\n name = requirement.normalized_name\n lockfile[name] = requirement.pipfile_entry[1]\n lockfile[name][\"ref\"] = commit_hash\n result.append(requirement)\n except OSError:\n continue\n return result, lockfile\n\n\ndef translate_markers(pipfile_entry):\n \"\"\"Take a pipfile entry and normalize its markers\n\n Provide a pipfile entry which may have 'markers' as a key or it may have\n any valid key from `packaging.markers.marker_context.keys()` and standardize\n the format into {'markers': 'key == \"some_value\"'}.\n\n :param pipfile_entry: A dictionariy of keys and values representing a pipfile entry\n :type pipfile_entry: dict\n :returns: A normalized dictionary with cleaned marker entries\n \"\"\"\n if not isinstance(pipfile_entry, Mapping):\n raise TypeError(\"Entry is not a pipfile formatted mapping.\")\n from pipenv.patched.pip._vendor.packaging.markers import default_environment\n\n allowed_marker_keys = [\"markers\"] + list(default_environment().keys())\n provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, \"keys\") else []\n pipfile_markers = set(provided_keys) & set(allowed_marker_keys)\n new_pipfile = dict(pipfile_entry).copy()\n marker_set = set()\n if \"markers\" in new_pipfile:\n marker_str = new_pipfile.pop(\"markers\")\n if marker_str:\n marker = str(Marker(marker_str))\n if \"extra\" not in marker:\n marker_set.add(marker)\n for m in pipfile_markers:\n entry = f\"{pipfile_entry[m]}\"\n if m != \"markers\":\n marker_set.add(str(Marker(f\"{m} {entry}\")))\n new_pipfile.pop(m)\n if marker_set:\n new_pipfile[\"markers\"] = str(\n Marker(\n \" or \".join(\n f\"{s}\" if \" and \" in s else s\n for s in sorted(dict.fromkeys(marker_set))\n )\n )\n ).replace('\"', \"'\")\n return new_pipfile\n\n\ndef clean_resolved_dep(dep, is_top_level=False, pipfile_entry=None):\n from pipenv.vendor.requirementslib.utils import is_vcs\n\n name = pep423_name(dep[\"name\"])\n lockfile = {}\n # We use this to determine if there are any markers on top level packages\n # So we can make sure those win out during resolution if the packages reoccur\n if \"version\" in dep and dep[\"version\"] and not dep.get(\"editable\", False):\n version = \"{}\".format(dep[\"version\"])\n if not version.startswith(\"==\"):\n version = f\"=={version}\"\n lockfile[\"version\"] = version\n if is_vcs(dep):\n ref = dep.get(\"ref\", None)\n if ref is not None:\n lockfile[\"ref\"] = ref\n vcs_type = next(iter(k for k in dep.keys() if k in VCS_LIST), None)\n if vcs_type:\n lockfile[vcs_type] = dep[vcs_type]\n if \"subdirectory\" in dep:\n lockfile[\"subdirectory\"] = dep[\"subdirectory\"]\n for key in [\"hashes\", \"index\", \"extras\", \"editable\"]:\n if key in dep:\n lockfile[key] = dep[key]\n # In case we lock a uri or a file when the user supplied a path\n # remove the uri or file keys from the entry and keep the path\n 
preferred_file_keys = [\"path\", \"file\"]\n dependency_file_key = next(iter(k for k in preferred_file_keys if k in dep), None)\n if dependency_file_key:\n lockfile[dependency_file_key] = dep[dependency_file_key]\n # Pipfile entry overrides path/file from resolver\n if pipfile_entry and isinstance(pipfile_entry, dict):\n for k in preferred_file_keys:\n if k in pipfile_entry.keys():\n lockfile[k] = pipfile_entry[k]\n break\n # If a package is **PRESENT** in the pipfile but has no markers, make sure we\n # **NEVER** include markers in the lockfile\n if \"markers\" in dep and dep.get(\"markers\", \"\").strip():\n # First, handle the case where there is no top level dependency in the pipfile\n if not is_top_level:\n translated = translate_markers(dep).get(\"markers\", \"\").strip()\n if translated:\n try:\n lockfile[\"markers\"] = translated\n except TypeError:\n pass\n # otherwise make sure we are prioritizing whatever the pipfile says about the markers\n # If the pipfile says nothing, then we should put nothing in the lockfile\n else:\n try:\n pipfile_entry = translate_markers(pipfile_entry)\n lockfile[\"markers\"] = pipfile_entry.get(\"markers\")\n except TypeError:\n pass\n return {name: lockfile}\n\n\ndef is_star(val):\n return isinstance(val, str) and val == \"*\"\n\n\ndef is_pinned(val):\n if isinstance(val, Mapping):\n val = val.get(\"version\")\n return isinstance(val, str) and val.startswith(\"==\")\n\n\ndef is_pinned_requirement(ireq):\n \"\"\"\n Returns whether an InstallRequirement is a \"pinned\" requirement.\n \"\"\"\n if ireq.editable:\n return False\n\n if ireq.req is None or len(ireq.specifier) != 1:\n return False\n\n spec = next(iter(ireq.specifier))\n return spec.operator in {\"==\", \"===\"} and not spec.version.endswith(\".*\")\n\n\ndef convert_deps_to_pip(\n deps,\n project=None,\n include_index=True,\n include_hashes=True,\n include_markers=True,\n):\n \"\"\" \"Converts a Pipfile-formatted dependency to a pip-formatted one.\"\"\"\n dependencies = []\n for dep_name, dep in deps.items():\n if project:\n project.clear_pipfile_cache()\n indexes = []\n if project:\n indexes = project.pipfile_sources()\n new_dep = Requirement.from_pipfile(dep_name, dep)\n if new_dep.index:\n include_index = True\n sources = indexes if include_index else None\n req = new_dep.as_line(\n sources=sources,\n include_hashes=include_hashes,\n include_markers=include_markers,\n ).strip()\n dependencies.append(req)\n return dependencies\n\n\ndef get_constraints_from_deps(deps):\n \"\"\"Get contraints from Pipfile-formatted dependency\"\"\"\n\n def is_constraints(dep: InstallRequirement) -> bool:\n return dep.name and not dep.editable and not dep.extras\n\n constraints = []\n for dep_name, dep in deps.items():\n new_dep = Requirement.from_pipfile(dep_name, dep)\n if new_dep.is_named and is_constraints(new_dep.as_ireq()):\n c = new_dep.as_line().strip()\n constraints.append(c)\n return constraints\n\n\ndef prepare_constraint_file(\n constraints,\n directory=None,\n sources=None,\n pip_args=None,\n):\n from pipenv.vendor.vistir.path import (\n create_tracked_tempdir,\n create_tracked_tempfile,\n )\n\n if not directory:\n directory = create_tracked_tempdir(suffix=\"-requirements\", prefix=\"pipenv-\")\n\n constraints_file = create_tracked_tempfile(\n mode=\"w\",\n prefix=\"pipenv-\",\n suffix=\"-constraints.txt\",\n dir=directory,\n delete=False,\n )\n\n if sources and pip_args:\n skip_args = (\"build-isolation\", \"use-pep517\", \"cache-dir\")\n args_to_add = [\n arg for arg in pip_args if not 
any(bad_arg in arg for bad_arg in skip_args)\n ]\n requirementstxt_sources = \" \".join(args_to_add) if args_to_add else \"\"\n requirementstxt_sources = requirementstxt_sources.replace(\" --\", \"\\n--\")\n constraints_file.write(f\"{requirementstxt_sources}\\n\")\n\n constraints_file.write(\"\\n\".join([c for c in constraints]))\n constraints_file.close()\n return constraints_file.name\n\n\ndef is_required_version(version, specified_version):\n \"\"\"Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n \"\"\"\n # Certain packages may be defined with multiple values.\n if isinstance(specified_version, dict):\n specified_version = specified_version.get(\"version\", \"\")\n if specified_version.startswith(\"==\"):\n return version.strip() == specified_version.split(\"==\")[1].strip()\n\n return True\n\n\ndef is_editable(pipfile_entry):\n if hasattr(pipfile_entry, \"get\"):\n return pipfile_entry.get(\"editable\", False) or any(\n pipfile_entry.get(key) for key in (\"file\", \"path\") + VCS_LIST\n )\n return False\n\n\n@contextmanager\ndef locked_repository(requirement):\n from pipenv.vendor.vistir.path import create_tracked_tempdir\n\n if not requirement.is_vcs:\n return\n src_dir = create_tracked_tempdir(prefix=\"pipenv-\", suffix=\"-src\")\n with requirement.req.locked_vcs_repo(src_dir=src_dir) as repo:\n yield repo\n", "path": "pipenv/utils/dependencies.py"}], "after_files": [{"content": "import os\nfrom contextlib import contextmanager\nfrom typing import Mapping, Sequence\n\nfrom pipenv.patched.pip._vendor.packaging.markers import Marker\nfrom pipenv.patched.pip._vendor.packaging.version import parse\nfrom pipenv.vendor.requirementslib.models.requirements import (\n InstallRequirement,\n Requirement,\n)\n\nfrom .constants import SCHEME_LIST, VCS_LIST\nfrom .shell import temp_path\n\n\ndef python_version(path_to_python):\n from pipenv.vendor.pythonfinder.utils import get_python_version\n\n if not path_to_python:\n return None\n try:\n version = get_python_version(path_to_python)\n except Exception:\n return None\n return version\n\n\ndef clean_pkg_version(version):\n \"\"\"Uses pip to prepare a package version string, from our internal version.\"\"\"\n return pep440_version(str(version).replace(\"==\", \"\"))\n\n\ndef get_lockfile_section_using_pipfile_category(category):\n if category == \"dev-packages\":\n lockfile_section = \"develop\"\n elif category == \"packages\":\n lockfile_section = \"default\"\n else:\n lockfile_section = category\n return lockfile_section\n\n\ndef get_pipfile_category_using_lockfile_section(category):\n if category == \"develop\":\n lockfile_section = \"dev-packages\"\n elif category == \"default\":\n lockfile_section = \"packages\"\n else:\n lockfile_section = category\n return lockfile_section\n\n\nclass HackedPythonVersion:\n \"\"\"A Beautiful hack, which allows us to tell pip which version of Python we're using.\"\"\"\n\n def __init__(self, python_version, python_path):\n self.python_version = python_version\n self.python_path = python_path\n\n def __enter__(self):\n # Only inject when the value is valid\n if self.python_version:\n os.environ[\"PIPENV_REQUESTED_PYTHON_VERSION\"] = str(self.python_version)\n if self.python_path:\n os.environ[\"PIP_PYTHON_PATH\"] = str(self.python_path)\n\n def __exit__(self, *args):\n # Restore original Python version information.\n try:\n del os.environ[\"PIPENV_REQUESTED_PYTHON_VERSION\"]\n except KeyError:\n pass\n\n\ndef get_canonical_names(packages):\n \"\"\"Canonicalize a list of 
packages and return a set of canonical names\"\"\"\n from pipenv.patched.pip._vendor.packaging.utils import canonicalize_name\n\n if not isinstance(packages, Sequence):\n if not isinstance(packages, str):\n return packages\n packages = [packages]\n return {canonicalize_name(pkg) for pkg in packages if pkg}\n\n\ndef pep440_version(version):\n \"\"\"Normalize version to PEP 440 standards\"\"\"\n return str(parse(version))\n\n\ndef pep423_name(name):\n \"\"\"Normalize package name to PEP 423 style standard.\"\"\"\n name = name.lower()\n if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):\n return name.replace(\"_\", \"-\")\n\n else:\n return name\n\n\ndef get_vcs_deps(project=None, dev=False, pypi_mirror=None, packages=None, reqs=None):\n from pipenv.vendor.requirementslib.models.requirements import Requirement\n\n section = \"vcs_dev_packages\" if dev else \"vcs_packages\"\n if reqs is None:\n reqs = []\n lockfile = {}\n if not reqs:\n if not project and not packages:\n raise ValueError(\n \"Must supply either a project or a pipfile section to lock vcs dependencies.\"\n )\n if not packages:\n try:\n packages = getattr(project, section)\n except AttributeError:\n return [], []\n reqs = [Requirement.from_pipfile(name, entry) for name, entry in packages.items()]\n result = []\n for requirement in reqs:\n name = requirement.normalized_name\n commit_hash = None\n if requirement.is_vcs:\n try:\n with temp_path(), locked_repository(requirement) as repo:\n from pipenv.vendor.requirementslib.models.requirements import (\n Requirement,\n )\n\n # from distutils.sysconfig import get_python_lib\n # sys.path = [repo.checkout_directory, \"\", \".\", get_python_lib(plat_specific=0)]\n commit_hash = repo.get_commit_hash()\n name = requirement.normalized_name\n lockfile[name] = requirement.pipfile_entry[1]\n lockfile[name][\"ref\"] = commit_hash\n result.append(requirement)\n except OSError:\n continue\n return result, lockfile\n\n\ndef translate_markers(pipfile_entry):\n \"\"\"Take a pipfile entry and normalize its markers\n\n Provide a pipfile entry which may have 'markers' as a key or it may have\n any valid key from `packaging.markers.marker_context.keys()` and standardize\n the format into {'markers': 'key == \"some_value\"'}.\n\n :param pipfile_entry: A dictionariy of keys and values representing a pipfile entry\n :type pipfile_entry: dict\n :returns: A normalized dictionary with cleaned marker entries\n \"\"\"\n if not isinstance(pipfile_entry, Mapping):\n raise TypeError(\"Entry is not a pipfile formatted mapping.\")\n from pipenv.patched.pip._vendor.packaging.markers import default_environment\n\n allowed_marker_keys = [\"markers\"] + list(default_environment().keys())\n provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, \"keys\") else []\n pipfile_markers = set(provided_keys) & set(allowed_marker_keys)\n new_pipfile = dict(pipfile_entry).copy()\n marker_set = set()\n if \"markers\" in new_pipfile:\n marker_str = new_pipfile.pop(\"markers\")\n if marker_str:\n marker = str(Marker(marker_str))\n if \"extra\" not in marker:\n marker_set.add(marker)\n for m in pipfile_markers:\n entry = f\"{pipfile_entry[m]}\"\n if m != \"markers\":\n marker_set.add(str(Marker(f\"{m} {entry}\")))\n new_pipfile.pop(m)\n if marker_set:\n new_pipfile[\"markers\"] = str(\n Marker(\n \" or \".join(\n f\"{s}\" if \" and \" in s else s\n for s in sorted(dict.fromkeys(marker_set))\n )\n )\n ).replace('\"', \"'\")\n return new_pipfile\n\n\ndef clean_resolved_dep(dep, is_top_level=False, 
pipfile_entry=None):\n from pipenv.vendor.requirementslib.utils import is_vcs\n\n name = pep423_name(dep[\"name\"])\n lockfile = {}\n # We use this to determine if there are any markers on top level packages\n # So we can make sure those win out during resolution if the packages reoccur\n if \"version\" in dep and dep[\"version\"] and not dep.get(\"editable\", False):\n version = \"{}\".format(dep[\"version\"])\n if not version.startswith(\"==\"):\n version = f\"=={version}\"\n lockfile[\"version\"] = version\n if is_vcs(dep):\n ref = dep.get(\"ref\", None)\n if ref is not None:\n lockfile[\"ref\"] = ref\n vcs_type = next(iter(k for k in dep.keys() if k in VCS_LIST), None)\n if vcs_type:\n lockfile[vcs_type] = dep[vcs_type]\n if \"subdirectory\" in dep:\n lockfile[\"subdirectory\"] = dep[\"subdirectory\"]\n for key in [\"hashes\", \"index\", \"extras\", \"editable\"]:\n if key in dep:\n lockfile[key] = dep[key]\n # In case we lock a uri or a file when the user supplied a path\n # remove the uri or file keys from the entry and keep the path\n preferred_file_keys = [\"path\", \"file\"]\n dependency_file_key = next(iter(k for k in preferred_file_keys if k in dep), None)\n if dependency_file_key:\n lockfile[dependency_file_key] = dep[dependency_file_key]\n # Pipfile entry overrides path/file from resolver\n if pipfile_entry and isinstance(pipfile_entry, dict):\n for k in preferred_file_keys:\n if k in pipfile_entry.keys():\n lockfile[k] = pipfile_entry[k]\n break\n # If a package is **PRESENT** in the pipfile but has no markers, make sure we\n # **NEVER** include markers in the lockfile\n if \"markers\" in dep and dep.get(\"markers\", \"\").strip():\n # First, handle the case where there is no top level dependency in the pipfile\n if not is_top_level:\n translated = translate_markers(dep).get(\"markers\", \"\").strip()\n if translated:\n try:\n lockfile[\"markers\"] = translated\n except TypeError:\n pass\n # otherwise make sure we are prioritizing whatever the pipfile says about the markers\n # If the pipfile says nothing, then we should put nothing in the lockfile\n else:\n try:\n pipfile_entry = translate_markers(pipfile_entry)\n if pipfile_entry.get(\"markers\"):\n lockfile[\"markers\"] = pipfile_entry.get(\"markers\")\n except TypeError:\n pass\n return {name: lockfile}\n\n\ndef is_star(val):\n return isinstance(val, str) and val == \"*\"\n\n\ndef is_pinned(val):\n if isinstance(val, Mapping):\n val = val.get(\"version\")\n return isinstance(val, str) and val.startswith(\"==\")\n\n\ndef is_pinned_requirement(ireq):\n \"\"\"\n Returns whether an InstallRequirement is a \"pinned\" requirement.\n \"\"\"\n if ireq.editable:\n return False\n\n if ireq.req is None or len(ireq.specifier) != 1:\n return False\n\n spec = next(iter(ireq.specifier))\n return spec.operator in {\"==\", \"===\"} and not spec.version.endswith(\".*\")\n\n\ndef convert_deps_to_pip(\n deps,\n project=None,\n include_index=True,\n include_hashes=True,\n include_markers=True,\n):\n \"\"\" \"Converts a Pipfile-formatted dependency to a pip-formatted one.\"\"\"\n dependencies = []\n for dep_name, dep in deps.items():\n if project:\n project.clear_pipfile_cache()\n indexes = []\n if project:\n indexes = project.pipfile_sources()\n new_dep = Requirement.from_pipfile(dep_name, dep)\n if new_dep.index:\n include_index = True\n sources = indexes if include_index else None\n req = new_dep.as_line(\n sources=sources,\n include_hashes=include_hashes,\n include_markers=include_markers,\n ).strip()\n dependencies.append(req)\n return 
dependencies\n\n\ndef get_constraints_from_deps(deps):\n \"\"\"Get contraints from Pipfile-formatted dependency\"\"\"\n\n def is_constraints(dep: InstallRequirement) -> bool:\n return dep.name and not dep.editable and not dep.extras\n\n constraints = []\n for dep_name, dep in deps.items():\n new_dep = Requirement.from_pipfile(dep_name, dep)\n if new_dep.is_named and is_constraints(new_dep.as_ireq()):\n c = new_dep.as_line().strip()\n constraints.append(c)\n return constraints\n\n\ndef prepare_constraint_file(\n constraints,\n directory=None,\n sources=None,\n pip_args=None,\n):\n from pipenv.vendor.vistir.path import (\n create_tracked_tempdir,\n create_tracked_tempfile,\n )\n\n if not directory:\n directory = create_tracked_tempdir(suffix=\"-requirements\", prefix=\"pipenv-\")\n\n constraints_file = create_tracked_tempfile(\n mode=\"w\",\n prefix=\"pipenv-\",\n suffix=\"-constraints.txt\",\n dir=directory,\n delete=False,\n )\n\n if sources and pip_args:\n skip_args = (\"build-isolation\", \"use-pep517\", \"cache-dir\")\n args_to_add = [\n arg for arg in pip_args if not any(bad_arg in arg for bad_arg in skip_args)\n ]\n requirementstxt_sources = \" \".join(args_to_add) if args_to_add else \"\"\n requirementstxt_sources = requirementstxt_sources.replace(\" --\", \"\\n--\")\n constraints_file.write(f\"{requirementstxt_sources}\\n\")\n\n constraints_file.write(\"\\n\".join([c for c in constraints]))\n constraints_file.close()\n return constraints_file.name\n\n\ndef is_required_version(version, specified_version):\n \"\"\"Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n \"\"\"\n # Certain packages may be defined with multiple values.\n if isinstance(specified_version, dict):\n specified_version = specified_version.get(\"version\", \"\")\n if specified_version.startswith(\"==\"):\n return version.strip() == specified_version.split(\"==\")[1].strip()\n\n return True\n\n\ndef is_editable(pipfile_entry):\n if hasattr(pipfile_entry, \"get\"):\n return pipfile_entry.get(\"editable\", False) or any(\n pipfile_entry.get(key) for key in (\"file\", \"path\") + VCS_LIST\n )\n return False\n\n\n@contextmanager\ndef locked_repository(requirement):\n from pipenv.vendor.vistir.path import create_tracked_tempdir\n\n if not requirement.is_vcs:\n return\n src_dir = create_tracked_tempdir(prefix=\"pipenv-\", suffix=\"-src\")\n with requirement.req.locked_vcs_repo(src_dir=src_dir) as repo:\n yield repo\n", "path": "pipenv/utils/dependencies.py"}]} |
gh_patches_debug_1391 | rasdani/github-patches | git_diff | pypi__warehouse-434 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redirect a slash-less URL to the slashed variant
We have URLs like `/project/foobar/`; if someone enters `/project/foobar`, we should redirect that to `/project/foobar/`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/config.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import fs.opener
14 import transaction
15
16 from pyramid.config import Configurator
17 from tzf.pyramid_yml import config_defaults
18
19 from warehouse.utils.static import WarehouseCacheBuster
20
21
22 def content_security_policy_tween_factory(handler, registry):
23 policy = registry.settings.get("csp", {})
24 policy = "; ".join([" ".join([k] + v) for k, v in sorted(policy.items())])
25
26 def content_security_policy_tween(request):
27 resp = handler(request)
28
29 # We don't want to apply our Content Security Policy to the debug
30 # toolbar, that's not part of our application and it doesn't work with
31 # our restrictive CSP.
32 if not request.path.startswith("/_debug_toolbar/"):
33 resp.headers["Content-Security-Policy"] = \
34 policy.format(request=request)
35
36 return resp
37
38 return content_security_policy_tween
39
40
41 def configure(settings=None):
42 if settings is None:
43 settings = {}
44
45 config = Configurator(settings=settings)
46
47 # Set our yml.location so that it contains all of our settings files
48 config_defaults(config, ["warehouse:etc"])
49
50 # We want to load configuration from YAML files
51 config.include("tzf.pyramid_yml")
52
53 # We'll want to use Jinja2 as our template system.
54 config.include("pyramid_jinja2")
55
56 # We also want to use Jinja2 for .html templates as well, because we just
57 # assume that all templates will be using Jinja.
58 config.add_jinja2_renderer(".html")
59
60 # We'll want to configure some filters for Jinja2 as well.
61 filters = config.get_settings().setdefault("jinja2.filters", {})
62 filters.setdefault("readme", "warehouse.filters:readme_renderer")
63 filters.setdefault("shorten_number", "warehouse.filters:shorten_number")
64
65 # We also want to register some global functions for Jinja
66 jglobals = config.get_settings().setdefault("jinja2.globals", {})
67 jglobals.setdefault("gravatar", "warehouse.utils.gravatar:gravatar")
68
69 # We'll store all of our templates in one location, warehouse/templates
70 # so we'll go ahead and add that to the Jinja2 search path.
71 config.add_jinja2_search_path("warehouse:templates", name=".html")
72
73 # Configure our transaction handling so that each request gets it's own
74 # transaction handler and the lifetime of the transaction is tied to the
75 # lifetime of the request.
76 config.add_settings({
77 "tm.manager_hook": lambda request: transaction.TransactionManager(),
78 })
79 config.include("pyramid_tm")
80
81 # Register support for services
82 config.include("pyramid_services")
83
84 # Register support for internationalization and localization
85 config.include(".i18n")
86
87 # Register the configuration for the PostgreSQL database.
88 config.include(".db")
89
90 # Register our session support
91 config.include(".sessions")
92
93 # Register our support for http and origin caching
94 config.include(".cache.http")
95 config.include(".cache.origin")
96
97 # Register our CSRF support
98 config.include(".csrf")
99
100 # Register our authentication support.
101 config.include(".accounts")
102
103 # Allow the packaging app to register any services it has.
104 config.include(".packaging")
105
106 # Register all our URL routes for Warehouse.
107 config.include(".routes")
108
109 # Enable a Content Security Policy
110 config.add_settings({
111 "csp": {
112 "default-src": ["'none'"],
113 "frame-ancestors": ["'none'"],
114 "img-src": [
115 "'self'",
116 config.registry.settings["camo.url"],
117 "https://secure.gravatar.com",
118 ],
119 "referrer": ["cross-origin"],
120 "reflected-xss": ["block"],
121 "script-src": ["'self'"],
122 "style-src": ["'self'"],
123 },
124 })
125 config.add_tween("warehouse.config.content_security_policy_tween_factory")
126
127 # Configure the filesystems we use.
128 config.registry["filesystems"] = {}
129 for key, path in {
130 k[5:]: v
131 for k, v in config.registry.settings.items()
132 if k.startswith("dirs.")}.items():
133 config.registry["filesystems"][key] = \
134 fs.opener.fsopendir(path, create_dir=True)
135
136 # Enable Warehouse to service our static files
137 config.add_static_view(
138 name="static",
139 path="warehouse:static",
140 cachebust=WarehouseCacheBuster(
141 "warehouse:static/manifest.json",
142 cache=not config.registry.settings["pyramid.reload_assets"],
143 ),
144 )
145
146 # Scan everything for configuration
147 config.scan(ignore=["warehouse.migrations.env"])
148
149 return config
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/config.py b/warehouse/config.py
--- a/warehouse/config.py
+++ b/warehouse/config.py
@@ -124,6 +124,10 @@
})
config.add_tween("warehouse.config.content_security_policy_tween_factory")
+ # If a route matches with a slash appended to it, redirect to that route
+ # instead of returning a HTTPNotFound.
+ config.add_notfound_view(append_slash=True)
+
# Configure the filesystems we use.
config.registry["filesystems"] = {}
for key, path in {
| {"golden_diff": "diff --git a/warehouse/config.py b/warehouse/config.py\n--- a/warehouse/config.py\n+++ b/warehouse/config.py\n@@ -124,6 +124,10 @@\n })\n config.add_tween(\"warehouse.config.content_security_policy_tween_factory\")\n \n+ # If a route matches with a slash appended to it, redirect to that route\n+ # instead of returning a HTTPNotFound.\n+ config.add_notfound_view(append_slash=True)\n+\n # Configure the filesystems we use.\n config.registry[\"filesystems\"] = {}\n for key, path in {\n", "issue": "Redirect a slash-less URL to the slashed variant\nWe have urls like `/project/foobar/`, if someone enters `/project/foobar` we should redirect that to `/project/foobar/`.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fs.opener\nimport transaction\n\nfrom pyramid.config import Configurator\nfrom tzf.pyramid_yml import config_defaults\n\nfrom warehouse.utils.static import WarehouseCacheBuster\n\n\ndef content_security_policy_tween_factory(handler, registry):\n policy = registry.settings.get(\"csp\", {})\n policy = \"; \".join([\" \".join([k] + v) for k, v in sorted(policy.items())])\n\n def content_security_policy_tween(request):\n resp = handler(request)\n\n # We don't want to apply our Content Security Policy to the debug\n # toolbar, that's not part of our application and it doesn't work with\n # our restrictive CSP.\n if not request.path.startswith(\"/_debug_toolbar/\"):\n resp.headers[\"Content-Security-Policy\"] = \\\n policy.format(request=request)\n\n return resp\n\n return content_security_policy_tween\n\n\ndef configure(settings=None):\n if settings is None:\n settings = {}\n\n config = Configurator(settings=settings)\n\n # Set our yml.location so that it contains all of our settings files\n config_defaults(config, [\"warehouse:etc\"])\n\n # We want to load configuration from YAML files\n config.include(\"tzf.pyramid_yml\")\n\n # We'll want to use Jinja2 as our template system.\n config.include(\"pyramid_jinja2\")\n\n # We also want to use Jinja2 for .html templates as well, because we just\n # assume that all templates will be using Jinja.\n config.add_jinja2_renderer(\".html\")\n\n # We'll want to configure some filters for Jinja2 as well.\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"readme\", \"warehouse.filters:readme_renderer\")\n filters.setdefault(\"shorten_number\", \"warehouse.filters:shorten_number\")\n\n # We also want to register some global functions for Jinja\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"gravatar\", \"warehouse.utils.gravatar:gravatar\")\n\n # We'll store all of our templates in one location, warehouse/templates\n # so we'll go ahead and add that to the Jinja2 search path.\n config.add_jinja2_search_path(\"warehouse:templates\", name=\".html\")\n\n # Configure our transaction handling so that each request gets it's own\n # transaction handler and the lifetime of the transaction is tied to the\n # lifetime 
of the request.\n config.add_settings({\n \"tm.manager_hook\": lambda request: transaction.TransactionManager(),\n })\n config.include(\"pyramid_tm\")\n\n # Register support for services\n config.include(\"pyramid_services\")\n\n # Register support for internationalization and localization\n config.include(\".i18n\")\n\n # Register the configuration for the PostgreSQL database.\n config.include(\".db\")\n\n # Register our session support\n config.include(\".sessions\")\n\n # Register our support for http and origin caching\n config.include(\".cache.http\")\n config.include(\".cache.origin\")\n\n # Register our CSRF support\n config.include(\".csrf\")\n\n # Register our authentication support.\n config.include(\".accounts\")\n\n # Allow the packaging app to register any services it has.\n config.include(\".packaging\")\n\n # Register all our URL routes for Warehouse.\n config.include(\".routes\")\n\n # Enable a Content Security Policy\n config.add_settings({\n \"csp\": {\n \"default-src\": [\"'none'\"],\n \"frame-ancestors\": [\"'none'\"],\n \"img-src\": [\n \"'self'\",\n config.registry.settings[\"camo.url\"],\n \"https://secure.gravatar.com\",\n ],\n \"referrer\": [\"cross-origin\"],\n \"reflected-xss\": [\"block\"],\n \"script-src\": [\"'self'\"],\n \"style-src\": [\"'self'\"],\n },\n })\n config.add_tween(\"warehouse.config.content_security_policy_tween_factory\")\n\n # Configure the filesystems we use.\n config.registry[\"filesystems\"] = {}\n for key, path in {\n k[5:]: v\n for k, v in config.registry.settings.items()\n if k.startswith(\"dirs.\")}.items():\n config.registry[\"filesystems\"][key] = \\\n fs.opener.fsopendir(path, create_dir=True)\n\n # Enable Warehouse to service our static files\n config.add_static_view(\n name=\"static\",\n path=\"warehouse:static\",\n cachebust=WarehouseCacheBuster(\n \"warehouse:static/manifest.json\",\n cache=not config.registry.settings[\"pyramid.reload_assets\"],\n ),\n )\n\n # Scan everything for configuration\n config.scan(ignore=[\"warehouse.migrations.env\"])\n\n return config\n", "path": "warehouse/config.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fs.opener\nimport transaction\n\nfrom pyramid.config import Configurator\nfrom tzf.pyramid_yml import config_defaults\n\nfrom warehouse.utils.static import WarehouseCacheBuster\n\n\ndef content_security_policy_tween_factory(handler, registry):\n policy = registry.settings.get(\"csp\", {})\n policy = \"; \".join([\" \".join([k] + v) for k, v in sorted(policy.items())])\n\n def content_security_policy_tween(request):\n resp = handler(request)\n\n # We don't want to apply our Content Security Policy to the debug\n # toolbar, that's not part of our application and it doesn't work with\n # our restrictive CSP.\n if not request.path.startswith(\"/_debug_toolbar/\"):\n resp.headers[\"Content-Security-Policy\"] = \\\n policy.format(request=request)\n\n return resp\n\n return content_security_policy_tween\n\n\ndef configure(settings=None):\n if settings is 
None:\n settings = {}\n\n config = Configurator(settings=settings)\n\n # Set our yml.location so that it contains all of our settings files\n config_defaults(config, [\"warehouse:etc\"])\n\n # We want to load configuration from YAML files\n config.include(\"tzf.pyramid_yml\")\n\n # We'll want to use Jinja2 as our template system.\n config.include(\"pyramid_jinja2\")\n\n # We also want to use Jinja2 for .html templates as well, because we just\n # assume that all templates will be using Jinja.\n config.add_jinja2_renderer(\".html\")\n\n # We'll want to configure some filters for Jinja2 as well.\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"readme\", \"warehouse.filters:readme_renderer\")\n filters.setdefault(\"shorten_number\", \"warehouse.filters:shorten_number\")\n\n # We also want to register some global functions for Jinja\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"gravatar\", \"warehouse.utils.gravatar:gravatar\")\n\n # We'll store all of our templates in one location, warehouse/templates\n # so we'll go ahead and add that to the Jinja2 search path.\n config.add_jinja2_search_path(\"warehouse:templates\", name=\".html\")\n\n # Configure our transaction handling so that each request gets it's own\n # transaction handler and the lifetime of the transaction is tied to the\n # lifetime of the request.\n config.add_settings({\n \"tm.manager_hook\": lambda request: transaction.TransactionManager(),\n })\n config.include(\"pyramid_tm\")\n\n # Register support for services\n config.include(\"pyramid_services\")\n\n # Register support for internationalization and localization\n config.include(\".i18n\")\n\n # Register the configuration for the PostgreSQL database.\n config.include(\".db\")\n\n # Register our session support\n config.include(\".sessions\")\n\n # Register our support for http and origin caching\n config.include(\".cache.http\")\n config.include(\".cache.origin\")\n\n # Register our CSRF support\n config.include(\".csrf\")\n\n # Register our authentication support.\n config.include(\".accounts\")\n\n # Allow the packaging app to register any services it has.\n config.include(\".packaging\")\n\n # Register all our URL routes for Warehouse.\n config.include(\".routes\")\n\n # Enable a Content Security Policy\n config.add_settings({\n \"csp\": {\n \"default-src\": [\"'none'\"],\n \"frame-ancestors\": [\"'none'\"],\n \"img-src\": [\n \"'self'\",\n config.registry.settings[\"camo.url\"],\n \"https://secure.gravatar.com\",\n ],\n \"referrer\": [\"cross-origin\"],\n \"reflected-xss\": [\"block\"],\n \"script-src\": [\"'self'\"],\n \"style-src\": [\"'self'\"],\n },\n })\n config.add_tween(\"warehouse.config.content_security_policy_tween_factory\")\n\n # If a route matches with a slash appended to it, redirect to that route\n # instead of returning a HTTPNotFound.\n config.add_notfound_view(append_slash=True)\n\n # Configure the filesystems we use.\n config.registry[\"filesystems\"] = {}\n for key, path in {\n k[5:]: v\n for k, v in config.registry.settings.items()\n if k.startswith(\"dirs.\")}.items():\n config.registry[\"filesystems\"][key] = \\\n fs.opener.fsopendir(path, create_dir=True)\n\n # Enable Warehouse to service our static files\n config.add_static_view(\n name=\"static\",\n path=\"warehouse:static\",\n cachebust=WarehouseCacheBuster(\n \"warehouse:static/manifest.json\",\n cache=not config.registry.settings[\"pyramid.reload_assets\"],\n ),\n )\n\n # Scan everything for 
configuration\n config.scan(ignore=[\"warehouse.migrations.env\"])\n\n return config\n", "path": "warehouse/config.py"}]} |
gh_patches_debug_1392 | rasdani/github-patches | git_diff | scipy__scipy-5935 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unicode vs. string comparison in scipy.stats.binned_statistic_dd
I'm getting an error where `scipy.stats.binned_statistic_dd` isn't recognizing that the `statistic` parameter I'm passing is 'count' [when it does the string comparison](https://github.com/scipy/scipy/blob/master/scipy/stats/_binned_statistic.py#L469).
I'm assuming this has something to do with a `unicode` vs. `str` type issue --- I'm using python2.7 with `from __future__ import unicode_literals`. Should this be changed to an equality comparison? i.e.
```
if(statistic != 'count' and Vlen != Dlen):
```
instead of
```
if(statistic is not 'count' and Vlen != Dlen):
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/stats/_binned_statistic.py`
Content:
```
1 from __future__ import division, print_function, absolute_import
2
3 import warnings
4
5 import numpy as np
6 from scipy._lib.six import callable, xrange
7 from collections import namedtuple
8
9 __all__ = ['binned_statistic',
10 'binned_statistic_2d',
11 'binned_statistic_dd']
12
13
14 BinnedStatisticResult = namedtuple('BinnedStatisticResult',
15 ('statistic', 'bin_edges', 'binnumber'))
16
17
18 def binned_statistic(x, values, statistic='mean',
19 bins=10, range=None):
20 """
21 Compute a binned statistic for one or more sets of data.
22
23 This is a generalization of a histogram function. A histogram divides
24 the space into bins, and returns the count of the number of points in
25 each bin. This function allows the computation of the sum, mean, median,
26 or other statistic of the values (or set of values) within each bin.
27
28 Parameters
29 ----------
30 x : (N,) array_like
31 A sequence of values to be binned.
32 values : (N,) array_like or list of (N,) array_like
33 The data on which the statistic will be computed. This must be
34 the same shape as `x`, or a set of sequences - each the same shape as
35 `x`. If `values` is a set of sequences, the statistic will be computed
36 on each independently.
37 statistic : string or callable, optional
38 The statistic to compute (default is 'mean').
39 The following statistics are available:
40
41 * 'mean' : compute the mean of values for points within each bin.
42 Empty bins will be represented by NaN.
43 * 'median' : compute the median of values for points within each
44 bin. Empty bins will be represented by NaN.
45 * 'count' : compute the count of points within each bin. This is
46 identical to an unweighted histogram. `values` array is not
47 referenced.
48 * 'sum' : compute the sum of values for points within each bin.
49 This is identical to a weighted histogram.
50 * function : a user-defined function which takes a 1D array of
51 values, and outputs a single numerical statistic. This function
52 will be called on the values in each bin. Empty bins will be
53 represented by function([]), or NaN if this returns an error.
54
55 bins : int or sequence of scalars, optional
56 If `bins` is an int, it defines the number of equal-width bins in the
57 given range (10 by default). If `bins` is a sequence, it defines the
58 bin edges, including the rightmost edge, allowing for non-uniform bin
59 widths. Values in `x` that are smaller than lowest bin edge are
60 assigned to bin number 0, values beyond the highest bin are assigned to
61 ``bins[-1]``. If the bin edges are specified, the number of bins will
62 be, (nx = len(bins)-1).
63 range : (float, float) or [(float, float)], optional
64 The lower and upper range of the bins. If not provided, range
65 is simply ``(x.min(), x.max())``. Values outside the range are
66 ignored.
67
68 Returns
69 -------
70 statistic : array
71 The values of the selected statistic in each bin.
72 bin_edges : array of dtype float
73 Return the bin edges ``(length(statistic)+1)``.
74 binnumber: 1-D ndarray of ints
75 Indices of the bins (corresponding to `bin_edges`) in which each value
76 of `x` belongs. Same length as `values`. A binnumber of `i` means the
77 corresponding value is between (bin_edges[i-1], bin_edges[i]).
78
79 See Also
80 --------
81 numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
82
83 Notes
84 -----
85 All but the last (righthand-most) bin is half-open. In other words, if
86 `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
87 but excluding 2) and the second ``[2, 3)``. The last bin, however, is
88 ``[3, 4]``, which *includes* 4.
89
90 .. versionadded:: 0.11.0
91
92 Examples
93 --------
94 >>> from scipy import stats
95 >>> import matplotlib.pyplot as plt
96
97 First some basic examples:
98
99 Create two evenly spaced bins in the range of the given sample, and sum the
100 corresponding values in each of those bins:
101
102 >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
103 >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
104 (array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
105
106 Multiple arrays of values can also be passed. The statistic is calculated
107 on each set independently:
108
109 >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
110 >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
111 (array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
112 array([1, 1, 1, 2, 2]))
113
114 >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
115 ... bins=3)
116 (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
117 array([1, 2, 1, 2, 3]))
118
119 As a second example, we now generate some random data of sailing boat speed
120 as a function of wind speed, and then determine how fast our boat is for
121 certain wind speeds:
122
123 >>> windspeed = 8 * np.random.rand(500)
124 >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
125 >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
126 ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
127 >>> plt.figure()
128 >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
129 >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
130 ... label='binned statistic of data')
131 >>> plt.legend()
132
133 Now we can use ``binnumber`` to select all datapoints with a windspeed
134 below 1:
135
136 >>> low_boatspeed = boatspeed[binnumber == 0]
137
138 As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
139 plot of a distribution that shows the mean and distribution around that
140 mean per bin, on top of a regular histogram and the probability
141 distribution function:
142
143 >>> x = np.linspace(0, 5, num=500)
144 >>> x_pdf = stats.maxwell.pdf(x)
145 >>> samples = stats.maxwell.rvs(size=10000)
146
147 >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
148 ... statistic='mean', bins=25)
149 >>> bin_width = (bin_edges[1] - bin_edges[0])
150 >>> bin_centers = bin_edges[1:] - bin_width/2
151
152 >>> plt.figure()
153 >>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
154 ... alpha=0.2, label='histogram of data')
155 >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
156 >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
157 ... label='binned statistic of data')
158 >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
159 >>> plt.legend(fontsize=10)
160 >>> plt.show()
161
162 """
163 try:
164 N = len(bins)
165 except TypeError:
166 N = 1
167
168 if N != 1:
169 bins = [np.asarray(bins, float)]
170
171 if range is not None:
172 if len(range) == 2:
173 range = [range]
174
175 medians, edges, binnumbers = binned_statistic_dd(
176 [x], values, statistic, bins, range)
177
178 return BinnedStatisticResult(medians, edges[0], binnumbers)
179
180
181 BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
182 ('statistic', 'x_edge', 'y_edge',
183 'binnumber'))
184
185
186 def binned_statistic_2d(x, y, values, statistic='mean',
187 bins=10, range=None, expand_binnumbers=False):
188 """
189 Compute a bidimensional binned statistic for one or more sets of data.
190
191 This is a generalization of a histogram2d function. A histogram divides
192 the space into bins, and returns the count of the number of points in
193 each bin. This function allows the computation of the sum, mean, median,
194 or other statistic of the values (or set of values) within each bin.
195
196 Parameters
197 ----------
198 x : (N,) array_like
199 A sequence of values to be binned along the first dimension.
200 y : (N,) array_like
201 A sequence of values to be binned along the second dimension.
202 values : (N,) array_like or list of (N,) array_like
203 The data on which the statistic will be computed. This must be
204 the same shape as `x`, or a list of sequences - each with the same
205 shape as `x`. If `values` is such a list, the statistic will be
206 computed on each independently.
207 statistic : string or callable, optional
208 The statistic to compute (default is 'mean').
209 The following statistics are available:
210
211 * 'mean' : compute the mean of values for points within each bin.
212 Empty bins will be represented by NaN.
213 * 'median' : compute the median of values for points within each
214 bin. Empty bins will be represented by NaN.
215 * 'count' : compute the count of points within each bin. This is
216 identical to an unweighted histogram. `values` array is not
217 referenced.
218 * 'sum' : compute the sum of values for points within each bin.
219 This is identical to a weighted histogram.
220 * function : a user-defined function which takes a 1D array of
221 values, and outputs a single numerical statistic. This function
222 will be called on the values in each bin. Empty bins will be
223 represented by function([]), or NaN if this returns an error.
224
225 bins : int or [int, int] or array_like or [array, array], optional
226 The bin specification:
227
228 * the number of bins for the two dimensions (nx = ny = bins),
229 * the number of bins in each dimension (nx, ny = bins),
230 * the bin edges for the two dimensions (x_edge = y_edge = bins),
231 * the bin edges in each dimension (x_edge, y_edge = bins).
232
233 If the bin edges are specified, the number of bins will be,
234 (nx = len(x_edge)-1, ny = len(y_edge)-1).
235
236 range : (2,2) array_like, optional
237 The leftmost and rightmost edges of the bins along each dimension
238 (if not specified explicitly in the `bins` parameters):
239 [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
240 considered outliers and not tallied in the histogram.
241 expand_binnumbers : bool, optional
242 'False' (default): the returned `binnumber` is a shape (N,) array of
243 linearized bin indices.
244 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
245 ndarray, where each row gives the bin numbers in the corresponding
246 dimension.
247 See the `binnumber` returned value, and the `Examples` section.
248
249 .. versionadded:: 0.17.0
250
251 Returns
252 -------
253 statistic : (nx, ny) ndarray
254 The values of the selected statistic in each two-dimensional bin.
255 x_edge : (nx + 1) ndarray
256 The bin edges along the first dimension.
257 y_edge : (ny + 1) ndarray
258 The bin edges along the second dimension.
259 binnumber : (N,) array of ints or (2,N) ndarray of ints
260 This assigns to each element of `sample` an integer that represents the
261 bin in which this observation falls. The representation depends on the
262 `expand_binnumbers` argument. See `Notes` for details.
263
264
265 See Also
266 --------
267 numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
268
269 Notes
270 -----
271 Binedges:
272 All but the last (righthand-most) bin is half-open. In other words, if
273 `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
274 but excluding 2) and the second ``[2, 3)``. The last bin, however, is
275 ``[3, 4]``, which *includes* 4.
276
277 `binnumber`:
278 This returned argument assigns to each element of `sample` an integer that
279 represents the bin in which it belongs. The representation depends on the
280 `expand_binnumbers` argument. If 'False' (default): The returned
281 `binnumber` is a shape (N,) array of linearized indices mapping each
282 element of `sample` to its corresponding bin (using row-major ordering).
283 If 'True': The returned `binnumber` is a shape (2,N) ndarray where
284 each row indicates bin placements for each dimension respectively. In each
285 dimension, a binnumber of `i` means the corresponding value is between
286 (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
287
288 .. versionadded:: 0.11.0
289
290 Examples
291 --------
292 >>> from scipy import stats
293
294 Calculate the counts with explicit bin-edges:
295
296 >>> x = [0.1, 0.1, 0.1, 0.6]
297 >>> y = [2.1, 2.6, 2.1, 2.1]
298 >>> binx = [0.0, 0.5, 1.0]
299 >>> biny = [2.0, 2.5, 3.0]
300 >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
301 >>> ret.statistic
302 array([[ 2., 1.],
303 [ 1., 0.]])
304
305 The bin in which each sample is placed is given by the `binnumber`
306 returned parameter. By default, these are the linearized bin indices:
307
308 >>> ret.binnumber
309 array([5, 6, 5, 9])
310
311 The bin indices can also be expanded into separate entries for each
312 dimension using the `expand_binnumbers` parameter:
313
314 >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
315 ... expand_binnumbers=True)
316 >>> ret.binnumber
317 array([[1, 1, 1, 2],
318 [1, 2, 1, 1]])
319
320 Which shows that the first three elements belong in the xbin 1, and the
321 fourth into xbin 2; and so on for y.
322
323 """
324
325 # This code is based on np.histogram2d
326 try:
327 N = len(bins)
328 except TypeError:
329 N = 1
330
331 if N != 1 and N != 2:
332 xedges = yedges = np.asarray(bins, float)
333 bins = [xedges, yedges]
334
335 medians, edges, binnumbers = binned_statistic_dd(
336 [x, y], values, statistic, bins, range,
337 expand_binnumbers=expand_binnumbers)
338
339 return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
340
341
342 BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
343 ('statistic', 'bin_edges',
344 'binnumber'))
345
346
347 def binned_statistic_dd(sample, values, statistic='mean',
348 bins=10, range=None, expand_binnumbers=False):
349 """
350 Compute a multidimensional binned statistic for a set of data.
351
352 This is a generalization of a histogramdd function. A histogram divides
353 the space into bins, and returns the count of the number of points in
354 each bin. This function allows the computation of the sum, mean, median,
355 or other statistic of the values within each bin.
356
357 Parameters
358 ----------
359 sample : array_like
360 Data to histogram passed as a sequence of D arrays of length N, or
361 as an (N,D) array.
362 values : (N,) array_like or list of (N,) array_like
363 The data on which the statistic will be computed. This must be
364 the same shape as `x`, or a list of sequences - each with the same
365 shape as `x`. If `values` is such a list, the statistic will be
366 computed on each independently.
367 statistic : string or callable, optional
368 The statistic to compute (default is 'mean').
369 The following statistics are available:
370
371 * 'mean' : compute the mean of values for points within each bin.
372 Empty bins will be represented by NaN.
373 * 'median' : compute the median of values for points within each
374 bin. Empty bins will be represented by NaN.
375 * 'count' : compute the count of points within each bin. This is
376 identical to an unweighted histogram. `values` array is not
377 referenced.
378 * 'sum' : compute the sum of values for points within each bin.
379 This is identical to a weighted histogram.
380 * function : a user-defined function which takes a 1D array of
381 values, and outputs a single numerical statistic. This function
382 will be called on the values in each bin. Empty bins will be
383 represented by function([]), or NaN if this returns an error.
384
385 bins : sequence or int, optional
386 The bin specification must be in one of the following forms:
387
388 * A sequence of arrays describing the bin edges along each dimension.
389 * The number of bins for each dimension (nx, ny, ... = bins).
390 * The number of bins for all dimensions (nx = ny = ... = bins).
391
392 range : sequence, optional
393 A sequence of lower and upper bin edges to be used if the edges are
394 not given explicitely in `bins`. Defaults to the minimum and maximum
395 values along each dimension.
396 expand_binnumbers : bool, optional
397 'False' (default): the returned `binnumber` is a shape (N,) array of
398 linearized bin indices.
399 'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
400 ndarray, where each row gives the bin numbers in the corresponding
401 dimension.
402 See the `binnumber` returned value, and the `Examples` section of
403 `binned_statistic_2d`.
404
405 .. versionadded:: 0.17.0
406
407 Returns
408 -------
409 statistic : ndarray, shape(nx1, nx2, nx3,...)
410 The values of the selected statistic in each two-dimensional bin.
411 bin_edges : list of ndarrays
412 A list of D arrays describing the (nxi + 1) bin edges for each
413 dimension.
414 binnumber : (N,) array of ints or (D,N) ndarray of ints
415 This assigns to each element of `sample` an integer that represents the
416 bin in which this observation falls. The representation depends on the
417 `expand_binnumbers` argument. See `Notes` for details.
418
419
420 See Also
421 --------
422 numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
423
424 Notes
425 -----
426 Binedges:
427 All but the last (righthand-most) bin is half-open in each dimension. In
428 other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
429 ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
430 last bin, however, is ``[3, 4]``, which *includes* 4.
431
432 `binnumber`:
433 This returned argument assigns to each element of `sample` an integer that
434 represents the bin in which it belongs. The representation depends on the
435 `expand_binnumbers` argument. If 'False' (default): The returned
436 `binnumber` is a shape (N,) array of linearized indices mapping each
437 element of `sample` to its corresponding bin (using row-major ordering).
438 If 'True': The returned `binnumber` is a shape (D,N) ndarray where
439 each row indicates bin placements for each dimension respectively. In each
440 dimension, a binnumber of `i` means the corresponding value is between
441 (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
442
443 .. versionadded:: 0.11.0
444
445 """
446 known_stats = ['mean', 'median', 'count', 'sum', 'std']
447 if not callable(statistic) and statistic not in known_stats:
448 raise ValueError('invalid statistic %r' % (statistic,))
449
450 # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
451 # `Dlen` is the length of elements along each dimension.
452 # This code is based on np.histogramdd
453 try:
454 # `sample` is an ND-array.
455 Dlen, Ndim = sample.shape
456 except (AttributeError, ValueError):
457 # `sample` is a sequence of 1D arrays.
458 sample = np.atleast_2d(sample).T
459 Dlen, Ndim = sample.shape
460
461 # Store initial shape of `values` to preserve it in the output
462 values = np.asarray(values)
463 input_shape = list(values.shape)
464 # Make sure that `values` is 2D to iterate over rows
465 values = np.atleast_2d(values)
466 Vdim, Vlen = values.shape
467
468 # Make sure `values` match `sample`
469 if(statistic is not 'count' and Vlen != Dlen):
470 raise AttributeError('The number of `values` elements must match the '
471 'length of each `sample` dimension.')
472
473 nbin = np.empty(Ndim, int) # Number of bins in each dimension
474 edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
475 dedges = Ndim * [None] # Spacing between edges (will be 2D array)
476
477 try:
478 M = len(bins)
479 if M != Ndim:
480 raise AttributeError('The dimension of bins must be equal '
481 'to the dimension of the sample x.')
482 except TypeError:
483 bins = Ndim * [bins]
484
485 # Select range for each dimension
486 # Used only if number of bins is given.
487 if range is None:
488 smin = np.atleast_1d(np.array(sample.min(axis=0), float))
489 smax = np.atleast_1d(np.array(sample.max(axis=0), float))
490 else:
491 smin = np.zeros(Ndim)
492 smax = np.zeros(Ndim)
493 for i in xrange(Ndim):
494 smin[i], smax[i] = range[i]
495
496 # Make sure the bins have a finite width.
497 for i in xrange(len(smin)):
498 if smin[i] == smax[i]:
499 smin[i] = smin[i] - .5
500 smax[i] = smax[i] + .5
501
502 # Create edge arrays
503 for i in xrange(Ndim):
504 if np.isscalar(bins[i]):
505 nbin[i] = bins[i] + 2 # +2 for outlier bins
506 edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
507 else:
508 edges[i] = np.asarray(bins[i], float)
509 nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
510 dedges[i] = np.diff(edges[i])
511
512 nbin = np.asarray(nbin)
513
514 # Compute the bin number each sample falls into, in each dimension
515 sampBin = {}
516 for i in xrange(Ndim):
517 sampBin[i] = np.digitize(sample[:, i], edges[i])
518
519 # Using `digitize`, values that fall on an edge are put in the right bin.
520 # For the rightmost bin, we want values equal to the right
521 # edge to be counted in the last bin, and not as an outlier.
522 for i in xrange(Ndim):
523 # Find the rounding precision
524 decimal = int(-np.log10(dedges[i].min())) + 6
525 # Find which points are on the rightmost edge.
526 on_edge = np.where(np.around(sample[:, i], decimal) ==
527 np.around(edges[i][-1], decimal))[0]
528 # Shift these points one bin to the left.
529 sampBin[i][on_edge] -= 1
530
531 # Compute the sample indices in the flattened statistic matrix.
532 ni = nbin.argsort()
533 # `binnumbers` is which bin (in linearized `Ndim` space) each sample goes
534 binnumbers = np.zeros(Dlen, int)
535 for i in xrange(0, Ndim - 1):
536 binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()
537 binnumbers += sampBin[ni[-1]]
538
539 result = np.empty([Vdim, nbin.prod()], float)
540
541 if statistic == 'mean':
542 result.fill(np.nan)
543 flatcount = np.bincount(binnumbers, None)
544 a = flatcount.nonzero()
545 for vv in xrange(Vdim):
546 flatsum = np.bincount(binnumbers, values[vv])
547 result[vv, a] = flatsum[a] / flatcount[a]
548 elif statistic == 'std':
549 result.fill(0)
550 flatcount = np.bincount(binnumbers, None)
551 a = flatcount.nonzero()
552 for vv in xrange(Vdim):
553 flatsum = np.bincount(binnumbers, values[vv])
554 flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
555 result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
556 (flatsum[a] / flatcount[a]) ** 2)
557 elif statistic == 'count':
558 result.fill(0)
559 flatcount = np.bincount(binnumbers, None)
560 a = np.arange(len(flatcount))
561 result[:, a] = flatcount[np.newaxis, :]
562 elif statistic == 'sum':
563 result.fill(0)
564 for vv in xrange(Vdim):
565 flatsum = np.bincount(binnumbers, values[vv])
566 a = np.arange(len(flatsum))
567 result[vv, a] = flatsum
568 elif statistic == 'median':
569 result.fill(np.nan)
570 for i in np.unique(binnumbers):
571 for vv in xrange(Vdim):
572 result[vv, i] = np.median(values[vv, binnumbers == i])
573 elif callable(statistic):
574 with warnings.catch_warnings():
575 # Numpy generates a warnings for mean/std/... with empty list
576 warnings.filterwarnings('ignore', category=RuntimeWarning)
577 old = np.seterr(invalid='ignore')
578 try:
579 null = statistic([])
580 except:
581 null = np.nan
582 np.seterr(**old)
583 result.fill(null)
584 for i in np.unique(binnumbers):
585 for vv in xrange(Vdim):
586 result[vv, i] = statistic(values[vv, binnumbers == i])
587
588 # Shape into a proper matrix
589 result = result.reshape(np.append(Vdim, np.sort(nbin)))
590
591 for i in xrange(nbin.size):
592 j = ni.argsort()[i]
593 # Accomodate the extra `Vdim` dimension-zero with `+1`
594 result = result.swapaxes(i+1, j+1)
595 ni[i], ni[j] = ni[j], ni[i]
596
597 # Remove outliers (indices 0 and -1 for each bin-dimension).
598 core = [slice(None)] + Ndim * [slice(1, -1)]
599 result = result[core]
600
601 # Unravel binnumbers into an ndarray, each row the bins for each dimension
602 if(expand_binnumbers and Ndim > 1):
603 binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
604
605 if np.any(result.shape[1:] != nbin - 2):
606 raise RuntimeError('Internal Shape Error')
607
608 # Reshape to have output (`reulst`) match input (`values`) shape
609 result = result.reshape(input_shape[:-1] + list(nbin-2))
610
611 return BinnedStatisticddResult(result, edges, binnumbers)
612
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scipy/stats/_binned_statistic.py b/scipy/stats/_binned_statistic.py
--- a/scipy/stats/_binned_statistic.py
+++ b/scipy/stats/_binned_statistic.py
@@ -466,7 +466,7 @@
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
- if(statistic is not 'count' and Vlen != Dlen):
+ if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
| {"golden_diff": "diff --git a/scipy/stats/_binned_statistic.py b/scipy/stats/_binned_statistic.py\n--- a/scipy/stats/_binned_statistic.py\n+++ b/scipy/stats/_binned_statistic.py\n@@ -466,7 +466,7 @@\n Vdim, Vlen = values.shape\n \n # Make sure `values` match `sample`\n- if(statistic is not 'count' and Vlen != Dlen):\n+ if(statistic != 'count' and Vlen != Dlen):\n raise AttributeError('The number of `values` elements must match the '\n 'length of each `sample` dimension.')\n", "issue": "unicode vs. string comparison in scipy.stats.binned_statistic_dd\nI'm getting an error where `scipy.stats.binned_statistic_dd` isn't recognizing that the `statistic` parameter I'm passing is 'count' [when it does the string comparison](https://github.com/scipy/scipy/blob/master/scipy/stats/_binned_statistic.py#L469).\n\nI'm assuming this has something to do with a `unicode` vs. `str` type issue --- I'm using python2.7 with `from __future__ import unicode_literals`. Should this be changed to an equality comparison? i.e.\n\n```\nif(statistic != 'count' and Vlen != Dlen):\n```\n\ninstead of \n\n```\nif(statistic is not 'count' and Vlen != Dlen):\n```\n\n", "before_files": [{"content": "from __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nfrom scipy._lib.six import callable, xrange\nfrom collections import namedtuple\n\n__all__ = ['binned_statistic',\n 'binned_statistic_2d',\n 'binned_statistic_dd']\n\n\nBinnedStatisticResult = namedtuple('BinnedStatisticResult',\n ('statistic', 'bin_edges', 'binnumber'))\n\n\ndef binned_statistic(x, values, statistic='mean',\n bins=10, range=None):\n \"\"\"\n Compute a binned statistic for one or more sets of data.\n\n This is a generalization of a histogram function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a set of sequences - each the same shape as\n `x`. If `values` is a set of sequences, the statistic will be computed\n on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or sequence of scalars, optional\n If `bins` is an int, it defines the number of equal-width bins in the\n given range (10 by default). If `bins` is a sequence, it defines the\n bin edges, including the rightmost edge, allowing for non-uniform bin\n widths. 
Values in `x` that are smaller than lowest bin edge are\n assigned to bin number 0, values beyond the highest bin are assigned to\n ``bins[-1]``. If the bin edges are specified, the number of bins will\n be, (nx = len(bins)-1).\n range : (float, float) or [(float, float)], optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(x.min(), x.max())``. Values outside the range are\n ignored.\n\n Returns\n -------\n statistic : array\n The values of the selected statistic in each bin.\n bin_edges : array of dtype float\n Return the bin edges ``(length(statistic)+1)``.\n binnumber: 1-D ndarray of ints\n Indices of the bins (corresponding to `bin_edges`) in which each value\n of `x` belongs. Same length as `values`. A binnumber of `i` means the\n corresponding value is between (bin_edges[i-1], bin_edges[i]).\n\n See Also\n --------\n numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n\n .. versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n First some basic examples:\n\n Create two evenly spaced bins in the range of the given sample, and sum the\n corresponding values in each of those bins:\n\n >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]\n >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)\n (array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))\n\n Multiple arrays of values can also be passed. The statistic is calculated\n on each set independently:\n\n >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]\n >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)\n (array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),\n array([1, 1, 1, 2, 2]))\n\n >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',\n ... bins=3)\n (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),\n array([1, 2, 1, 2, 3]))\n\n As a second example, we now generate some random data of sailing boat speed\n as a function of wind speed, and then determine how fast our boat is for\n certain wind speeds:\n\n >>> windspeed = 8 * np.random.rand(500)\n >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)\n >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,\n ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])\n >>> plt.figure()\n >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')\n >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,\n ... label='binned statistic of data')\n >>> plt.legend()\n\n Now we can use ``binnumber`` to select all datapoints with a windspeed\n below 1:\n\n >>> low_boatspeed = boatspeed[binnumber == 0]\n\n As a final example, we will use ``bin_edges`` and ``binnumber`` to make a\n plot of a distribution that shows the mean and distribution around that\n mean per bin, on top of a regular histogram and the probability\n distribution function:\n\n >>> x = np.linspace(0, 5, num=500)\n >>> x_pdf = stats.maxwell.pdf(x)\n >>> samples = stats.maxwell.rvs(size=10000)\n\n >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,\n ... 
statistic='mean', bins=25)\n >>> bin_width = (bin_edges[1] - bin_edges[0])\n >>> bin_centers = bin_edges[1:] - bin_width/2\n\n >>> plt.figure()\n >>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',\n ... alpha=0.2, label='histogram of data')\n >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')\n >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,\n ... label='binned statistic of data')\n >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)\n >>> plt.legend(fontsize=10)\n >>> plt.show()\n\n \"\"\"\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1:\n bins = [np.asarray(bins, float)]\n\n if range is not None:\n if len(range) == 2:\n range = [range]\n\n medians, edges, binnumbers = binned_statistic_dd(\n [x], values, statistic, bins, range)\n\n return BinnedStatisticResult(medians, edges[0], binnumbers)\n\n\nBinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',\n ('statistic', 'x_edge', 'y_edge',\n 'binnumber'))\n\n\ndef binned_statistic_2d(x, y, values, statistic='mean',\n bins=10, range=None, expand_binnumbers=False):\n \"\"\"\n Compute a bidimensional binned statistic for one or more sets of data.\n\n This is a generalization of a histogram2d function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned along the first dimension.\n y : (N,) array_like\n A sequence of values to be binned along the second dimension.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n\n * the number of bins for the two dimensions (nx = ny = bins),\n * the number of bins in each dimension (nx, ny = bins),\n * the bin edges for the two dimensions (x_edge = y_edge = bins),\n * the bin edges in each dimension (x_edge, y_edge = bins).\n\n If the bin edges are specified, the number of bins will be,\n (nx = len(x_edge)-1, ny = len(y_edge)-1).\n\n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[xmin, xmax], [ymin, ymax]]. 
All values outside of this range will be\n considered outliers and not tallied in the histogram.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section.\n\n .. versionadded:: 0.17.0\n\n Returns\n -------\n statistic : (nx, ny) ndarray\n The values of the selected statistic in each two-dimensional bin.\n x_edge : (nx + 1) ndarray\n The bin edges along the first dimension.\n y_edge : (ny + 1) ndarray\n The bin edges along the second dimension.\n binnumber : (N,) array of ints or (2,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n\n\n See Also\n --------\n numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd\n\n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n\n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (2,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.\n\n .. versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import stats\n\n Calculate the counts with explicit bin-edges:\n\n >>> x = [0.1, 0.1, 0.1, 0.6]\n >>> y = [2.1, 2.6, 2.1, 2.1]\n >>> binx = [0.0, 0.5, 1.0]\n >>> biny = [2.0, 2.5, 3.0]\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])\n >>> ret.statistic\n array([[ 2., 1.],\n [ 1., 0.]])\n\n The bin in which each sample is placed is given by the `binnumber`\n returned parameter. By default, these are the linearized bin indices:\n\n >>> ret.binnumber\n array([5, 6, 5, 9])\n\n The bin indices can also be expanded into separate entries for each\n dimension using the `expand_binnumbers` parameter:\n\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],\n ... 
expand_binnumbers=True)\n >>> ret.binnumber\n array([[1, 1, 1, 2],\n [1, 2, 1, 1]])\n\n Which shows that the first three elements belong in the xbin 1, and the\n fourth into xbin 2; and so on for y.\n\n \"\"\"\n\n # This code is based on np.histogram2d\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n xedges = yedges = np.asarray(bins, float)\n bins = [xedges, yedges]\n\n medians, edges, binnumbers = binned_statistic_dd(\n [x, y], values, statistic, bins, range,\n expand_binnumbers=expand_binnumbers)\n\n return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)\n\n\nBinnedStatisticddResult = namedtuple('BinnedStatisticddResult',\n ('statistic', 'bin_edges',\n 'binnumber'))\n\n\ndef binned_statistic_dd(sample, values, statistic='mean',\n bins=10, range=None, expand_binnumbers=False):\n \"\"\"\n Compute a multidimensional binned statistic for a set of data.\n\n This is a generalization of a histogramdd function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values within each bin.\n\n Parameters\n ----------\n sample : array_like\n Data to histogram passed as a sequence of D arrays of length N, or\n as an (N,D) array.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : sequence or int, optional\n The bin specification must be in one of the following forms:\n\n * A sequence of arrays describing the bin edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... = bins).\n * The number of bins for all dimensions (nx = ny = ... = bins).\n\n range : sequence, optional\n A sequence of lower and upper bin edges to be used if the edges are\n not given explicitely in `bins`. Defaults to the minimum and maximum\n values along each dimension.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (D,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section of\n `binned_statistic_2d`.\n\n .. 
versionadded:: 0.17.0\n\n Returns\n -------\n statistic : ndarray, shape(nx1, nx2, nx3,...)\n The values of the selected statistic in each two-dimensional bin.\n bin_edges : list of ndarrays\n A list of D arrays describing the (nxi + 1) bin edges for each\n dimension.\n binnumber : (N,) array of ints or (D,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n\n\n See Also\n --------\n numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d\n\n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open in each dimension. In\n other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is\n ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The\n last bin, however, is ``[3, 4]``, which *includes* 4.\n\n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (D,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.\n\n .. versionadded:: 0.11.0\n\n \"\"\"\n known_stats = ['mean', 'median', 'count', 'sum', 'std']\n if not callable(statistic) and statistic not in known_stats:\n raise ValueError('invalid statistic %r' % (statistic,))\n\n # `Ndim` is the number of dimensions (e.g. 
`2` for `binned_statistic_2d`)\n # `Dlen` is the length of elements along each dimension.\n # This code is based on np.histogramdd\n try:\n # `sample` is an ND-array.\n Dlen, Ndim = sample.shape\n except (AttributeError, ValueError):\n # `sample` is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n Dlen, Ndim = sample.shape\n\n # Store initial shape of `values` to preserve it in the output\n values = np.asarray(values)\n input_shape = list(values.shape)\n # Make sure that `values` is 2D to iterate over rows\n values = np.atleast_2d(values)\n Vdim, Vlen = values.shape\n\n # Make sure `values` match `sample`\n if(statistic is not 'count' and Vlen != Dlen):\n raise AttributeError('The number of `values` elements must match the '\n 'length of each `sample` dimension.')\n\n nbin = np.empty(Ndim, int) # Number of bins in each dimension\n edges = Ndim * [None] # Bin edges for each dim (will be 2D array)\n dedges = Ndim * [None] # Spacing between edges (will be 2D array)\n\n try:\n M = len(bins)\n if M != Ndim:\n raise AttributeError('The dimension of bins must be equal '\n 'to the dimension of the sample x.')\n except TypeError:\n bins = Ndim * [bins]\n\n # Select range for each dimension\n # Used only if number of bins is given.\n if range is None:\n smin = np.atleast_1d(np.array(sample.min(axis=0), float))\n smax = np.atleast_1d(np.array(sample.max(axis=0), float))\n else:\n smin = np.zeros(Ndim)\n smax = np.zeros(Ndim)\n for i in xrange(Ndim):\n smin[i], smax[i] = range[i]\n\n # Make sure the bins have a finite width.\n for i in xrange(len(smin)):\n if smin[i] == smax[i]:\n smin[i] = smin[i] - .5\n smax[i] = smax[i] + .5\n\n # Create edge arrays\n for i in xrange(Ndim):\n if np.isscalar(bins[i]):\n nbin[i] = bins[i] + 2 # +2 for outlier bins\n edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)\n else:\n edges[i] = np.asarray(bins[i], float)\n nbin[i] = len(edges[i]) + 1 # +1 for outlier bins\n dedges[i] = np.diff(edges[i])\n\n nbin = np.asarray(nbin)\n\n # Compute the bin number each sample falls into, in each dimension\n sampBin = {}\n for i in xrange(Ndim):\n sampBin[i] = np.digitize(sample[:, i], edges[i])\n\n # Using `digitize`, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right\n # edge to be counted in the last bin, and not as an outlier.\n for i in xrange(Ndim):\n # Find the rounding precision\n decimal = int(-np.log10(dedges[i].min())) + 6\n # Find which points are on the rightmost edge.\n on_edge = np.where(np.around(sample[:, i], decimal) ==\n np.around(edges[i][-1], decimal))[0]\n # Shift these points one bin to the left.\n sampBin[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened statistic matrix.\n ni = nbin.argsort()\n # `binnumbers` is which bin (in linearized `Ndim` space) each sample goes\n binnumbers = np.zeros(Dlen, int)\n for i in xrange(0, Ndim - 1):\n binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()\n binnumbers += sampBin[ni[-1]]\n\n result = np.empty([Vdim, nbin.prod()], float)\n\n if statistic == 'mean':\n result.fill(np.nan)\n flatcount = np.bincount(binnumbers, None)\n a = flatcount.nonzero()\n for vv in xrange(Vdim):\n flatsum = np.bincount(binnumbers, values[vv])\n result[vv, a] = flatsum[a] / flatcount[a]\n elif statistic == 'std':\n result.fill(0)\n flatcount = np.bincount(binnumbers, None)\n a = flatcount.nonzero()\n for vv in xrange(Vdim):\n flatsum = np.bincount(binnumbers, values[vv])\n flatsum2 = np.bincount(binnumbers, values[vv] ** 2)\n result[vv, a] = 
np.sqrt(flatsum2[a] / flatcount[a] -\n (flatsum[a] / flatcount[a]) ** 2)\n elif statistic == 'count':\n result.fill(0)\n flatcount = np.bincount(binnumbers, None)\n a = np.arange(len(flatcount))\n result[:, a] = flatcount[np.newaxis, :]\n elif statistic == 'sum':\n result.fill(0)\n for vv in xrange(Vdim):\n flatsum = np.bincount(binnumbers, values[vv])\n a = np.arange(len(flatsum))\n result[vv, a] = flatsum\n elif statistic == 'median':\n result.fill(np.nan)\n for i in np.unique(binnumbers):\n for vv in xrange(Vdim):\n result[vv, i] = np.median(values[vv, binnumbers == i])\n elif callable(statistic):\n with warnings.catch_warnings():\n # Numpy generates a warnings for mean/std/... with empty list\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n old = np.seterr(invalid='ignore')\n try:\n null = statistic([])\n except:\n null = np.nan\n np.seterr(**old)\n result.fill(null)\n for i in np.unique(binnumbers):\n for vv in xrange(Vdim):\n result[vv, i] = statistic(values[vv, binnumbers == i])\n\n # Shape into a proper matrix\n result = result.reshape(np.append(Vdim, np.sort(nbin)))\n\n for i in xrange(nbin.size):\n j = ni.argsort()[i]\n # Accomodate the extra `Vdim` dimension-zero with `+1`\n result = result.swapaxes(i+1, j+1)\n ni[i], ni[j] = ni[j], ni[i]\n\n # Remove outliers (indices 0 and -1 for each bin-dimension).\n core = [slice(None)] + Ndim * [slice(1, -1)]\n result = result[core]\n\n # Unravel binnumbers into an ndarray, each row the bins for each dimension\n if(expand_binnumbers and Ndim > 1):\n binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))\n\n if np.any(result.shape[1:] != nbin - 2):\n raise RuntimeError('Internal Shape Error')\n\n # Reshape to have output (`reulst`) match input (`values`) shape\n result = result.reshape(input_shape[:-1] + list(nbin-2))\n\n return BinnedStatisticddResult(result, edges, binnumbers)\n", "path": "scipy/stats/_binned_statistic.py"}], "after_files": [{"content": "from __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nfrom scipy._lib.six import callable, xrange\nfrom collections import namedtuple\n\n__all__ = ['binned_statistic',\n 'binned_statistic_2d',\n 'binned_statistic_dd']\n\n\nBinnedStatisticResult = namedtuple('BinnedStatisticResult',\n ('statistic', 'bin_edges', 'binnumber'))\n\n\ndef binned_statistic(x, values, statistic='mean',\n bins=10, range=None):\n \"\"\"\n Compute a binned statistic for one or more sets of data.\n\n This is a generalization of a histogram function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a set of sequences - each the same shape as\n `x`. If `values` is a set of sequences, the statistic will be computed\n on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. 
Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or sequence of scalars, optional\n If `bins` is an int, it defines the number of equal-width bins in the\n given range (10 by default). If `bins` is a sequence, it defines the\n bin edges, including the rightmost edge, allowing for non-uniform bin\n widths. Values in `x` that are smaller than lowest bin edge are\n assigned to bin number 0, values beyond the highest bin are assigned to\n ``bins[-1]``. If the bin edges are specified, the number of bins will\n be, (nx = len(bins)-1).\n range : (float, float) or [(float, float)], optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(x.min(), x.max())``. Values outside the range are\n ignored.\n\n Returns\n -------\n statistic : array\n The values of the selected statistic in each bin.\n bin_edges : array of dtype float\n Return the bin edges ``(length(statistic)+1)``.\n binnumber: 1-D ndarray of ints\n Indices of the bins (corresponding to `bin_edges`) in which each value\n of `x` belongs. Same length as `values`. A binnumber of `i` means the\n corresponding value is between (bin_edges[i-1], bin_edges[i]).\n\n See Also\n --------\n numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n\n .. versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n First some basic examples:\n\n Create two evenly spaced bins in the range of the given sample, and sum the\n corresponding values in each of those bins:\n\n >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]\n >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)\n (array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))\n\n Multiple arrays of values can also be passed. The statistic is calculated\n on each set independently:\n\n >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]\n >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)\n (array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),\n array([1, 1, 1, 2, 2]))\n\n >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',\n ... bins=3)\n (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),\n array([1, 2, 1, 2, 3]))\n\n As a second example, we now generate some random data of sailing boat speed\n as a function of wind speed, and then determine how fast our boat is for\n certain wind speeds:\n\n >>> windspeed = 8 * np.random.rand(500)\n >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)\n >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,\n ... 
boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])\n >>> plt.figure()\n >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')\n >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,\n ... label='binned statistic of data')\n >>> plt.legend()\n\n Now we can use ``binnumber`` to select all datapoints with a windspeed\n below 1:\n\n >>> low_boatspeed = boatspeed[binnumber == 0]\n\n As a final example, we will use ``bin_edges`` and ``binnumber`` to make a\n plot of a distribution that shows the mean and distribution around that\n mean per bin, on top of a regular histogram and the probability\n distribution function:\n\n >>> x = np.linspace(0, 5, num=500)\n >>> x_pdf = stats.maxwell.pdf(x)\n >>> samples = stats.maxwell.rvs(size=10000)\n\n >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,\n ... statistic='mean', bins=25)\n >>> bin_width = (bin_edges[1] - bin_edges[0])\n >>> bin_centers = bin_edges[1:] - bin_width/2\n\n >>> plt.figure()\n >>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',\n ... alpha=0.2, label='histogram of data')\n >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')\n >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,\n ... label='binned statistic of data')\n >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)\n >>> plt.legend(fontsize=10)\n >>> plt.show()\n\n \"\"\"\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1:\n bins = [np.asarray(bins, float)]\n\n if range is not None:\n if len(range) == 2:\n range = [range]\n\n medians, edges, binnumbers = binned_statistic_dd(\n [x], values, statistic, bins, range)\n\n return BinnedStatisticResult(medians, edges[0], binnumbers)\n\n\nBinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',\n ('statistic', 'x_edge', 'y_edge',\n 'binnumber'))\n\n\ndef binned_statistic_2d(x, y, values, statistic='mean',\n bins=10, range=None, expand_binnumbers=False):\n \"\"\"\n Compute a bidimensional binned statistic for one or more sets of data.\n\n This is a generalization of a histogram2d function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned along the first dimension.\n y : (N,) array_like\n A sequence of values to be binned along the second dimension.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. 
This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n\n * the number of bins for the two dimensions (nx = ny = bins),\n * the number of bins in each dimension (nx, ny = bins),\n * the bin edges for the two dimensions (x_edge = y_edge = bins),\n * the bin edges in each dimension (x_edge, y_edge = bins).\n\n If the bin edges are specified, the number of bins will be,\n (nx = len(x_edge)-1, ny = len(y_edge)-1).\n\n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be\n considered outliers and not tallied in the histogram.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section.\n\n .. versionadded:: 0.17.0\n\n Returns\n -------\n statistic : (nx, ny) ndarray\n The values of the selected statistic in each two-dimensional bin.\n x_edge : (nx + 1) ndarray\n The bin edges along the first dimension.\n y_edge : (ny + 1) ndarray\n The bin edges along the second dimension.\n binnumber : (N,) array of ints or (2,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n\n\n See Also\n --------\n numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd\n\n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n\n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (2,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.\n\n .. versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import stats\n\n Calculate the counts with explicit bin-edges:\n\n >>> x = [0.1, 0.1, 0.1, 0.6]\n >>> y = [2.1, 2.6, 2.1, 2.1]\n >>> binx = [0.0, 0.5, 1.0]\n >>> biny = [2.0, 2.5, 3.0]\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])\n >>> ret.statistic\n array([[ 2., 1.],\n [ 1., 0.]])\n\n The bin in which each sample is placed is given by the `binnumber`\n returned parameter. 
By default, these are the linearized bin indices:\n\n >>> ret.binnumber\n array([5, 6, 5, 9])\n\n The bin indices can also be expanded into separate entries for each\n dimension using the `expand_binnumbers` parameter:\n\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],\n ... expand_binnumbers=True)\n >>> ret.binnumber\n array([[1, 1, 1, 2],\n [1, 2, 1, 1]])\n\n Which shows that the first three elements belong in the xbin 1, and the\n fourth into xbin 2; and so on for y.\n\n \"\"\"\n\n # This code is based on np.histogram2d\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n xedges = yedges = np.asarray(bins, float)\n bins = [xedges, yedges]\n\n medians, edges, binnumbers = binned_statistic_dd(\n [x, y], values, statistic, bins, range,\n expand_binnumbers=expand_binnumbers)\n\n return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)\n\n\nBinnedStatisticddResult = namedtuple('BinnedStatisticddResult',\n ('statistic', 'bin_edges',\n 'binnumber'))\n\n\ndef binned_statistic_dd(sample, values, statistic='mean',\n bins=10, range=None, expand_binnumbers=False):\n \"\"\"\n Compute a multidimensional binned statistic for a set of data.\n\n This is a generalization of a histogramdd function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values within each bin.\n\n Parameters\n ----------\n sample : array_like\n Data to histogram passed as a sequence of D arrays of length N, or\n as an (N,D) array.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : sequence or int, optional\n The bin specification must be in one of the following forms:\n\n * A sequence of arrays describing the bin edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... = bins).\n * The number of bins for all dimensions (nx = ny = ... = bins).\n\n range : sequence, optional\n A sequence of lower and upper bin edges to be used if the edges are\n not given explicitely in `bins`. 
Defaults to the minimum and maximum\n values along each dimension.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (D,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section of\n `binned_statistic_2d`.\n\n .. versionadded:: 0.17.0\n\n Returns\n -------\n statistic : ndarray, shape(nx1, nx2, nx3,...)\n The values of the selected statistic in each two-dimensional bin.\n bin_edges : list of ndarrays\n A list of D arrays describing the (nxi + 1) bin edges for each\n dimension.\n binnumber : (N,) array of ints or (D,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n\n\n See Also\n --------\n numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d\n\n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open in each dimension. In\n other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is\n ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The\n last bin, however, is ``[3, 4]``, which *includes* 4.\n\n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (D,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.\n\n .. versionadded:: 0.11.0\n\n \"\"\"\n known_stats = ['mean', 'median', 'count', 'sum', 'std']\n if not callable(statistic) and statistic not in known_stats:\n raise ValueError('invalid statistic %r' % (statistic,))\n\n # `Ndim` is the number of dimensions (e.g. 
`2` for `binned_statistic_2d`)\n # `Dlen` is the length of elements along each dimension.\n # This code is based on np.histogramdd\n try:\n # `sample` is an ND-array.\n Dlen, Ndim = sample.shape\n except (AttributeError, ValueError):\n # `sample` is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n Dlen, Ndim = sample.shape\n\n # Store initial shape of `values` to preserve it in the output\n values = np.asarray(values)\n input_shape = list(values.shape)\n # Make sure that `values` is 2D to iterate over rows\n values = np.atleast_2d(values)\n Vdim, Vlen = values.shape\n\n # Make sure `values` match `sample`\n if(statistic != 'count' and Vlen != Dlen):\n raise AttributeError('The number of `values` elements must match the '\n 'length of each `sample` dimension.')\n\n nbin = np.empty(Ndim, int) # Number of bins in each dimension\n edges = Ndim * [None] # Bin edges for each dim (will be 2D array)\n dedges = Ndim * [None] # Spacing between edges (will be 2D array)\n\n try:\n M = len(bins)\n if M != Ndim:\n raise AttributeError('The dimension of bins must be equal '\n 'to the dimension of the sample x.')\n except TypeError:\n bins = Ndim * [bins]\n\n # Select range for each dimension\n # Used only if number of bins is given.\n if range is None:\n smin = np.atleast_1d(np.array(sample.min(axis=0), float))\n smax = np.atleast_1d(np.array(sample.max(axis=0), float))\n else:\n smin = np.zeros(Ndim)\n smax = np.zeros(Ndim)\n for i in xrange(Ndim):\n smin[i], smax[i] = range[i]\n\n # Make sure the bins have a finite width.\n for i in xrange(len(smin)):\n if smin[i] == smax[i]:\n smin[i] = smin[i] - .5\n smax[i] = smax[i] + .5\n\n # Create edge arrays\n for i in xrange(Ndim):\n if np.isscalar(bins[i]):\n nbin[i] = bins[i] + 2 # +2 for outlier bins\n edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)\n else:\n edges[i] = np.asarray(bins[i], float)\n nbin[i] = len(edges[i]) + 1 # +1 for outlier bins\n dedges[i] = np.diff(edges[i])\n\n nbin = np.asarray(nbin)\n\n # Compute the bin number each sample falls into, in each dimension\n sampBin = {}\n for i in xrange(Ndim):\n sampBin[i] = np.digitize(sample[:, i], edges[i])\n\n # Using `digitize`, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right\n # edge to be counted in the last bin, and not as an outlier.\n for i in xrange(Ndim):\n # Find the rounding precision\n decimal = int(-np.log10(dedges[i].min())) + 6\n # Find which points are on the rightmost edge.\n on_edge = np.where(np.around(sample[:, i], decimal) ==\n np.around(edges[i][-1], decimal))[0]\n # Shift these points one bin to the left.\n sampBin[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened statistic matrix.\n ni = nbin.argsort()\n # `binnumbers` is which bin (in linearized `Ndim` space) each sample goes\n binnumbers = np.zeros(Dlen, int)\n for i in xrange(0, Ndim - 1):\n binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()\n binnumbers += sampBin[ni[-1]]\n\n result = np.empty([Vdim, nbin.prod()], float)\n\n if statistic == 'mean':\n result.fill(np.nan)\n flatcount = np.bincount(binnumbers, None)\n a = flatcount.nonzero()\n for vv in xrange(Vdim):\n flatsum = np.bincount(binnumbers, values[vv])\n result[vv, a] = flatsum[a] / flatcount[a]\n elif statistic == 'std':\n result.fill(0)\n flatcount = np.bincount(binnumbers, None)\n a = flatcount.nonzero()\n for vv in xrange(Vdim):\n flatsum = np.bincount(binnumbers, values[vv])\n flatsum2 = np.bincount(binnumbers, values[vv] ** 2)\n result[vv, a] = 
np.sqrt(flatsum2[a] / flatcount[a] -\n (flatsum[a] / flatcount[a]) ** 2)\n elif statistic == 'count':\n result.fill(0)\n flatcount = np.bincount(binnumbers, None)\n a = np.arange(len(flatcount))\n result[:, a] = flatcount[np.newaxis, :]\n elif statistic == 'sum':\n result.fill(0)\n for vv in xrange(Vdim):\n flatsum = np.bincount(binnumbers, values[vv])\n a = np.arange(len(flatsum))\n result[vv, a] = flatsum\n elif statistic == 'median':\n result.fill(np.nan)\n for i in np.unique(binnumbers):\n for vv in xrange(Vdim):\n result[vv, i] = np.median(values[vv, binnumbers == i])\n elif callable(statistic):\n with warnings.catch_warnings():\n # Numpy generates a warnings for mean/std/... with empty list\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n old = np.seterr(invalid='ignore')\n try:\n null = statistic([])\n except:\n null = np.nan\n np.seterr(**old)\n result.fill(null)\n for i in np.unique(binnumbers):\n for vv in xrange(Vdim):\n result[vv, i] = statistic(values[vv, binnumbers == i])\n\n # Shape into a proper matrix\n result = result.reshape(np.append(Vdim, np.sort(nbin)))\n\n for i in xrange(nbin.size):\n j = ni.argsort()[i]\n # Accomodate the extra `Vdim` dimension-zero with `+1`\n result = result.swapaxes(i+1, j+1)\n ni[i], ni[j] = ni[j], ni[i]\n\n # Remove outliers (indices 0 and -1 for each bin-dimension).\n core = [slice(None)] + Ndim * [slice(1, -1)]\n result = result[core]\n\n # Unravel binnumbers into an ndarray, each row the bins for each dimension\n if(expand_binnumbers and Ndim > 1):\n binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))\n\n if np.any(result.shape[1:] != nbin - 2):\n raise RuntimeError('Internal Shape Error')\n\n # Reshape to have output (`reulst`) match input (`values`) shape\n result = result.reshape(input_shape[:-1] + list(nbin-2))\n\n return BinnedStatisticddResult(result, edges, binnumbers)\n", "path": "scipy/stats/_binned_statistic.py"}]} |
gh_patches_debug_1393 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-629 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
K800 keyboard battery status unavailable.
I am using the newest Manjaro x64.
This is the `solaar show` command output:
```
[xxx@xxx-pc ~]$ solaar show
Unifying Receiver
Device path : /dev/hidraw0
USB id : 046d:c52b
Serial : DC279AB2
Firmware : 24.07.B0030
Bootloader : 02.09
Other : AA.AC
Has 2 paired device(s) out of a maximum of 6.
Notifications: wireless, software present (0x000900)
Device activity counters: 1=155, 2=181
1: Wireless Mouse MX Master 2S
Codename : MX Master 2S
Kind : mouse
Wireless PID : 4069
Protocol : HID++ 4.5
Polling rate : 8 ms (125Hz)
Serial number: A6A40064
Bootloader: BOT 56.10.B0005
Firmware: MPM 12.10.B0005
Firmware: MPM 12.10.B0005
Other:
The power switch is located on the base.
Supports 32 HID++ 2.0 features:
0: ROOT {0000}
1: FEATURE SET {0001}
2: DEVICE FW VERSION {0003}
3: DEVICE NAME {0005}
4: WIRELESS DEVICE STATUS {1D4B}
5: RESET {0020}
6: unknown:0021 {0021}
7: BATTERY STATUS {1000}
8: unknown:1806 {1806} internal, hidden
9: CHANGE HOST {1814}
10: REPROG CONTROLS V4 {1B04}
11: ADJUSTABLE DPI {2201}
12: VERTICAL SCROLLING {2100}
Roller type: 3G
Ratchet per turn: 24
Scroll lines: 0
13: SMART SHIFT {2110}
14: HIRES WHEEL {2121}
Multiplier: 8
Has invert
Normal wheel motion
Has ratchet switch
Normal wheel mode
High resolution mode
HID notification
15: GESTURE 2 {6501}
16: unknown:00C2 {00C2}
17: unknown:1813 {1813} internal, hidden
18: unknown:1830 {1830} internal, hidden
19: unknown:1890 {1890} internal, hidden
20: unknown:1891 {1891} internal, hidden
21: unknown:18A1 {18A1} internal, hidden
22: unknown:18C0 {18C0} internal, hidden
23: unknown:1DF3 {1DF3} internal, hidden
24: unknown:1E00 {1E00} hidden
25: unknown:1EB0 {1EB0} internal, hidden
26: unknown:1803 {1803} internal, hidden
27: unknown:1861 {1861} internal, hidden
28: unknown:9001 {9001} internal, hidden
29: unknown:9200 {9200} internal, hidden
30: unknown:9202 {9202} internal, hidden
31: unknown:1805 {1805} internal, hidden
Has 8 reprogrammable keys:
0: LEFT CLICK , default: LeftClick => LEFT CLICK
mse, pos:0, group:1, gmask:1
1: RIGHT CLICK , default: RightClick => RIGHT CLICK
mse, pos:0, group:1, gmask:1
2: MIDDLE BUTTON , default: MiddleMouseButton => MIDDLE BUTTON
mse, reprogrammable, divertable, pos:0, group:3, gmask:7
3: BACK AS BUTTON 4 , default: BackEx => BACK AS BUTTON 4
mse, reprogrammable, divertable, pos:0, group:2, gmask:3
4: FORWARD AS BUTTON 5 , default: BrowserForwardEx => FORWARD AS BUTTON 5
mse, reprogrammable, divertable, pos:0, group:2, gmask:3
5: unknown:00C3 , default: unknown:00A9 => unknown:00C3
mse, reprogrammable, divertable, pos:0, group:3, gmask:7
6: unknown:00C4 , default: unknown:009D => unknown:00C4
mse, reprogrammable, divertable, pos:0, group:3, gmask:7
7: unknown:00D7 , default: unknown:00B4 => unknown:00D7
divertable, virtual, pos:0, group:4, gmask:0
Battery: 20%, discharging.
2: Wireless Illuminated Keyboard K800
Codename : K800
Kind : keyboard
Wireless PID : 406E
Protocol : HID++ 4.5
Polling rate : 20 ms (50Hz)
Serial number: 636E1413
The power switch is located on the top right corner.
Battery status unavailable.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/logitech_receiver/descriptors.py`
Content:
```
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 from __future__ import absolute_import, division, print_function, unicode_literals
21
22
23 from .common import NamedInts as _NamedInts
24 from .hidpp10 import REGISTERS as _R, DEVICE_KIND as _DK
25 from .settings_templates import RegisterSettings as _RS, FeatureSettings as _FS
26
27 #
28 #
29 #
30
31 from collections import namedtuple
32 _DeviceDescriptor = namedtuple('_DeviceDescriptor',
33 ('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings'))
34 del namedtuple
35
36 DEVICES = {}
37
38 def _D(name, codename=None, kind=None, wpid=None, protocol=None, registers=None, settings=None):
39 assert name
40
41 if kind is None:
42 kind = (_DK.mouse if 'Mouse' in name
43 else _DK.keyboard if 'Keyboard' in name
44 else _DK.numpad if 'Number Pad' in name
45 else _DK.touchpad if 'Touchpad' in name
46 else _DK.trackball if 'Trackball' in name
47 else None)
48 assert kind is not None, 'descriptor for %s does not have kind set' % name
49
50 # heuristic: the codename is the last word in the device name
51 if codename is None and ' ' in name:
52 codename = name.split(' ')[-1]
53 assert codename is not None, 'descriptor for %s does not have codename set' % name
54
55 if protocol is not None:
56 # ? 2.0 devices should not have any registers
57 if protocol < 2.0:
58 assert settings is None or all(s._rw.kind == 1 for s in settings)
59 else:
60 assert registers is None
61 assert settings is None or all(s._rw.kind == 2 for s in settings)
62
63 if wpid:
64 for w in wpid if isinstance(wpid, tuple) else (wpid, ):
65 if protocol > 1.0:
66 assert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)
67 else:
68 if w[0:1] == '1':
69 assert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)
70 elif w[0:1] == '2':
71 assert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)
72
73 device_descriptor = _DeviceDescriptor(name=name, kind=kind,
74 wpid=wpid, codename=codename, protocol=protocol,
75 registers=registers, settings=settings)
76
77 assert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )
78 DEVICES[codename] = device_descriptor
79
80 if wpid:
81 if not isinstance(wpid, tuple):
82 wpid = (wpid, )
83
84 for w in wpid:
85 assert w not in DEVICES, 'duplicate wpid in device descriptors: %s' % (DEVICES[w], )
86 DEVICES[w] = device_descriptor
87
88 #
89 #
90 #
91
92 _PERFORMANCE_MX_DPIS = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))
93
94 #
95 #
96 #
97
98 # Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,
99 # so they are not specified here.
100 #
101 # For known registers, however, please do specify them here -- avoids
102 # unnecessary communication with the device and makes it easier to make certain
103 # decisions when querying the device's state.
104 #
105 # Specify a negative value to blacklist a certain register for a device.
106 #
107 # Usually, state registers (battery, leds, some features, etc) are only used by
108 # HID++ 1.0 devices, while HID++ 2.0 devices use features for the same
109 # functionalities. This is a rule that's been discovered by trial-and-error,
110 # so it may change in the future.
111
112 # Well-known registers (in hex):
113 # * 00 - notification flags (all devices)
114 # 01 - mice: smooth scrolling
115 # 07 - battery status
116 # 09 - keyboards: FN swap (if it has the FN key)
117 # 0D - battery charge
118 # a device may have either the 07 or 0D register available;
119 # no known device uses both
120 # 51 - leds
121 # 63 - mice: DPI
122 # * F1 - firmware info
123 # Some registers appear to be universally supported, no matter the HID++ version
124 # (marked with *). The rest may or may not be supported, and their values may or
125 # may not mean the same thing across different devices.
126
127 # The 'codename' and 'kind' fields are usually guessed from the device name,
128 # but in some cases (like the Logitech Cube) that heuristic fails and they have
129 # to be specified.
130 #
131 # The 'protocol' and 'wpid' fields are optional (they can be discovered at
132 # runtime), but specifying them here speeds up device discovery and reduces the
133 # USB traffic Solaar has to do to fully identify peripherals.
134 # Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).
135 #
136 # The 'registers' field indicates read-only registers, specifying a state. These
137 # are valid (AFAIK) only to HID++ 1.0 devices.
138 # The 'settings' field indicates a read/write register; based on them Solaar
139 # generates, at runtime, the settings controls in the device panel. HID++ 1.0
140 # devices may only have register-based settings; HID++ 2.0 devices may only have
141 # feature-based settings.
142
143 # Keyboards
144
145 _D('Wireless Keyboard K230', protocol=2.0, wpid='400D')
146 _D('Wireless Keyboard K270(unifying)', protocol=2.0, wpid='4003')
147 _D('Wireless Keyboard MK270', protocol=2.0, wpid='4023',
148 settings=[
149 _FS.fn_swap()
150 ],
151 )
152 _D('Wireless Keyboard K270', protocol=1.0,
153 registers=(_R.battery_status, ),
154 )
155 _D('Wireless Keyboard MK320', protocol=1.0, wpid='200F',
156 registers=(_R.battery_status, ),
157 )
158 _D('Wireless Keyboard MK330')
159 _D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007',
160 registers=(_R.battery_status, ),
161 )
162 _D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A',
163 registers=(_R.battery_status, ),
164 )
165 _D('Wireless Keyboard K360', protocol=2.0, wpid='4004',
166 settings=[
167 _FS.fn_swap()
168 ],
169 )
170 _D('Wireless Keyboard K375s', protocol=2.0, wpid='4061',
171 settings=[
172 _FS.k375s_fn_swap()
173 ],
174 )
175 _D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'),
176 settings=[
177 _FS.fn_swap()
178 ],
179 )
180 _D('Wireless Touch Keyboard K400 Plus', codename='K400 Plus', protocol=2.0, wpid='404D',
181 settings=[
182 _FS.new_fn_swap()
183 ],
184 )
185 _D('Wireless Keyboard K520', protocol=1.0, wpid='2011',
186 registers=(_R.battery_status, ),
187 settings=[
188 _RS.fn_swap(),
189 ],
190 )
191 _D('Number Pad N545', protocol=1.0, wpid='2006',
192 registers=(_R.battery_status, ),
193 )
194 _D('Wireless Keyboard MK550')
195 _D('Wireless Keyboard MK700', protocol=1.0, wpid='2008',
196 registers=(_R.battery_status, ),
197 settings=[
198 _RS.fn_swap(),
199 ],
200 )
201 _D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002',
202 settings=[
203 _FS.fn_swap()
204 ],
205 )
206 _D('Wireless Multi-Device Keyboard K780', protocol=4.5, wpid='405B',
207 settings=[
208 _FS.new_fn_swap()
209 ],
210 )
211 _D('Wireless Illuminated Keyboard K800', protocol=1.0, wpid='2010',
212 registers=(_R.battery_status, _R.three_leds, ),
213 settings=[
214 _RS.fn_swap(),
215 _RS.hand_detection(),
216 ],
217 )
218 _D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',
219 settings=[
220 _FS.new_fn_swap()
221 ],
222 )
223 _D('Craft Advanced Keyboard', protocol=4.5, wpid='4066')
224
225
226 # Mice
227
228 _D('Wireless Mouse M150', protocol=2.0, wpid='4022')
229 _D('Wireless Mouse M175', protocol=2.0, wpid='4008')
230 _D('Wireless Mouse M185 new', codename='M185n', protocol=4.5, wpid='4054',
231 settings=[
232 _FS.lowres_smooth_scroll(),
233 _FS.pointer_speed(),
234 ])
235 # Apparently Logitech uses wpid 4055 for three different mice
236 # That's not so strange, as M185 is used on both Unifying-ready and non-Unifying-ready mice
237 _D('Wireless Mouse M185/M235/M310', codename='M185/M235/M310', protocol=4.5, wpid='4055',
238 settings=[
239 _FS.lowres_smooth_scroll(),
240 _FS.pointer_speed(),
241 ])
242 _D('Wireless Mouse M185', protocol=2.0, wpid='4038')
243 _D('Wireless Mouse M187', protocol=2.0, wpid='4019')
244 _D('Wireless Mouse M215', protocol=1.0, wpid='1020')
245 _D('Wireless Mouse M305', protocol=1.0, wpid='101F',
246 registers=(_R.battery_status, ),
247 settings=[
248 _RS.side_scroll(),
249 ],
250 )
251 _D('Wireless Mouse M310', protocol=1.0, wpid='1024',
252 registers=(_R.battery_status, ),
253 )
254 _D('Wireless Mouse M315')
255 _D('Wireless Mouse M317')
256 _D('Wireless Mouse M325', protocol=2.0, wpid='400A',
257 settings=[
258 _FS.hi_res_scroll(),
259 ])
260 _D('Wireless Mouse M345', protocol=2.0, wpid='4017')
261 _D('Wireless Mouse M350', protocol=1.0, wpid='101C',
262 registers=(_R.battery_charge, ),
263 )
264 _D('Wireless Mouse M505', codename='M505/B605', protocol=1.0, wpid='101D',
265 registers=(_R.battery_charge, ),
266 settings=[
267 _RS.smooth_scroll(),
268 _RS.side_scroll(),
269 ],
270 )
271 _D('Wireless Mouse M510', protocol=1.0, wpid='1025',
272 registers=(_R.battery_status, ),
273 settings=[
274 _RS.smooth_scroll(),
275 _RS.side_scroll(),
276 ],
277 )
278 _D('Wireless Mouse M510', codename='M510v2', protocol=2.0, wpid='4051',
279 settings=[
280 _FS.lowres_smooth_scroll(),
281 ])
282 _D('Couch Mouse M515', protocol=2.0, wpid='4007')
283 _D('Wireless Mouse M525', protocol=2.0, wpid='4013')
284 _D('Multi Device Silent Mouse M585/M590', codename='M585/M590', protocol=4.5, wpid='406B',
285 settings=[
286 _FS.lowres_smooth_scroll(),
287 _FS.pointer_speed(),
288 ],
289 )
290 _D('Touch Mouse M600', protocol=2.0, wpid='401A')
291 _D('Marathon Mouse M705 (M-R0009)', codename='M705 (M-R0009)', protocol=1.0, wpid='101B',
292 registers=(_R.battery_charge, ),
293 settings=[
294 _RS.smooth_scroll(),
295 _RS.side_scroll(),
296 ],
297 )
298 _D('Marathon Mouse M705 (M-R0073)', codename='M705 (M-R0073)', protocol=4.5, wpid='406D',
299 settings=[
300 _FS.hires_smooth_invert(),
301 _FS.hires_smooth_resolution(),
302 _FS.pointer_speed(),
303 ])
304 _D('Zone Touch Mouse T400')
305 _D('Touch Mouse T620', protocol=2.0)
306 _D('Logitech Cube', kind=_DK.mouse, protocol=2.0)
307 _D('Anywhere Mouse MX', codename='Anywhere MX', protocol=1.0, wpid='1017',
308 registers=(_R.battery_charge, ),
309 settings=[
310 _RS.smooth_scroll(),
311 _RS.side_scroll(),
312 ],
313 )
314 _D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A',
315 settings=[
316 _FS.hires_smooth_invert(),
317 _FS.hires_smooth_resolution(),
318 ],
319 )
320 _D('Performance Mouse MX', codename='Performance MX', protocol=1.0, wpid='101A',
321 registers=(_R.battery_status, _R.three_leds, ),
322 settings=[
323 _RS.dpi(choices=_PERFORMANCE_MX_DPIS),
324 _RS.smooth_scroll(),
325 _RS.side_scroll(),
326 ],
327 )
328
329 _D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041',
330 settings=[
331 _FS.hires_smooth_invert(),
332 _FS.hires_smooth_resolution(),
333 ],
334 )
335
336 _D('Wireless Mouse MX Master 2S', codename='MX Master 2S', protocol=4.5,wpid='4069',
337 settings=[
338 _FS.hires_smooth_invert(),
339 _FS.hires_smooth_resolution(),
340 ],
341 )
342
343 _D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002',
344 registers=(_R.battery_status, ),
345 )
346 _D('G700 Gaming Mouse', codename='G700', protocol=1.0, wpid='1023',
347 registers=(_R.battery_status, _R.three_leds, ),
348 settings=[
349 _RS.smooth_scroll(),
350 _RS.side_scroll(),
351 ],
352 )
353 _D('G700s Gaming Mouse', codename='G700s', protocol=1.0, wpid='102A',
354 registers=(_R.battery_status, _R.three_leds, ),
355 settings=[
356 _RS.smooth_scroll(),
357 _RS.side_scroll(),
358 ],
359 )
360
361 # Trackballs
362
363 _D('Wireless Trackball M570')
364
365 # Touchpads
366
367 _D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')
368 _D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')
369
370 #
371 # Classic Nano peripherals (that don't support the Unifying protocol).
372 # A wpid is necessary to properly identify them.
373 #
374
375 _D('VX Nano Cordless Laser Mouse', codename='VX Nano', protocol=1.0, wpid=('100B', '100F'),
376 registers=(_R.battery_charge, ),
377 settings=[
378 _RS.smooth_scroll(),
379 _RS.side_scroll(),
380 ],
381 )
382 _D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011',
383 registers=(_R.battery_charge, ),
384 )
385 _D('V550 Nano Cordless Laser Mouse', codename='V550 Nano', protocol=1.0, wpid='1013',
386 registers=(_R.battery_charge, ),
387 settings=[
388 _RS.smooth_scroll(),
389 _RS.side_scroll(),
390 ],
391 )
392
393 # Mini receiver mice
394
395 _D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001',
396 registers=(_R.battery_status, ),
397 )
398 _D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'),
399 registers=(_R.battery_charge, ),
400 )
401 _D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004',
402 registers=(_R.battery_status, ),
403 )
404 _D('V400 Laser Cordless Mouse', codename='V400', protocol=1.0, wpid='1003',
405 registers=(_R.battery_status, ),
406 )
407 _D('V450 Laser Cordless Mouse', codename='V450', protocol=1.0, wpid='1005',
408 registers=(_R.battery_status, ),
409 )
410 _D('VX Revolution', codename='VX Revolution', kind=_DK.mouse, protocol=1.0, wpid=('1006', '100D'),
411 registers=(_R.battery_charge, ),
412 )
413 _D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'),
414 registers=(_R.battery_charge, ),
415 )
416 _D('MX Revolution', codename='MX Revolution', protocol=1.0, kind=_DK.mouse, wpid=('1008', '100C'),
417 registers=(_R.battery_charge, ),
418 )
419 _D('MX 1100 Cordless Laser Mouse', codename='MX 1100', protocol=1.0, kind=_DK.mouse, wpid='1014',
420 registers=(_R.battery_charge, ),
421 settings=[
422 _RS.smooth_scroll(),
423 _RS.side_scroll(),
424 ],
425 )
426
427 # Some exotics...
428
429 _D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')
430
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/logitech_receiver/descriptors.py b/lib/logitech_receiver/descriptors.py
--- a/lib/logitech_receiver/descriptors.py
+++ b/lib/logitech_receiver/descriptors.py
@@ -215,6 +215,11 @@
_RS.hand_detection(),
],
)
+_D('Wireless Illuminated Keyboard K800 new', codename='K800 new', protocol=4.5, wpid='406E',
+ settings=[
+ _FS.fn_swap()
+ ],
+ )
_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',
settings=[
_FS.new_fn_swap()
| {"golden_diff": "diff --git a/lib/logitech_receiver/descriptors.py b/lib/logitech_receiver/descriptors.py\n--- a/lib/logitech_receiver/descriptors.py\n+++ b/lib/logitech_receiver/descriptors.py\n@@ -215,6 +215,11 @@\n \t\t\t\t\t\t\t_RS.hand_detection(),\n \t\t\t\t\t\t],\n \t\t\t\t)\n+_D('Wireless Illuminated Keyboard K800 new', codename='K800 new', protocol=4.5, wpid='406E',\n+\t\t\t\tsettings=[\n+\t\t\t\t\t\t\t_FS.fn_swap()\n+\t\t\t\t\t\t],\n+\t\t\t\t)\n _D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',\n \t\t\t\tsettings=[\n \t\t\t\t\t\t\t_FS.new_fn_swap()\n", "issue": "K800 keyboard battery status unavailable.\nI am using the newest Manjaro x64.\r\n\r\nThis is the `solaar show` command output:\r\n```\r\n[xxx@xxx-pc ~]$ solaar show\r\nUnifying Receiver\r\n Device path : /dev/hidraw0\r\n USB id : 046d:c52b\r\n Serial : DC279AB2\r\n Firmware : 24.07.B0030\r\n Bootloader : 02.09\r\n Other : AA.AC\r\n Has 2 paired device(s) out of a maximum of 6.\r\n Notifications: wireless, software present (0x000900)\r\n Device activity counters: 1=155, 2=181\r\n\r\n 1: Wireless Mouse MX Master 2S\r\n Codename : MX Master 2S\r\n Kind : mouse\r\n Wireless PID : 4069\r\n Protocol : HID++ 4.5\r\n Polling rate : 8 ms (125Hz)\r\n Serial number: A6A40064\r\n Bootloader: BOT 56.10.B0005\r\n Firmware: MPM 12.10.B0005\r\n Firmware: MPM 12.10.B0005\r\n Other: \r\n The power switch is located on the base.\r\n Supports 32 HID++ 2.0 features:\r\n 0: ROOT {0000} \r\n 1: FEATURE SET {0001} \r\n 2: DEVICE FW VERSION {0003} \r\n 3: DEVICE NAME {0005} \r\n 4: WIRELESS DEVICE STATUS {1D4B} \r\n 5: RESET {0020} \r\n 6: unknown:0021 {0021} \r\n 7: BATTERY STATUS {1000} \r\n 8: unknown:1806 {1806} internal, hidden\r\n 9: CHANGE HOST {1814} \r\n 10: REPROG CONTROLS V4 {1B04} \r\n 11: ADJUSTABLE DPI {2201} \r\n 12: VERTICAL SCROLLING {2100} \r\n Roller type: 3G\r\n Ratchet per turn: 24\r\n Scroll lines: 0\r\n 13: SMART SHIFT {2110} \r\n 14: HIRES WHEEL {2121} \r\n Multiplier: 8\r\n Has invert\r\n Normal wheel motion\r\n Has ratchet switch\r\n Normal wheel mode\r\n High resolution mode\r\n HID notification\r\n 15: GESTURE 2 {6501} \r\n 16: unknown:00C2 {00C2} \r\n 17: unknown:1813 {1813} internal, hidden\r\n 18: unknown:1830 {1830} internal, hidden\r\n 19: unknown:1890 {1890} internal, hidden\r\n 20: unknown:1891 {1891} internal, hidden\r\n 21: unknown:18A1 {18A1} internal, hidden\r\n 22: unknown:18C0 {18C0} internal, hidden\r\n 23: unknown:1DF3 {1DF3} internal, hidden\r\n 24: unknown:1E00 {1E00} hidden\r\n 25: unknown:1EB0 {1EB0} internal, hidden\r\n 26: unknown:1803 {1803} internal, hidden\r\n 27: unknown:1861 {1861} internal, hidden\r\n 28: unknown:9001 {9001} internal, hidden\r\n 29: unknown:9200 {9200} internal, hidden\r\n 30: unknown:9202 {9202} internal, hidden\r\n 31: unknown:1805 {1805} internal, hidden\r\n Has 8 reprogrammable keys:\r\n 0: LEFT CLICK , default: LeftClick => LEFT CLICK \r\n mse, pos:0, group:1, gmask:1\r\n 1: RIGHT CLICK , default: RightClick => RIGHT CLICK \r\n mse, pos:0, group:1, gmask:1\r\n 2: MIDDLE BUTTON , default: MiddleMouseButton => MIDDLE BUTTON \r\n mse, reprogrammable, divertable, pos:0, group:3, gmask:7\r\n 3: BACK AS BUTTON 4 , default: BackEx => BACK AS BUTTON 4 \r\n mse, reprogrammable, divertable, pos:0, group:2, gmask:3\r\n 4: FORWARD AS BUTTON 5 , default: BrowserForwardEx => FORWARD AS BUTTON 5 \r\n mse, reprogrammable, divertable, pos:0, group:2, gmask:3\r\n 5: unknown:00C3 , default: unknown:00A9 => unknown:00C3 \r\n mse, reprogrammable, divertable, pos:0, 
group:3, gmask:7\r\n 6: unknown:00C4 , default: unknown:009D => unknown:00C4 \r\n mse, reprogrammable, divertable, pos:0, group:3, gmask:7\r\n 7: unknown:00D7 , default: unknown:00B4 => unknown:00D7 \r\n divertable, virtual, pos:0, group:4, gmask:0\r\n Battery: 20%, discharging.\r\n\r\n 2: Wireless Illuminated Keyboard K800\r\n Codename : K800\r\n Kind : keyboard\r\n Wireless PID : 406E\r\n Protocol : HID++ 4.5\r\n Polling rate : 20 ms (50Hz)\r\n Serial number: 636E1413\r\n The power switch is located on the top right corner.\r\n Battery status unavailable.\r\n```\r\n\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nfrom .common import NamedInts as _NamedInts\nfrom .hidpp10 import REGISTERS as _R, DEVICE_KIND as _DK\nfrom .settings_templates import RegisterSettings as _RS, FeatureSettings as _FS\n\n#\n#\n#\n\nfrom collections import namedtuple\n_DeviceDescriptor = namedtuple('_DeviceDescriptor',\n\t\t\t\t('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings'))\ndel namedtuple\n\nDEVICES = {}\n\ndef _D(name, codename=None, kind=None, wpid=None, protocol=None, registers=None, settings=None):\n\tassert name\n\n\tif kind is None:\n\t\tkind = (_DK.mouse if 'Mouse' in name\n\t\t\t\telse _DK.keyboard if 'Keyboard' in name\n\t\t\t\telse _DK.numpad if 'Number Pad' in name\n\t\t\t\telse _DK.touchpad if 'Touchpad' in name\n\t\t\t\telse _DK.trackball if 'Trackball' in name\n\t\t\t\telse None)\n\tassert kind is not None, 'descriptor for %s does not have kind set' % name\n\n\t# heuristic: the codename is the last word in the device name\n\tif codename is None and ' ' in name:\n\t\tcodename = name.split(' ')[-1]\n\tassert codename is not None, 'descriptor for %s does not have codename set' % name\n\n\tif protocol is not None:\n\t\t# ? 
2.0 devices should not have any registers\n\t\tif protocol < 2.0:\n\t\t\tassert settings is None or all(s._rw.kind == 1 for s in settings)\n\t\telse:\n\t\t\tassert registers is None\n\t\t\tassert settings is None or all(s._rw.kind == 2 for s in settings)\n\n\t\tif wpid:\n\t\t\tfor w in wpid if isinstance(wpid, tuple) else (wpid, ):\n\t\t\t\tif protocol > 1.0:\n\t\t\t\t\tassert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\telse:\n\t\t\t\t\tif w[0:1] == '1':\n\t\t\t\t\t\tassert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\t\telif w[0:1] == '2':\n\t\t\t\t\t\tassert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\n\tdevice_descriptor = _DeviceDescriptor(name=name, kind=kind,\n\t\t\t\t\twpid=wpid, codename=codename, protocol=protocol,\n\t\t\t\t\tregisters=registers, settings=settings)\n\n\tassert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )\n\tDEVICES[codename] = device_descriptor\n\n\tif wpid:\n\t\tif not isinstance(wpid, tuple):\n\t\t\twpid = (wpid, )\n\n\t\tfor w in wpid:\n\t\t\tassert w not in DEVICES, 'duplicate wpid in device descriptors: %s' % (DEVICES[w], )\n\t\t\tDEVICES[w] = device_descriptor\n\n#\n#\n#\n\n_PERFORMANCE_MX_DPIS = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))\n\n#\n#\n#\n\n# Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,\n# so they are not specified here.\n#\n# For known registers, however, please do specify them here -- avoids\n# unnecessary communication with the device and makes it easier to make certain\n# decisions when querying the device's state.\n#\n# Specify a negative value to blacklist a certain register for a device.\n#\n# Usually, state registers (battery, leds, some features, etc) are only used by\n# HID++ 1.0 devices, while HID++ 2.0 devices use features for the same\n# functionalities. This is a rule that's been discovered by trial-and-error,\n# so it may change in the future.\n\n# Well-known registers (in hex):\n# * 00 - notification flags (all devices)\n# 01 - mice: smooth scrolling\n# 07 - battery status\n# 09 - keyboards: FN swap (if it has the FN key)\n# 0D - battery charge\n# a device may have either the 07 or 0D register available;\n# no known device uses both\n# 51 - leds\n# 63 - mice: DPI\n# * F1 - firmware info\n# Some registers appear to be universally supported, no matter the HID++ version\n# (marked with *). The rest may or may not be supported, and their values may or\n# may not mean the same thing across different devices.\n\n# The 'codename' and 'kind' fields are usually guessed from the device name,\n# but in some cases (like the Logitech Cube) that heuristic fails and they have\n# to be specified.\n#\n# The 'protocol' and 'wpid' fields are optional (they can be discovered at\n# runtime), but specifying them here speeds up device discovery and reduces the\n# USB traffic Solaar has to do to fully identify peripherals.\n# Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).\n#\n# The 'registers' field indicates read-only registers, specifying a state. These\n# are valid (AFAIK) only to HID++ 1.0 devices.\n# The 'settings' field indicates a read/write register; based on them Solaar\n# generates, at runtime, the settings controls in the device panel. 
HID++ 1.0\n# devices may only have register-based settings; HID++ 2.0 devices may only have\n# feature-based settings.\n\n# Keyboards\n\n_D('Wireless Keyboard K230', protocol=2.0, wpid='400D')\n_D('Wireless Keyboard K270(unifying)', protocol=2.0, wpid='4003')\n_D('Wireless Keyboard MK270', protocol=2.0, wpid='4023',\n\t\t\t settings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Keyboard K270', protocol=1.0,\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK320', protocol=1.0, wpid='200F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK330')\n_D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard K360', protocol=2.0, wpid='4004',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Keyboard K375s', protocol=2.0, wpid='4061',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.k375s_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400 Plus', codename='K400 Plus', protocol=2.0, wpid='404D',\n settings=[\n _FS.new_fn_swap()\n ],\n )\n_D('Wireless Keyboard K520', protocol=1.0, wpid='2011',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Number Pad N545', protocol=1.0, wpid='2006',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK550')\n_D('Wireless Keyboard MK700', protocol=1.0, wpid='2008',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Multi-Device Keyboard K780', protocol=4.5, wpid='405B',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Illuminated Keyboard K800', protocol=1.0, wpid='2010',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t\t_RS.hand_detection(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Craft Advanced Keyboard', protocol=4.5, wpid='4066')\n\n\n# Mice\n\n_D('Wireless Mouse M150', protocol=2.0, wpid='4022')\n_D('Wireless Mouse M175', protocol=2.0, wpid='4008')\n_D('Wireless Mouse M185 new', codename='M185n', protocol=4.5, wpid='4054',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t])\n# Apparently Logitech uses wpid 4055 for three different mice\n# That's not so strange, as M185 is used on both Unifying-ready and non-Unifying-ready mice\n_D('Wireless Mouse M185/M235/M310', codename='M185/M235/M310', protocol=4.5, wpid='4055',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t])\n_D('Wireless Mouse M185', protocol=2.0, wpid='4038')\n_D('Wireless Mouse M187', protocol=2.0, wpid='4019')\n_D('Wireless Mouse M215', protocol=1.0, wpid='1020')\n_D('Wireless Mouse M305', protocol=1.0, 
wpid='101F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M310', protocol=1.0, wpid='1024',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Mouse M315')\n_D('Wireless Mouse M317')\n_D('Wireless Mouse M325', protocol=2.0, wpid='400A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hi_res_scroll(),\n\t\t\t\t])\n_D('Wireless Mouse M345', protocol=2.0, wpid='4017')\n_D('Wireless Mouse M350', protocol=1.0, wpid='101C',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('Wireless Mouse M505', codename='M505/B605', protocol=1.0, wpid='101D',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M510', protocol=1.0, wpid='1025',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M510', codename='M510v2', protocol=2.0, wpid='4051',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t])\n_D('Couch Mouse M515', protocol=2.0, wpid='4007')\n_D('Wireless Mouse M525', protocol=2.0, wpid='4013')\n_D('Multi Device Silent Mouse M585/M590', codename='M585/M590', protocol=4.5, wpid='406B',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t],\n\t)\n_D('Touch Mouse M600', protocol=2.0, wpid='401A')\n_D('Marathon Mouse M705 (M-R0009)', codename='M705 (M-R0009)', protocol=1.0, wpid='101B',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Marathon Mouse M705 (M-R0073)', codename='M705 (M-R0073)', protocol=4.5, wpid='406D',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t])\n_D('Zone Touch Mouse T400')\n_D('Touch Mouse T620', protocol=2.0)\n_D('Logitech Cube', kind=_DK.mouse, protocol=2.0)\n_D('Anywhere Mouse MX', codename='Anywhere MX', protocol=1.0, wpid='1017',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Performance Mouse MX', codename='Performance MX', protocol=1.0, wpid='101A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.dpi(choices=_PERFORMANCE_MX_DPIS),\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('Wireless Mouse MX Master 2S', codename='MX Master 2S', protocol=4.5,wpid='4069',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('G700 Gaming Mouse', codename='G700', protocol=1.0, 
wpid='1023',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('G700s Gaming Mouse', codename='G700s', protocol=1.0, wpid='102A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Trackballs\n\n_D('Wireless Trackball M570')\n\n# Touchpads\n\n_D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')\n_D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')\n\n#\n# Classic Nano peripherals (that don't support the Unifying protocol).\n# A wpid is necessary to properly identify them.\n#\n\n_D('VX Nano Cordless Laser Mouse', codename='VX Nano', protocol=1.0, wpid=('100B', '100F'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('V550 Nano Cordless Laser Mouse', codename='V550 Nano', protocol=1.0, wpid='1013',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Mini receiver mice\n\n_D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V400 Laser Cordless Mouse', codename='V400', protocol=1.0, wpid='1003',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V450 Laser Cordless Mouse', codename='V450', protocol=1.0, wpid='1005',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('VX Revolution', codename='VX Revolution', kind=_DK.mouse, protocol=1.0, wpid=('1006', '100D'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Revolution', codename='MX Revolution', protocol=1.0, kind=_DK.mouse, wpid=('1008', '100C'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX 1100 Cordless Laser Mouse', codename='MX 1100', protocol=1.0, kind=_DK.mouse, wpid='1014',\n registers=(_R.battery_charge, ),\n settings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n )\n\n# Some exotics...\n\n_D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')\n", "path": "lib/logitech_receiver/descriptors.py"}], "after_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nfrom .common import NamedInts as _NamedInts\nfrom .hidpp10 import REGISTERS as _R, DEVICE_KIND as _DK\nfrom .settings_templates import RegisterSettings as _RS, FeatureSettings as _FS\n\n#\n#\n#\n\nfrom collections import namedtuple\n_DeviceDescriptor = namedtuple('_DeviceDescriptor',\n\t\t\t\t('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings'))\ndel namedtuple\n\nDEVICES = {}\n\ndef _D(name, codename=None, kind=None, wpid=None, protocol=None, registers=None, settings=None):\n\tassert name\n\n\tif kind is None:\n\t\tkind = (_DK.mouse if 'Mouse' in name\n\t\t\t\telse _DK.keyboard if 'Keyboard' in name\n\t\t\t\telse _DK.numpad if 'Number Pad' in name\n\t\t\t\telse _DK.touchpad if 'Touchpad' in name\n\t\t\t\telse _DK.trackball if 'Trackball' in name\n\t\t\t\telse None)\n\tassert kind is not None, 'descriptor for %s does not have kind set' % name\n\n\t# heuristic: the codename is the last word in the device name\n\tif codename is None and ' ' in name:\n\t\tcodename = name.split(' ')[-1]\n\tassert codename is not None, 'descriptor for %s does not have codename set' % name\n\n\tif protocol is not None:\n\t\t# ? 2.0 devices should not have any registers\n\t\tif protocol < 2.0:\n\t\t\tassert settings is None or all(s._rw.kind == 1 for s in settings)\n\t\telse:\n\t\t\tassert registers is None\n\t\t\tassert settings is None or all(s._rw.kind == 2 for s in settings)\n\n\t\tif wpid:\n\t\t\tfor w in wpid if isinstance(wpid, tuple) else (wpid, ):\n\t\t\t\tif protocol > 1.0:\n\t\t\t\t\tassert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\telse:\n\t\t\t\t\tif w[0:1] == '1':\n\t\t\t\t\t\tassert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\t\telif w[0:1] == '2':\n\t\t\t\t\t\tassert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\n\tdevice_descriptor = _DeviceDescriptor(name=name, kind=kind,\n\t\t\t\t\twpid=wpid, codename=codename, protocol=protocol,\n\t\t\t\t\tregisters=registers, settings=settings)\n\n\tassert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )\n\tDEVICES[codename] = device_descriptor\n\n\tif wpid:\n\t\tif not isinstance(wpid, tuple):\n\t\t\twpid = (wpid, )\n\n\t\tfor w in wpid:\n\t\t\tassert w not in DEVICES, 'duplicate wpid in device descriptors: %s' % (DEVICES[w], )\n\t\t\tDEVICES[w] = device_descriptor\n\n#\n#\n#\n\n_PERFORMANCE_MX_DPIS = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))\n\n#\n#\n#\n\n# Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,\n# so they are not specified here.\n#\n# For known registers, however, please do specify them here -- avoids\n# unnecessary communication with the device and makes it easier to make certain\n# decisions when querying the device's state.\n#\n# Specify a negative value to blacklist a certain register for a device.\n#\n# Usually, state registers (battery, leds, some features, etc) are only used by\n# HID++ 1.0 devices, while HID++ 2.0 devices use features for the same\n# functionalities. 
This is a rule that's been discovered by trial-and-error,\n# so it may change in the future.\n\n# Well-known registers (in hex):\n# * 00 - notification flags (all devices)\n# 01 - mice: smooth scrolling\n# 07 - battery status\n# 09 - keyboards: FN swap (if it has the FN key)\n# 0D - battery charge\n# a device may have either the 07 or 0D register available;\n# no known device uses both\n# 51 - leds\n# 63 - mice: DPI\n# * F1 - firmware info\n# Some registers appear to be universally supported, no matter the HID++ version\n# (marked with *). The rest may or may not be supported, and their values may or\n# may not mean the same thing across different devices.\n\n# The 'codename' and 'kind' fields are usually guessed from the device name,\n# but in some cases (like the Logitech Cube) that heuristic fails and they have\n# to be specified.\n#\n# The 'protocol' and 'wpid' fields are optional (they can be discovered at\n# runtime), but specifying them here speeds up device discovery and reduces the\n# USB traffic Solaar has to do to fully identify peripherals.\n# Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).\n#\n# The 'registers' field indicates read-only registers, specifying a state. These\n# are valid (AFAIK) only to HID++ 1.0 devices.\n# The 'settings' field indicates a read/write register; based on them Solaar\n# generates, at runtime, the settings controls in the device panel. HID++ 1.0\n# devices may only have register-based settings; HID++ 2.0 devices may only have\n# feature-based settings.\n\n# Keyboards\n\n_D('Wireless Keyboard K230', protocol=2.0, wpid='400D')\n_D('Wireless Keyboard K270(unifying)', protocol=2.0, wpid='4003')\n_D('Wireless Keyboard MK270', protocol=2.0, wpid='4023',\n\t\t\t settings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Keyboard K270', protocol=1.0,\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK320', protocol=1.0, wpid='200F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK330')\n_D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard K360', protocol=2.0, wpid='4004',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Keyboard K375s', protocol=2.0, wpid='4061',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.k375s_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400 Plus', codename='K400 Plus', protocol=2.0, wpid='404D',\n settings=[\n _FS.new_fn_swap()\n ],\n )\n_D('Wireless Keyboard K520', protocol=1.0, wpid='2011',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Number Pad N545', protocol=1.0, wpid='2006',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK550')\n_D('Wireless Keyboard MK700', protocol=1.0, wpid='2008',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Multi-Device Keyboard K780', protocol=4.5, 
wpid='405B',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Illuminated Keyboard K800', protocol=1.0, wpid='2010',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t\t_RS.hand_detection(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Illuminated Keyboard K800 new', codename='K800 new', protocol=4.5, wpid='406E',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Craft Advanced Keyboard', protocol=4.5, wpid='4066')\n\n\n# Mice\n\n_D('Wireless Mouse M150', protocol=2.0, wpid='4022')\n_D('Wireless Mouse M175', protocol=2.0, wpid='4008')\n_D('Wireless Mouse M185 new', codename='M185n', protocol=4.5, wpid='4054',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t])\n# Apparently Logitech uses wpid 4055 for three different mice\n# That's not so strange, as M185 is used on both Unifying-ready and non-Unifying-ready mice\n_D('Wireless Mouse M185/M235/M310', codename='M185/M235/M310', protocol=4.5, wpid='4055',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t])\n_D('Wireless Mouse M185', protocol=2.0, wpid='4038')\n_D('Wireless Mouse M187', protocol=2.0, wpid='4019')\n_D('Wireless Mouse M215', protocol=1.0, wpid='1020')\n_D('Wireless Mouse M305', protocol=1.0, wpid='101F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M310', protocol=1.0, wpid='1024',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Mouse M315')\n_D('Wireless Mouse M317')\n_D('Wireless Mouse M325', protocol=2.0, wpid='400A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hi_res_scroll(),\n\t\t\t\t])\n_D('Wireless Mouse M345', protocol=2.0, wpid='4017')\n_D('Wireless Mouse M350', protocol=1.0, wpid='101C',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('Wireless Mouse M505', codename='M505/B605', protocol=1.0, wpid='101D',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M510', protocol=1.0, wpid='1025',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M510', codename='M510v2', protocol=2.0, wpid='4051',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t])\n_D('Couch Mouse M515', protocol=2.0, wpid='4007')\n_D('Wireless Mouse M525', protocol=2.0, wpid='4013')\n_D('Multi Device Silent Mouse M585/M590', codename='M585/M590', protocol=4.5, wpid='406B',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.lowres_smooth_scroll(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t],\n\t)\n_D('Touch Mouse M600', protocol=2.0, wpid='401A')\n_D('Marathon Mouse M705 (M-R0009)', codename='M705 (M-R0009)', protocol=1.0, wpid='101B',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Marathon Mouse M705 (M-R0073)', codename='M705 (M-R0073)', protocol=4.5, 
wpid='406D',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t\t_FS.pointer_speed(),\n\t\t\t\t])\n_D('Zone Touch Mouse T400')\n_D('Touch Mouse T620', protocol=2.0)\n_D('Logitech Cube', kind=_DK.mouse, protocol=2.0)\n_D('Anywhere Mouse MX', codename='Anywhere MX', protocol=1.0, wpid='1017',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Performance Mouse MX', codename='Performance MX', protocol=1.0, wpid='101A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.dpi(choices=_PERFORMANCE_MX_DPIS),\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('Wireless Mouse MX Master 2S', codename='MX Master 2S', protocol=4.5,wpid='4069',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.hires_smooth_invert(),\n\t\t\t\t\t\t\t_FS.hires_smooth_resolution(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('G700 Gaming Mouse', codename='G700', protocol=1.0, wpid='1023',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('G700s Gaming Mouse', codename='G700s', protocol=1.0, wpid='102A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Trackballs\n\n_D('Wireless Trackball M570')\n\n# Touchpads\n\n_D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')\n_D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')\n\n#\n# Classic Nano peripherals (that don't support the Unifying protocol).\n# A wpid is necessary to properly identify them.\n#\n\n_D('VX Nano Cordless Laser Mouse', codename='VX Nano', protocol=1.0, wpid=('100B', '100F'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('V550 Nano Cordless Laser Mouse', codename='V550 Nano', protocol=1.0, wpid='1013',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Mini receiver mice\n\n_D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V400 Laser Cordless Mouse', 
codename='V400', protocol=1.0, wpid='1003',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V450 Laser Cordless Mouse', codename='V450', protocol=1.0, wpid='1005',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('VX Revolution', codename='VX Revolution', kind=_DK.mouse, protocol=1.0, wpid=('1006', '100D'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Revolution', codename='MX Revolution', protocol=1.0, kind=_DK.mouse, wpid=('1008', '100C'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX 1100 Cordless Laser Mouse', codename='MX 1100', protocol=1.0, kind=_DK.mouse, wpid='1014',\n registers=(_R.battery_charge, ),\n settings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n )\n\n# Some exotics...\n\n_D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')\n", "path": "lib/logitech_receiver/descriptors.py"}]} |
gh_patches_debug_1394 | rasdani/github-patches | git_diff | Textualize__textual-2654 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs for `ProgressBar.percentage` talk about a `ProgressBar.Started` message that doesn't exist
Likely a docstring hangover from the evolution of `ProgressBar`?
Ping @rodrigogiraoserrao for obvious reasons.
--- END ISSUE ---
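A minimal sketch of the behaviour in question, assuming the `ProgressBar` API quoted in the file below: `percentage` is driven entirely by `total`/`progress`, and no `Started` message class exists on the widget.

```py
# Minimal sketch (assumes the ProgressBar implementation quoted below):
# percentage flips from None to a float once a total is known, and the
# widget defines no nested `Started` message class at all.
from textual.widgets import ProgressBar

print(hasattr(ProgressBar, "Started"))  # False -- no such message class

bar = ProgressBar()       # no total yet, so the bar is indeterminate
print(bar.percentage)     # None
bar.update(total=100)
bar.advance(50)
print(bar.percentage)     # 0.5
```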
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_progress_bar.py`
Content:
```
1 """Implements a progress bar widget."""
2
3 from __future__ import annotations
4
5 from math import ceil
6 from time import monotonic
7 from typing import Callable, Optional
8
9 from rich.style import Style
10
11 from textual.geometry import clamp
12
13 from ..app import ComposeResult, RenderResult
14 from ..containers import Horizontal
15 from ..reactive import reactive
16 from ..renderables.bar import Bar as BarRenderable
17 from ..timer import Timer
18 from ..widget import Widget
19 from ..widgets import Label
20
21
22 class Bar(Widget, can_focus=False):
23 """The bar portion of the progress bar."""
24
25 COMPONENT_CLASSES = {"bar--bar", "bar--complete", "bar--indeterminate"}
26 """
27 The bar sub-widget provides the component classes that follow.
28
29 These component classes let you modify the foreground and background color of the
30 bar in its different states.
31
32 | Class | Description |
33 | :- | :- |
34 | `bar--bar` | Style of the bar (may be used to change the color). |
35 | `bar--complete` | Style of the bar when it's complete. |
36 | `bar--indeterminate` | Style of the bar when it's in an indeterminate state. |
37 """
38
39 DEFAULT_CSS = """
40 Bar {
41 width: 32;
42 height: 1;
43 }
44 Bar > .bar--bar {
45 color: $warning;
46 background: $foreground 10%;
47 }
48 Bar > .bar--indeterminate {
49 color: $error;
50 background: $foreground 10%;
51 }
52 Bar > .bar--complete {
53 color: $success;
54 background: $foreground 10%;
55 }
56 """
57
58 _percentage: reactive[float | None] = reactive[Optional[float]](None)
59 """The percentage of progress that has been completed."""
60 _start_time: float | None
61 """The time when the widget started tracking progress."""
62
63 def __init__(
64 self,
65 name: str | None = None,
66 id: str | None = None,
67 classes: str | None = None,
68 disabled: bool = False,
69 ):
70 """Create a bar for a [`ProgressBar`][textual.widgets.ProgressBar]."""
71 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
72 self._start_time = None
73 self._percentage = None
74
75 def watch__percentage(self, percentage: float | None) -> None:
76 """Manage the timer that enables the indeterminate bar animation."""
77 if percentage is not None:
78 self.auto_refresh = None
79 else:
80 self.auto_refresh = 1 / 15
81
82 def render(self) -> RenderResult:
83 """Render the bar with the correct portion filled."""
84 if self._percentage is None:
85 return self.render_indeterminate()
86 else:
87 bar_style = (
88 self.get_component_rich_style("bar--bar")
89 if self._percentage < 1
90 else self.get_component_rich_style("bar--complete")
91 )
92 return BarRenderable(
93 highlight_range=(0, self.size.width * self._percentage),
94 highlight_style=Style.from_color(bar_style.color),
95 background_style=Style.from_color(bar_style.bgcolor),
96 )
97
98 def render_indeterminate(self) -> RenderResult:
99 """Render a frame of the indeterminate progress bar animation."""
100 width = self.size.width
101 highlighted_bar_width = 0.25 * width
102 # Width used to enable the visual effect of the bar going into the corners.
103 total_imaginary_width = width + highlighted_bar_width
104
105 speed = 30 # Cells per second.
106 # Compute the position of the bar.
107 start = (speed * self._get_elapsed_time()) % (2 * total_imaginary_width)
108 if start > total_imaginary_width:
109 # If the bar is to the right of its width, wrap it back from right to left.
110 start = 2 * total_imaginary_width - start # = (tiw - (start - tiw))
111 start -= highlighted_bar_width
112 end = start + highlighted_bar_width
113
114 bar_style = self.get_component_rich_style("bar--indeterminate")
115 return BarRenderable(
116 highlight_range=(max(0, start), min(end, width)),
117 highlight_style=Style.from_color(bar_style.color),
118 background_style=Style.from_color(bar_style.bgcolor),
119 )
120
121 def _get_elapsed_time(self) -> float:
122 """Get time for the indeterminate progress animation.
123
124 This method ensures that the progress bar animation always starts at the
125 beginning and it also makes it easier to test the bar if we monkey patch
126 this method.
127
128 Returns:
129 The time elapsed since the bar started being animated.
130 """
131 if self._start_time is None:
132 self._start_time = monotonic()
133 return 0
134 return monotonic() - self._start_time
135
136
137 class PercentageStatus(Label):
138 """A label to display the percentage status of the progress bar."""
139
140 DEFAULT_CSS = """
141 PercentageStatus {
142 width: 5;
143 content-align-horizontal: right;
144 }
145 """
146
147 _label_text: reactive[str] = reactive("", repaint=False)
148 """This is used as an auxiliary reactive to only refresh the label when needed."""
149 _percentage: reactive[float | None] = reactive[Optional[float]](None)
150 """The percentage of progress that has been completed."""
151
152 def __init__(
153 self,
154 name: str | None = None,
155 id: str | None = None,
156 classes: str | None = None,
157 disabled: bool = False,
158 ):
159 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
160 self._percentage = None
161 self._label_text = "--%"
162
163 def watch__percentage(self, percentage: float | None) -> None:
164 """Manage the text that shows the percentage of progress."""
165 if percentage is None:
166 self._label_text = "--%"
167 else:
168 self._label_text = f"{int(100 * percentage)}%"
169
170 def watch__label_text(self, label_text: str) -> None:
171 """If the label text changed, update the renderable (which also refreshes)."""
172 self.update(label_text)
173
174
175 class ETAStatus(Label):
176 """A label to display the estimated time until completion of the progress bar."""
177
178 DEFAULT_CSS = """
179 ETAStatus {
180 width: 9;
181 content-align-horizontal: right;
182 }
183 """
184
185 _label_text: reactive[str] = reactive("", repaint=False)
186 """This is used as an auxiliary reactive to only refresh the label when needed."""
187 _percentage: reactive[float | None] = reactive[Optional[float]](None)
188 """The percentage of progress that has been completed."""
189 _refresh_timer: Timer
190 """Timer to update ETA status even when progress stalls."""
191 _start_time: float | None
192 """The time when the widget started tracking progress."""
193
194 def __init__(
195 self,
196 name: str | None = None,
197 id: str | None = None,
198 classes: str | None = None,
199 disabled: bool = False,
200 ):
201 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
202 self._percentage = None
203 self._label_text = "--:--:--"
204 self._start_time = None
205
206 def on_mount(self) -> None:
207 """Periodically refresh the countdown so that the ETA is always up to date."""
208 self._refresh_timer = self.set_interval(1 / 2, self.update_eta, pause=True)
209
210 def watch__percentage(self, percentage: float | None) -> None:
211 if percentage is None:
212 self._label_text = "--:--:--"
213 else:
214 self._refresh_timer.reset()
215 self.update_eta()
216
217 def update_eta(self) -> None:
218 """Update the ETA display."""
219 percentage = self._percentage
220 delta = self._get_elapsed_time()
221 # We display --:--:-- if we haven't started, if we are done,
222 # or if we don't know when we started keeping track of time.
223 if not percentage or percentage >= 1 or not delta:
224 self._label_text = "--:--:--"
225 # If we are done, we can delete the timer that periodically refreshes
226 # the countdown display.
227 if percentage is not None and percentage >= 1:
228 self.auto_refresh = None
229 # Render a countdown timer with hh:mm:ss, unless it's a LONG time.
230 else:
231 left = ceil((delta / percentage) * (1 - percentage))
232 minutes, seconds = divmod(left, 60)
233 hours, minutes = divmod(minutes, 60)
234 if hours > 999999:
235 self._label_text = "+999999h"
236 elif hours > 99:
237 self._label_text = f"{hours}h"
238 else:
239 self._label_text = f"{hours:02}:{minutes:02}:{seconds:02}"
240
241 def _get_elapsed_time(self) -> float:
242 """Get time to estimate time to progress completion.
243
244 Returns:
245 The time elapsed since the bar started being animated.
246 """
247 if self._start_time is None:
248 self._start_time = monotonic()
249 return 0
250 return monotonic() - self._start_time
251
252 def watch__label_text(self, label_text: str) -> None:
253 """If the ETA label changed, update the renderable (which also refreshes)."""
254 self.update(label_text)
255
256
257 class ProgressBar(Widget, can_focus=False):
258 """A progress bar widget."""
259
260 DEFAULT_CSS = """
261 ProgressBar > Horizontal {
262 width: auto;
263 height: auto;
264 }
265 ProgressBar {
266 width: auto;
267 height: 1;
268 }
269 """
270
271 progress: reactive[float] = reactive(0.0)
272 """The progress so far, in number of steps."""
273 total: reactive[float | None] = reactive[Optional[float]](None)
274 """The total number of steps associated with this progress bar, when known.
275
276 The value `None` will render an indeterminate progress bar.
277 Once `total` is set to a numerical value, it cannot be set back to `None`.
278 """
279 percentage: reactive[float | None] = reactive[Optional[float]](None)
280 """The percentage of progress that has been completed.
281
282 The percentage is a value between 0 and 1 and the returned value is only
283 `None` if the total progress of the bar hasn't been set yet.
284 In other words, after the progress bar emits the message
285 [`ProgressBar.Started`][textual.widgets.ProgressBar.Started],
286 the value of `percentage` is always not `None`.
287
288 Example:
289 ```py
290 progress_bar = ProgressBar()
291 print(progress_bar.percentage) # None
292 progress_bar.update(total=100)
293 progress_bar.advance(50)
294 print(progress_bar.percentage) # 0.5
295 ```
296 """
297
298 def __init__(
299 self,
300 total: float | None = None,
301 *,
302 show_bar: bool = True,
303 show_percentage: bool = True,
304 show_eta: bool = True,
305 name: str | None = None,
306 id: str | None = None,
307 classes: str | None = None,
308 disabled: bool = False,
309 ):
310 """Create a Progress Bar widget.
311
312 The progress bar uses "steps" as the measurement unit.
313
314 Example:
315 ```py
316 class MyApp(App):
317 def compose(self):
318 yield ProgressBar(total=100)
319
320 def key_space(self):
321 self.query_one(ProgressBar).advance(5)
322 ```
323
324 Args:
325 total: The total number of steps in the progress if known.
326 show_bar: Whether to show the bar portion of the progress bar.
327 show_percentage: Whether to show the percentage status of the bar.
328 show_eta: Whether to show the ETA countdown of the progress bar.
329 name: The name of the widget.
330 id: The ID of the widget in the DOM.
331 classes: The CSS classes for the widget.
332 disabled: Whether the widget is disabled or not.
333 """
334 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
335 self.show_bar = show_bar
336 self.show_percentage = show_percentage
337 self.show_eta = show_eta
338
339 self.total = total
340
341 def compose(self) -> ComposeResult:
342 # We create a closure so that we can determine what are the sub-widgets
343 # that are present and, therefore, will need to be notified about changes
344 # to the percentage.
345 def update_percentage(widget: Widget) -> Callable[[float | None], None]:
346 """Closure to allow updating the percentage of a given widget."""
347
348 def updater(percentage: float | None) -> None:
349 """Update the percentage reactive of the enclosed widget."""
350 widget._percentage = percentage
351
352 return updater
353
354 with Horizontal():
355 if self.show_bar:
356 bar = Bar(id="bar")
357 self.watch(self, "percentage", update_percentage(bar))
358 yield bar
359 if self.show_percentage:
360 percentage_status = PercentageStatus(id="percentage")
361 self.watch(self, "percentage", update_percentage(percentage_status))
362 yield percentage_status
363 if self.show_eta:
364 eta_status = ETAStatus(id="eta")
365 self.watch(self, "percentage", update_percentage(eta_status))
366 yield eta_status
367
368 def validate_progress(self, progress: float) -> float:
369 """Clamp the progress between 0 and the maximum total."""
370 if self.total is not None:
371 return clamp(progress, 0, self.total)
372 return progress
373
374 def validate_total(self, total: float | None) -> float | None:
375 """Ensure the total is not negative."""
376 if total is None:
377 return total
378 return max(0, total)
379
380 def watch_total(self, total: float | None) -> None:
381 """Re-validate progress."""
382 self.progress = self.progress
383
384 def compute_percentage(self) -> float | None:
385 """Keep the percentage of progress updated automatically.
386
387 This will report a percentage of `1` if the total is zero.
388 """
389 if self.total:
390 return self.progress / self.total
391 elif self.total == 0:
392 return 1
393 return None
394
395 def advance(self, advance: float = 1) -> None:
396 """Advance the progress of the progress bar by the given amount.
397
398 Example:
399 ```py
400 progress_bar.advance(10) # Advance 10 steps.
401 ```
402 Args:
403 advance: Number of steps to advance progress by.
404 """
405 self.progress += advance
406
407 def update(
408 self,
409 *,
410 total: float | None = None,
411 progress: float | None = None,
412 advance: float | None = None,
413 ) -> None:
414 """Update the progress bar with the given options.
415
416 Options only affect the progress bar if they are not `None`.
417
418 Example:
419 ```py
420 progress_bar.update(
421 total=200, # Set new total to 200 steps.
422 progress=None, # This has no effect.
423 )
424 ```
425
426 Args:
427 total: New total number of steps (if not `None`).
428 progress: Set the progress to the given number of steps (if not `None`).
429 advance: Advance the progress by this number of steps (if not `None`).
430 """
431 if total is not None:
432 self.total = total
433 if progress is not None:
434 self.progress = progress
435 if advance is not None:
436 self.progress += advance
437
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
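A quick way to confirm the stale cross-reference is actually gone once a patch like the example above has been applied is to scan the module's source; this is only a sketch and assumes a local, importable copy of Textual with the module path shown earlier.

```py
# Sketch of a post-patch check (assumes textual.widgets._progress_bar is
# importable): the module source should no longer mention ProgressBar.Started.
import inspect
import textual.widgets._progress_bar as progress_bar_module

source = inspect.getsource(progress_bar_module)
print("ProgressBar.Started" in source)  # expected: False after the fix
```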
| diff --git a/src/textual/widgets/_progress_bar.py b/src/textual/widgets/_progress_bar.py
--- a/src/textual/widgets/_progress_bar.py
+++ b/src/textual/widgets/_progress_bar.py
@@ -281,9 +281,6 @@
The percentage is a value between 0 and 1 and the returned value is only
`None` if the total progress of the bar hasn't been set yet.
- In other words, after the progress bar emits the message
- [`ProgressBar.Started`][textual.widgets.ProgressBar.Started],
- the value of `percentage` is always not `None`.
Example:
```py
| {"golden_diff": "diff --git a/src/textual/widgets/_progress_bar.py b/src/textual/widgets/_progress_bar.py\n--- a/src/textual/widgets/_progress_bar.py\n+++ b/src/textual/widgets/_progress_bar.py\n@@ -281,9 +281,6 @@\n \n The percentage is a value between 0 and 1 and the returned value is only\n `None` if the total progress of the bar hasn't been set yet.\n- In other words, after the progress bar emits the message\n- [`ProgressBar.Started`][textual.widgets.ProgressBar.Started],\n- the value of `percentage` is always not `None`.\n \n Example:\n ```py\n", "issue": "Docs for `ProgressBar.percentage` talk about a `ProgressBar.Started` message that doesn't exist\nLikely a docstring hangover from the evolution of `ProgressBar`?\r\n\r\nPing @rodrigogiraoserrao for obvious reasons.\n", "before_files": [{"content": "\"\"\"Implements a progress bar widget.\"\"\"\n\nfrom __future__ import annotations\n\nfrom math import ceil\nfrom time import monotonic\nfrom typing import Callable, Optional\n\nfrom rich.style import Style\n\nfrom textual.geometry import clamp\n\nfrom ..app import ComposeResult, RenderResult\nfrom ..containers import Horizontal\nfrom ..reactive import reactive\nfrom ..renderables.bar import Bar as BarRenderable\nfrom ..timer import Timer\nfrom ..widget import Widget\nfrom ..widgets import Label\n\n\nclass Bar(Widget, can_focus=False):\n \"\"\"The bar portion of the progress bar.\"\"\"\n\n COMPONENT_CLASSES = {\"bar--bar\", \"bar--complete\", \"bar--indeterminate\"}\n \"\"\"\n The bar sub-widget provides the component classes that follow.\n\n These component classes let you modify the foreground and background color of the\n bar in its different states.\n\n | Class | Description |\n | :- | :- |\n | `bar--bar` | Style of the bar (may be used to change the color). |\n | `bar--complete` | Style of the bar when it's complete. |\n | `bar--indeterminate` | Style of the bar when it's in an indeterminate state. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Bar {\n width: 32;\n height: 1;\n }\n Bar > .bar--bar {\n color: $warning;\n background: $foreground 10%;\n }\n Bar > .bar--indeterminate {\n color: $error;\n background: $foreground 10%;\n }\n Bar > .bar--complete {\n color: $success;\n background: $foreground 10%;\n }\n \"\"\"\n\n _percentage: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The percentage of progress that has been completed.\"\"\"\n _start_time: float | None\n \"\"\"The time when the widget started tracking progress.\"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Create a bar for a [`ProgressBar`][textual.widgets.ProgressBar].\"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._start_time = None\n self._percentage = None\n\n def watch__percentage(self, percentage: float | None) -> None:\n \"\"\"Manage the timer that enables the indeterminate bar animation.\"\"\"\n if percentage is not None:\n self.auto_refresh = None\n else:\n self.auto_refresh = 1 / 15\n\n def render(self) -> RenderResult:\n \"\"\"Render the bar with the correct portion filled.\"\"\"\n if self._percentage is None:\n return self.render_indeterminate()\n else:\n bar_style = (\n self.get_component_rich_style(\"bar--bar\")\n if self._percentage < 1\n else self.get_component_rich_style(\"bar--complete\")\n )\n return BarRenderable(\n highlight_range=(0, self.size.width * self._percentage),\n highlight_style=Style.from_color(bar_style.color),\n background_style=Style.from_color(bar_style.bgcolor),\n )\n\n def render_indeterminate(self) -> RenderResult:\n \"\"\"Render a frame of the indeterminate progress bar animation.\"\"\"\n width = self.size.width\n highlighted_bar_width = 0.25 * width\n # Width used to enable the visual effect of the bar going into the corners.\n total_imaginary_width = width + highlighted_bar_width\n\n speed = 30 # Cells per second.\n # Compute the position of the bar.\n start = (speed * self._get_elapsed_time()) % (2 * total_imaginary_width)\n if start > total_imaginary_width:\n # If the bar is to the right of its width, wrap it back from right to left.\n start = 2 * total_imaginary_width - start # = (tiw - (start - tiw))\n start -= highlighted_bar_width\n end = start + highlighted_bar_width\n\n bar_style = self.get_component_rich_style(\"bar--indeterminate\")\n return BarRenderable(\n highlight_range=(max(0, start), min(end, width)),\n highlight_style=Style.from_color(bar_style.color),\n background_style=Style.from_color(bar_style.bgcolor),\n )\n\n def _get_elapsed_time(self) -> float:\n \"\"\"Get time for the indeterminate progress animation.\n\n This method ensures that the progress bar animation always starts at the\n beginning and it also makes it easier to test the bar if we monkey patch\n this method.\n\n Returns:\n The time elapsed since the bar started being animated.\n \"\"\"\n if self._start_time is None:\n self._start_time = monotonic()\n return 0\n return monotonic() - self._start_time\n\n\nclass PercentageStatus(Label):\n \"\"\"A label to display the percentage status of the progress bar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n PercentageStatus {\n width: 5;\n content-align-horizontal: right;\n }\n \"\"\"\n\n _label_text: reactive[str] = reactive(\"\", repaint=False)\n \"\"\"This is used as an auxiliary reactive to only refresh the label when needed.\"\"\"\n _percentage: reactive[float | None] = reactive[Optional[float]](None)\n 
\"\"\"The percentage of progress that has been completed.\"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._percentage = None\n self._label_text = \"--%\"\n\n def watch__percentage(self, percentage: float | None) -> None:\n \"\"\"Manage the text that shows the percentage of progress.\"\"\"\n if percentage is None:\n self._label_text = \"--%\"\n else:\n self._label_text = f\"{int(100 * percentage)}%\"\n\n def watch__label_text(self, label_text: str) -> None:\n \"\"\"If the label text changed, update the renderable (which also refreshes).\"\"\"\n self.update(label_text)\n\n\nclass ETAStatus(Label):\n \"\"\"A label to display the estimated time until completion of the progress bar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ETAStatus {\n width: 9;\n content-align-horizontal: right;\n }\n \"\"\"\n\n _label_text: reactive[str] = reactive(\"\", repaint=False)\n \"\"\"This is used as an auxiliary reactive to only refresh the label when needed.\"\"\"\n _percentage: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The percentage of progress that has been completed.\"\"\"\n _refresh_timer: Timer\n \"\"\"Timer to update ETA status even when progress stalls.\"\"\"\n _start_time: float | None\n \"\"\"The time when the widget started tracking progress.\"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._percentage = None\n self._label_text = \"--:--:--\"\n self._start_time = None\n\n def on_mount(self) -> None:\n \"\"\"Periodically refresh the countdown so that the ETA is always up to date.\"\"\"\n self._refresh_timer = self.set_interval(1 / 2, self.update_eta, pause=True)\n\n def watch__percentage(self, percentage: float | None) -> None:\n if percentage is None:\n self._label_text = \"--:--:--\"\n else:\n self._refresh_timer.reset()\n self.update_eta()\n\n def update_eta(self) -> None:\n \"\"\"Update the ETA display.\"\"\"\n percentage = self._percentage\n delta = self._get_elapsed_time()\n # We display --:--:-- if we haven't started, if we are done,\n # or if we don't know when we started keeping track of time.\n if not percentage or percentage >= 1 or not delta:\n self._label_text = \"--:--:--\"\n # If we are done, we can delete the timer that periodically refreshes\n # the countdown display.\n if percentage is not None and percentage >= 1:\n self.auto_refresh = None\n # Render a countdown timer with hh:mm:ss, unless it's a LONG time.\n else:\n left = ceil((delta / percentage) * (1 - percentage))\n minutes, seconds = divmod(left, 60)\n hours, minutes = divmod(minutes, 60)\n if hours > 999999:\n self._label_text = \"+999999h\"\n elif hours > 99:\n self._label_text = f\"{hours}h\"\n else:\n self._label_text = f\"{hours:02}:{minutes:02}:{seconds:02}\"\n\n def _get_elapsed_time(self) -> float:\n \"\"\"Get time to estimate time to progress completion.\n\n Returns:\n The time elapsed since the bar started being animated.\n \"\"\"\n if self._start_time is None:\n self._start_time = monotonic()\n return 0\n return monotonic() - self._start_time\n\n def watch__label_text(self, label_text: str) -> None:\n \"\"\"If the ETA label changed, update the renderable (which also refreshes).\"\"\"\n self.update(label_text)\n\n\nclass ProgressBar(Widget, 
can_focus=False):\n \"\"\"A progress bar widget.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ProgressBar > Horizontal {\n width: auto;\n height: auto;\n }\n ProgressBar {\n width: auto;\n height: 1;\n }\n \"\"\"\n\n progress: reactive[float] = reactive(0.0)\n \"\"\"The progress so far, in number of steps.\"\"\"\n total: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The total number of steps associated with this progress bar, when known.\n\n The value `None` will render an indeterminate progress bar.\n Once `total` is set to a numerical value, it cannot be set back to `None`.\n \"\"\"\n percentage: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The percentage of progress that has been completed.\n\n The percentage is a value between 0 and 1 and the returned value is only\n `None` if the total progress of the bar hasn't been set yet.\n In other words, after the progress bar emits the message\n [`ProgressBar.Started`][textual.widgets.ProgressBar.Started],\n the value of `percentage` is always not `None`.\n\n Example:\n ```py\n progress_bar = ProgressBar()\n print(progress_bar.percentage) # None\n progress_bar.update(total=100)\n progress_bar.advance(50)\n print(progress_bar.percentage) # 0.5\n ```\n \"\"\"\n\n def __init__(\n self,\n total: float | None = None,\n *,\n show_bar: bool = True,\n show_percentage: bool = True,\n show_eta: bool = True,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Create a Progress Bar widget.\n\n The progress bar uses \"steps\" as the measurement unit.\n\n Example:\n ```py\n class MyApp(App):\n def compose(self):\n yield ProgressBar(total=100)\n\n def key_space(self):\n self.query_one(ProgressBar).advance(5)\n ```\n\n Args:\n total: The total number of steps in the progress if known.\n show_bar: Whether to show the bar portion of the progress bar.\n show_percentage: Whether to show the percentage status of the bar.\n show_eta: Whether to show the ETA countdown of the progress bar.\n name: The name of the widget.\n id: The ID of the widget in the DOM.\n classes: The CSS classes for the widget.\n disabled: Whether the widget is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.show_bar = show_bar\n self.show_percentage = show_percentage\n self.show_eta = show_eta\n\n self.total = total\n\n def compose(self) -> ComposeResult:\n # We create a closure so that we can determine what are the sub-widgets\n # that are present and, therefore, will need to be notified about changes\n # to the percentage.\n def update_percentage(widget: Widget) -> Callable[[float | None], None]:\n \"\"\"Closure to allow updating the percentage of a given widget.\"\"\"\n\n def updater(percentage: float | None) -> None:\n \"\"\"Update the percentage reactive of the enclosed widget.\"\"\"\n widget._percentage = percentage\n\n return updater\n\n with Horizontal():\n if self.show_bar:\n bar = Bar(id=\"bar\")\n self.watch(self, \"percentage\", update_percentage(bar))\n yield bar\n if self.show_percentage:\n percentage_status = PercentageStatus(id=\"percentage\")\n self.watch(self, \"percentage\", update_percentage(percentage_status))\n yield percentage_status\n if self.show_eta:\n eta_status = ETAStatus(id=\"eta\")\n self.watch(self, \"percentage\", update_percentage(eta_status))\n yield eta_status\n\n def validate_progress(self, progress: float) -> float:\n \"\"\"Clamp the progress between 0 and the maximum total.\"\"\"\n if self.total is not 
None:\n return clamp(progress, 0, self.total)\n return progress\n\n def validate_total(self, total: float | None) -> float | None:\n \"\"\"Ensure the total is not negative.\"\"\"\n if total is None:\n return total\n return max(0, total)\n\n def watch_total(self, total: float | None) -> None:\n \"\"\"Re-validate progress.\"\"\"\n self.progress = self.progress\n\n def compute_percentage(self) -> float | None:\n \"\"\"Keep the percentage of progress updated automatically.\n\n This will report a percentage of `1` if the total is zero.\n \"\"\"\n if self.total:\n return self.progress / self.total\n elif self.total == 0:\n return 1\n return None\n\n def advance(self, advance: float = 1) -> None:\n \"\"\"Advance the progress of the progress bar by the given amount.\n\n Example:\n ```py\n progress_bar.advance(10) # Advance 10 steps.\n ```\n Args:\n advance: Number of steps to advance progress by.\n \"\"\"\n self.progress += advance\n\n def update(\n self,\n *,\n total: float | None = None,\n progress: float | None = None,\n advance: float | None = None,\n ) -> None:\n \"\"\"Update the progress bar with the given options.\n\n Options only affect the progress bar if they are not `None`.\n\n Example:\n ```py\n progress_bar.update(\n total=200, # Set new total to 200 steps.\n progress=None, # This has no effect.\n )\n ```\n\n Args:\n total: New total number of steps (if not `None`).\n progress: Set the progress to the given number of steps (if not `None`).\n advance: Advance the progress by this number of steps (if not `None`).\n \"\"\"\n if total is not None:\n self.total = total\n if progress is not None:\n self.progress = progress\n if advance is not None:\n self.progress += advance\n", "path": "src/textual/widgets/_progress_bar.py"}], "after_files": [{"content": "\"\"\"Implements a progress bar widget.\"\"\"\n\nfrom __future__ import annotations\n\nfrom math import ceil\nfrom time import monotonic\nfrom typing import Callable, Optional\n\nfrom rich.style import Style\n\nfrom textual.geometry import clamp\n\nfrom ..app import ComposeResult, RenderResult\nfrom ..containers import Horizontal\nfrom ..reactive import reactive\nfrom ..renderables.bar import Bar as BarRenderable\nfrom ..timer import Timer\nfrom ..widget import Widget\nfrom ..widgets import Label\n\n\nclass Bar(Widget, can_focus=False):\n \"\"\"The bar portion of the progress bar.\"\"\"\n\n COMPONENT_CLASSES = {\"bar--bar\", \"bar--complete\", \"bar--indeterminate\"}\n \"\"\"\n The bar sub-widget provides the component classes that follow.\n\n These component classes let you modify the foreground and background color of the\n bar in its different states.\n\n | Class | Description |\n | :- | :- |\n | `bar--bar` | Style of the bar (may be used to change the color). |\n | `bar--complete` | Style of the bar when it's complete. |\n | `bar--indeterminate` | Style of the bar when it's in an indeterminate state. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Bar {\n width: 32;\n height: 1;\n }\n Bar > .bar--bar {\n color: $warning;\n background: $foreground 10%;\n }\n Bar > .bar--indeterminate {\n color: $error;\n background: $foreground 10%;\n }\n Bar > .bar--complete {\n color: $success;\n background: $foreground 10%;\n }\n \"\"\"\n\n _percentage: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The percentage of progress that has been completed.\"\"\"\n _start_time: float | None\n \"\"\"The time when the widget started tracking progress.\"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Create a bar for a [`ProgressBar`][textual.widgets.ProgressBar].\"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._start_time = None\n self._percentage = None\n\n def watch__percentage(self, percentage: float | None) -> None:\n \"\"\"Manage the timer that enables the indeterminate bar animation.\"\"\"\n if percentage is not None:\n self.auto_refresh = None\n else:\n self.auto_refresh = 1 / 15\n\n def render(self) -> RenderResult:\n \"\"\"Render the bar with the correct portion filled.\"\"\"\n if self._percentage is None:\n return self.render_indeterminate()\n else:\n bar_style = (\n self.get_component_rich_style(\"bar--bar\")\n if self._percentage < 1\n else self.get_component_rich_style(\"bar--complete\")\n )\n return BarRenderable(\n highlight_range=(0, self.size.width * self._percentage),\n highlight_style=Style.from_color(bar_style.color),\n background_style=Style.from_color(bar_style.bgcolor),\n )\n\n def render_indeterminate(self) -> RenderResult:\n \"\"\"Render a frame of the indeterminate progress bar animation.\"\"\"\n width = self.size.width\n highlighted_bar_width = 0.25 * width\n # Width used to enable the visual effect of the bar going into the corners.\n total_imaginary_width = width + highlighted_bar_width\n\n speed = 30 # Cells per second.\n # Compute the position of the bar.\n start = (speed * self._get_elapsed_time()) % (2 * total_imaginary_width)\n if start > total_imaginary_width:\n # If the bar is to the right of its width, wrap it back from right to left.\n start = 2 * total_imaginary_width - start # = (tiw - (start - tiw))\n start -= highlighted_bar_width\n end = start + highlighted_bar_width\n\n bar_style = self.get_component_rich_style(\"bar--indeterminate\")\n return BarRenderable(\n highlight_range=(max(0, start), min(end, width)),\n highlight_style=Style.from_color(bar_style.color),\n background_style=Style.from_color(bar_style.bgcolor),\n )\n\n def _get_elapsed_time(self) -> float:\n \"\"\"Get time for the indeterminate progress animation.\n\n This method ensures that the progress bar animation always starts at the\n beginning and it also makes it easier to test the bar if we monkey patch\n this method.\n\n Returns:\n The time elapsed since the bar started being animated.\n \"\"\"\n if self._start_time is None:\n self._start_time = monotonic()\n return 0\n return monotonic() - self._start_time\n\n\nclass PercentageStatus(Label):\n \"\"\"A label to display the percentage status of the progress bar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n PercentageStatus {\n width: 5;\n content-align-horizontal: right;\n }\n \"\"\"\n\n _label_text: reactive[str] = reactive(\"\", repaint=False)\n \"\"\"This is used as an auxiliary reactive to only refresh the label when needed.\"\"\"\n _percentage: reactive[float | None] = reactive[Optional[float]](None)\n 
\"\"\"The percentage of progress that has been completed.\"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._percentage = None\n self._label_text = \"--%\"\n\n def watch__percentage(self, percentage: float | None) -> None:\n \"\"\"Manage the text that shows the percentage of progress.\"\"\"\n if percentage is None:\n self._label_text = \"--%\"\n else:\n self._label_text = f\"{int(100 * percentage)}%\"\n\n def watch__label_text(self, label_text: str) -> None:\n \"\"\"If the label text changed, update the renderable (which also refreshes).\"\"\"\n self.update(label_text)\n\n\nclass ETAStatus(Label):\n \"\"\"A label to display the estimated time until completion of the progress bar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ETAStatus {\n width: 9;\n content-align-horizontal: right;\n }\n \"\"\"\n\n _label_text: reactive[str] = reactive(\"\", repaint=False)\n \"\"\"This is used as an auxiliary reactive to only refresh the label when needed.\"\"\"\n _percentage: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The percentage of progress that has been completed.\"\"\"\n _refresh_timer: Timer\n \"\"\"Timer to update ETA status even when progress stalls.\"\"\"\n _start_time: float | None\n \"\"\"The time when the widget started tracking progress.\"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._percentage = None\n self._label_text = \"--:--:--\"\n self._start_time = None\n\n def on_mount(self) -> None:\n \"\"\"Periodically refresh the countdown so that the ETA is always up to date.\"\"\"\n self._refresh_timer = self.set_interval(1 / 2, self.update_eta, pause=True)\n\n def watch__percentage(self, percentage: float | None) -> None:\n if percentage is None:\n self._label_text = \"--:--:--\"\n else:\n self._refresh_timer.reset()\n self.update_eta()\n\n def update_eta(self) -> None:\n \"\"\"Update the ETA display.\"\"\"\n percentage = self._percentage\n delta = self._get_elapsed_time()\n # We display --:--:-- if we haven't started, if we are done,\n # or if we don't know when we started keeping track of time.\n if not percentage or percentage >= 1 or not delta:\n self._label_text = \"--:--:--\"\n # If we are done, we can delete the timer that periodically refreshes\n # the countdown display.\n if percentage is not None and percentage >= 1:\n self.auto_refresh = None\n # Render a countdown timer with hh:mm:ss, unless it's a LONG time.\n else:\n left = ceil((delta / percentage) * (1 - percentage))\n minutes, seconds = divmod(left, 60)\n hours, minutes = divmod(minutes, 60)\n if hours > 999999:\n self._label_text = \"+999999h\"\n elif hours > 99:\n self._label_text = f\"{hours}h\"\n else:\n self._label_text = f\"{hours:02}:{minutes:02}:{seconds:02}\"\n\n def _get_elapsed_time(self) -> float:\n \"\"\"Get time to estimate time to progress completion.\n\n Returns:\n The time elapsed since the bar started being animated.\n \"\"\"\n if self._start_time is None:\n self._start_time = monotonic()\n return 0\n return monotonic() - self._start_time\n\n def watch__label_text(self, label_text: str) -> None:\n \"\"\"If the ETA label changed, update the renderable (which also refreshes).\"\"\"\n self.update(label_text)\n\n\nclass ProgressBar(Widget, 
can_focus=False):\n \"\"\"A progress bar widget.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ProgressBar > Horizontal {\n width: auto;\n height: auto;\n }\n ProgressBar {\n width: auto;\n height: 1;\n }\n \"\"\"\n\n progress: reactive[float] = reactive(0.0)\n \"\"\"The progress so far, in number of steps.\"\"\"\n total: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The total number of steps associated with this progress bar, when known.\n\n The value `None` will render an indeterminate progress bar.\n Once `total` is set to a numerical value, it cannot be set back to `None`.\n \"\"\"\n percentage: reactive[float | None] = reactive[Optional[float]](None)\n \"\"\"The percentage of progress that has been completed.\n\n The percentage is a value between 0 and 1 and the returned value is only\n `None` if the total progress of the bar hasn't been set yet.\n\n Example:\n ```py\n progress_bar = ProgressBar()\n print(progress_bar.percentage) # None\n progress_bar.update(total=100)\n progress_bar.advance(50)\n print(progress_bar.percentage) # 0.5\n ```\n \"\"\"\n\n def __init__(\n self,\n total: float | None = None,\n *,\n show_bar: bool = True,\n show_percentage: bool = True,\n show_eta: bool = True,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Create a Progress Bar widget.\n\n The progress bar uses \"steps\" as the measurement unit.\n\n Example:\n ```py\n class MyApp(App):\n def compose(self):\n yield ProgressBar(total=100)\n\n def key_space(self):\n self.query_one(ProgressBar).advance(5)\n ```\n\n Args:\n total: The total number of steps in the progress if known.\n show_bar: Whether to show the bar portion of the progress bar.\n show_percentage: Whether to show the percentage status of the bar.\n show_eta: Whether to show the ETA countdown of the progress bar.\n name: The name of the widget.\n id: The ID of the widget in the DOM.\n classes: The CSS classes for the widget.\n disabled: Whether the widget is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.show_bar = show_bar\n self.show_percentage = show_percentage\n self.show_eta = show_eta\n\n self.total = total\n\n def compose(self) -> ComposeResult:\n # We create a closure so that we can determine what are the sub-widgets\n # that are present and, therefore, will need to be notified about changes\n # to the percentage.\n def update_percentage(widget: Widget) -> Callable[[float | None], None]:\n \"\"\"Closure to allow updating the percentage of a given widget.\"\"\"\n\n def updater(percentage: float | None) -> None:\n \"\"\"Update the percentage reactive of the enclosed widget.\"\"\"\n widget._percentage = percentage\n\n return updater\n\n with Horizontal():\n if self.show_bar:\n bar = Bar(id=\"bar\")\n self.watch(self, \"percentage\", update_percentage(bar))\n yield bar\n if self.show_percentage:\n percentage_status = PercentageStatus(id=\"percentage\")\n self.watch(self, \"percentage\", update_percentage(percentage_status))\n yield percentage_status\n if self.show_eta:\n eta_status = ETAStatus(id=\"eta\")\n self.watch(self, \"percentage\", update_percentage(eta_status))\n yield eta_status\n\n def validate_progress(self, progress: float) -> float:\n \"\"\"Clamp the progress between 0 and the maximum total.\"\"\"\n if self.total is not None:\n return clamp(progress, 0, self.total)\n return progress\n\n def validate_total(self, total: float | None) -> float | None:\n \"\"\"Ensure the total is not 
negative.\"\"\"\n if total is None:\n return total\n return max(0, total)\n\n def watch_total(self, total: float | None) -> None:\n \"\"\"Re-validate progress.\"\"\"\n self.progress = self.progress\n\n def compute_percentage(self) -> float | None:\n \"\"\"Keep the percentage of progress updated automatically.\n\n This will report a percentage of `1` if the total is zero.\n \"\"\"\n if self.total:\n return self.progress / self.total\n elif self.total == 0:\n return 1\n return None\n\n def advance(self, advance: float = 1) -> None:\n \"\"\"Advance the progress of the progress bar by the given amount.\n\n Example:\n ```py\n progress_bar.advance(10) # Advance 10 steps.\n ```\n Args:\n advance: Number of steps to advance progress by.\n \"\"\"\n self.progress += advance\n\n def update(\n self,\n *,\n total: float | None = None,\n progress: float | None = None,\n advance: float | None = None,\n ) -> None:\n \"\"\"Update the progress bar with the given options.\n\n Options only affect the progress bar if they are not `None`.\n\n Example:\n ```py\n progress_bar.update(\n total=200, # Set new total to 200 steps.\n progress=None, # This has no effect.\n )\n ```\n\n Args:\n total: New total number of steps (if not `None`).\n progress: Set the progress to the given number of steps (if not `None`).\n advance: Advance the progress by this number of steps (if not `None`).\n \"\"\"\n if total is not None:\n self.total = total\n if progress is not None:\n self.progress = progress\n if advance is not None:\n self.progress += advance\n", "path": "src/textual/widgets/_progress_bar.py"}]} |
gh_patches_debug_1395 | rasdani/github-patches | git_diff | voxel51__fiftyone-1063 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] APP non-JavaScript MIME type of "text/plain"
### System information
- **OS Platform and Distribution**: Windows 10 Pro 10.0.18362.239
- **FiftyOne installed from (pip or source)**: pip
- **FiftyOne version (run `fiftyone --version`)**: 0.9.3
- **Python version**: both 3.8 | 3.9.2
### Commands to reproduce
```
Following installation steps on https://voxel51.com/docs/fiftyone/getting_started/install.html
pip install fiftyone
```
### Describe the problem
After installing the package (tested in both an Anaconda environment and a venv environment) and running the quickstart example, the app shows only a blank screen, with the errors described in the Other info / logs section appearing in the browser console.
The issue does not appear to be tied to specific browser settings on this machine: the same error appears when running FiftyOne as a remote session on this machine and accessing it from other machines. It seems to be caused by something in the web server itself, as I could not reproduce the problem by installing FiftyOne on other machines.
Any idea what might be going wrong?
### Code to reproduce issue
```
import fiftyone as fo
import fiftyone.zoo as foz
dataset = foz.load_zoo_dataset("quickstart")
session = fo.launch_app(dataset)
```
### Other info / logs
Google Chrome: `Failed to load module script: The server responded with a non-JavaScript MIME type of "text/plain". Strict MIME type checking is enforced for module scripts per HTML spec.`
for the script
`http://localhost:5151/_dist_/index.js`
Firefox: `Loading module from “http://localhost:5151/_dist_/index.js” was blocked because of a disallowed MIME type (“text/plain”).`
### What areas of FiftyOne does this bug affect?
- [x] `App`: FiftyOne application issue
- [ ] `Core`: Core `fiftyone` Python library issue
- [ ] `Server`: Fiftyone server issue
### Willingness to contribute
- [ ] Yes. I can contribute a fix for this bug independently.
- [x] Yes. I would be willing to contribute a fix for this bug with guidance
from the FiftyOne community.
- [ ] No. I cannot contribute a bug fix at this time.
--- END ISSUE ---
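For context on the errors quoted in the issue: browsers apply strict MIME-type checking to ES module scripts, so a module served with `Content-Type: text/plain` is refused instead of executed, which is why the App renders a blank page. A quick way to confirm what the server is actually sending, before reading any code, is to request the script directly and inspect the response header. This is a minimal sketch using only the standard library; it assumes the App is running locally on the default port 5151, as in the report above.
```python
# Minimal check of the Content-Type reported for the App's module script.
# Assumes a local App session on the default port 5151 (as in the bug report).
from urllib.request import urlopen

with urlopen("http://localhost:5151/_dist_/index.js") as resp:
    # On affected machines this prints a text/plain content type
    # instead of a JavaScript MIME type.
    print(resp.headers.get("Content-Type"))
```
If the header comes back as `text/plain`, the problem is on the serving side rather than in the browser, which matches the reporter's observation that the error follows the machine running the server.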
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/server/main.py`
Content:
```
1 """
2 FiftyOne Tornado server.
3
4 | Copyright 2017-2021, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 import asyncio
9 import argparse
10 from collections import defaultdict
11 import math
12 import os
13 import traceback
14
15 import tornado.escape
16 import tornado.ioloop
17 import tornado.iostream
18 import tornado.options
19 import tornado.web
20 from tornado.web import HTTPError
21 import tornado.websocket
22
23 import eta.core.labels as etal
24 import eta.core.serial as etas
25 import eta.core.video as etav
26
27 os.environ["FIFTYONE_SERVER"] = "1"
28
29 import fiftyone as fo
30 import fiftyone.core.aggregations as foa
31 import fiftyone.constants as foc
32 from fiftyone.core.expressions import ViewField as F, _escape_regex_chars
33 import fiftyone.core.dataset as fod
34 import fiftyone.core.fields as fof
35 import fiftyone.core.labels as fol
36 import fiftyone.core.media as fom
37 import fiftyone.core.odm as foo
38 from fiftyone.core.service import DatabaseService
39 from fiftyone.core.stages import _STAGES
40 import fiftyone.core.stages as fosg
41 import fiftyone.core.state as fos
42 import fiftyone.core.uid as fou
43 import fiftyone.core.view as fov
44
45 from fiftyone.server.extended_view import get_extended_view
46 from fiftyone.server.json_util import convert, FiftyOneJSONEncoder
47 import fiftyone.server.utils as fosu
48
49
50 # connect to the existing DB service to initialize global port information
51 dbs = DatabaseService()
52 dbs.start()
53 db = foo.get_async_db_conn()
54
55
56 class RequestHandler(tornado.web.RequestHandler):
57 """"Base class for HTTP request handlers"""
58
59 def set_default_headers(self, *args, **kwargs):
60 self.set_header("Access-Control-Allow-Origin", "*")
61 self.set_header("Access-Control-Allow-Headers", "x-requested-with")
62 self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
63 self.set_header("x-colab-notebook-cache-control", "no-cache")
64
65 async def get(self):
66 self.write(self.get_response())
67
68 def get_response(self):
69 """Returns the serializable response
70
71 Returns:
72 dict
73 """
74 raise NotImplementedError("subclass must implement get_response()")
75
76
77 class FiftyOneHandler(RequestHandler):
78 """Returns the version info of the fiftyone being used"""
79
80 @staticmethod
81 def get_response():
82 """Returns the serializable response
83
84 Returns:
85 dict
86 """
87 uid, _ = fou.get_user_id()
88 isfile = os.path.isfile(foc.FEEDBACK_PATH)
89 if isfile:
90 submitted = etas.load_json(foc.FEEDBACK_PATH)["submitted"]
91 else:
92 submitted = False
93
94 return {
95 "version": foc.VERSION,
96 "user_id": uid,
97 "do_not_track": fo.config.do_not_track,
98 "feedback": {"submitted": submitted, "minimized": isfile},
99 "dev_install": foc.DEV_INSTALL or foc.RC_INSTALL,
100 }
101
102
103 class NotebookHandler(RequestHandler):
104 """Check that the requested handle exists on the server"""
105
106 async def get(self):
107 # pylint: disable=no-value-for-parameter
108 handle_id = self.get_argument("handleId")
109
110 response = self.get_response(handle_id)
111 if response is None:
112 raise tornado.web.HTTPError(status_code=404)
113
114 self.write(response)
115
116 @staticmethod
117 def get_response(handle):
118 """Returns if the notebook handle exists on the server.
119
120 Returns:
121 the handle ID
122 """
123 global _notebook_clients
124 if handle in set(_notebook_clients.values()):
125 return {"exists": True}
126
127
128 class ReactivateHandler(RequestHandler):
129 """Reactivates an IPython display handle"""
130
131 async def get(self):
132 # pylint: disable=no-value-for-parameter
133 handle_id = self.get_argument("handleId")
134 self.write(self.get_response(handle_id))
135
136 @staticmethod
137 def get_response(handle_id):
138 """Returns on success
139
140 Args:
141 handle_id: a handle uuid
142 """
143 StateHandler.state["active_handle"] = handle_id
144 for client in StateHandler.clients:
145 client.write_message({"type": "reactivate", "handle": handle_id})
146
147 return {}
148
149
150 class StagesHandler(RequestHandler):
151 """Returns the definitions of stages available to the App"""
152
153 @staticmethod
154 def get_response():
155 """Returns the serializable response
156
157 Returns:
158 dict
159 """
160 return {
161 "stages": [
162 {"name": stage.__name__, "params": stage._params()}
163 for stage in _STAGES
164 ]
165 }
166
167
168 class FeedbackHandler(RequestHandler):
169 """Returns whether the feedback button should be minimized"""
170
171 def post(self):
172 submitted = self.get_argument("submitted", False)
173 etas.write_json({"submitted": submitted}, foc.FEEDBACK_PATH)
174
175
176 def _catch_errors(func):
177 async def wrapper(self, *args, **kwargs):
178 try:
179 StateHandler.prev_state = StateHandler.state
180 result = await func(self, *args, **kwargs)
181 return result
182 except Exception:
183 StateHandler.state = StateHandler.prev_state
184 clients = list(StateHandler.clients)
185 if isinstance(self, PollingHandler):
186 clients.append(self)
187
188 for client in clients:
189 client.write_message(
190 {
191 "type": "notification",
192 "kind": "Server Error",
193 "message": (
194 "An exception has been raised by the server. Your session "
195 "has been reverted to its previous state."
196 ),
197 "session_items": [traceback.format_exc()],
198 "app_items": [
199 "A traceback has been printed to your Python shell."
200 ],
201 }
202 )
203
204 return wrapper
205
206
207 _notebook_clients = {}
208 _deactivated_clients = set()
209
210
211 class PollingHandler(tornado.web.RequestHandler):
212
213 clients = defaultdict(set)
214 screenshots = {}
215
216 def set_default_headers(self, *args, **kwargs):
217 self.set_header("Access-Control-Allow-Origin", "*")
218 self.set_header("Access-Control-Allow-Headers", "x-requested-with")
219 self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
220
221 @staticmethod
222 def gather_messages(client):
223 messages = [
224 {"type": message} for message in PollingHandler.clients[client]
225 ]
226 PollingHandler.clients[client].clear()
227 return messages
228
229 @_catch_errors
230 async def get(self):
231 # pylint: disable=no-value-for-parameter
232 client = self.get_argument("sessionId")
233 if client not in PollingHandler.clients:
234 PollingHandler.clients[client].add("update")
235 PollingHandler.clients[client].add("statistics")
236 PollingHandler.clients[client].add("extended_statistics")
237
238 messages = self.gather_messages(client)
239 self.write_message({"messages": messages})
240
241 @_catch_errors
242 async def post(self):
243 # pylint: disable=no-value-for-parameter
244 client = self.get_argument("sessionId")
245 # pylint: disable=no-value-for-parameter
246 mode = self.get_argument("mode")
247 message = StateHandler.loads(self.request.body)
248 event = message.pop("type")
249 force_update = False
250 if mode == "push":
251 if event == "as_app":
252 if message["notebook"]:
253 message["ignore"] = client
254 global _notebook_clients
255 global _deactivated_clients
256 StateHandler.state["active_handle"] = message["handle"]
257 _deactivated_clients.discard(message["handle"])
258 _notebook_clients[client] = message["handle"]
259 event = "update"
260 force_update = True
261 message = {"state": StateHandler.state}
262
263 if event in {
264 "distinct",
265 "distributions",
266 "page",
267 "get_video_data",
268 "all_tags",
269 "selected_statistics",
270 "tag_modal",
271 }:
272 caller = self
273 elif event in {"capture", "update"}:
274 caller = client
275 else:
276 caller = StateHandler
277
278 if event == "refresh":
279 message["polling_client"] = client
280
281 if event == "update" and not force_update:
282 message["ignore_polling_client"] = client
283
284 handle = getattr(StateHandler, "on_%s" % event)
285 await handle(caller, **message)
286
287 if caller == self:
288 return
289
290 messages = self.gather_messages(client)
291 self.write_message({"messages": messages})
292 return
293
294 if event == "update":
295 self.write_message({"type": "update", "state": StateHandler.state})
296
297 elif event == "deactivate":
298 self.write_message({"type": "deactivate"})
299
300 state = fos.StateDescription.from_dict(StateHandler.state)
301 if state.view is not None:
302 view = state.view
303 else:
304 view = state.dataset
305
306 if event == "statistics":
307 await StateHandler.send_statistics(view, only=self)
308
309 elif event == "extended_statistics":
310 await StateHandler.send_statistics(
311 view, only=self, filters=state.filters
312 )
313
314 def write_message(self, message):
315 message = StateHandler.dumps(message)
316 self.write(message)
317
318
319 def _get_label_object_ids(label):
320 """Returns a list of all object IDs contained in the label.
321
322 Args:
323 label: an ImageLabel instance
324
325 Returns:
326 list of IDs as strings
327 """
328 list_field_name = type(label).__name__.lower()
329 if hasattr(label, "id"):
330 return [label.id]
331
332 if list_field_name in label:
333 return [obj.id for obj in label[list_field_name]]
334
335 raise TypeError("Cannot serialize label type: " + str(type(label)))
336
337
338 class StateHandler(tornado.websocket.WebSocketHandler):
339 """WebSocket handler for bi-directional state communication.
340
341 Attributes:
342 app_clients: active App clients
343 clients: active clients
344 state: the current a serialized
345 :class:`fiftyone.core.state.StateDescription`, serialized
346 prev_state: the previous a serialized
347 :class:`fiftyone.core.state.StateDescription`, serialized
348 """
349
350 app_clients = set()
351 clients = set()
352 state = fos.StateDescription().serialize()
353 prev_state = fos.StateDescription().serialize()
354
355 @staticmethod
356 def dumps(data):
357 """Serializes data to a JSON formatted :class:`str`.
358
359 Args:
360 data: serializable object
361
362 Returns:
363 :class:`str`
364 """
365 return FiftyOneJSONEncoder.dumps(data)
366
367 @staticmethod
368 def loads(data):
369 """Deserialized data to an object.
370
371 Args:
372 data: :class:`str`, :class:`bytes`, or :class:`bytearray`
373
374 Returns:
375 an object
376 """
377 return FiftyOneJSONEncoder.loads(data)
378
379 @staticmethod
380 def sample_collection():
381 """Getter for the current sample collection."""
382 state = fos.StateDescription.from_dict(StateHandler.state)
383 if state.view is not None:
384 dataset = state.view._dataset
385 else:
386 dataset = state.dataset
387
388 return db[dataset._sample_collection_name]
389
390 def write_message(self, message):
391 """Writes a message to the client.
392
393 Args:
394 message: a serializable object
395 """
396 if message is None:
397 return
398 message = self.dumps(message)
399 return super().write_message(message)
400
401 def check_origin(self, origin):
402 """Accepts all origins.
403
404 Returns:
405 True
406 """
407 return True
408
409 def open(self):
410 """On open, add the client to the active clients set, and write the
411 current state to the new client.
412 """
413 StateHandler.clients.add(self)
414 _write_message(
415 {"type": "update", "state": StateHandler.state}, only=self
416 )
417
418 def on_close(self):
419 """On close, remove the client from the active clients set, and
420 active App clients set (if applicable).
421 """
422 StateHandler.clients.remove(self)
423 StateHandler.app_clients.discard(self)
424 if not StateHandler.app_clients:
425 _write_message({"type": "close"}, session=True)
426
427 @_catch_errors
428 async def on_message(self, message):
429 """On message, call the associated event awaitable, with respect to
430 the provided message type.
431
432 Args:
433 message: a serialized message
434 """
435 message = self.loads(message)
436 event = getattr(self, "on_%s" % message.pop("type"))
437 await event(self, **message)
438
439 @staticmethod
440 async def on_capture(self, src, width):
441 global _notebook_clients
442 _write_message(
443 {
444 "type": "capture",
445 "handle": _notebook_clients[self],
446 "src": src,
447 "width": width,
448 }
449 )
450
451 @staticmethod
452 async def on_as_app(self, notebook=False, handle=None, ignore=None):
453 """Event for registering a client as an App."""
454 if isinstance(self, StateHandler):
455 StateHandler.app_clients.add(self)
456
457 global _notebook_clients
458 if isinstance(self, StateHandler) and notebook:
459 _notebook_clients[self] = handle
460
461 if not isinstance(self, StateHandler):
462 return
463
464 awaitables = self.get_statistics_awaitables(only=self)
465 asyncio.gather(*awaitables)
466
467 @staticmethod
468 async def on_refresh(self, polling_client=None):
469 """Event for refreshing an App client."""
470 state = fos.StateDescription.from_dict(StateHandler.state)
471 state.refresh = not state.refresh
472 StateHandler.state = state.serialize()
473
474 if polling_client:
475 PollingHandler.clients[polling_client].update(
476 {"update", "statistics", "extended_statistics"}
477 )
478 else:
479 awaitables = [self.send_updates(only=self)]
480 awaitables += self.get_statistics_awaitables(only=self)
481 asyncio.gather(*awaitables)
482
483 @staticmethod
484 async def on_filters_update(self, filters):
485 """Event for updating state filters. Sends an extended dataset
486 statistics message to active App clients.
487
488 Args:
489 filters: a :class:`dict` mapping field path to a serialized
490 :class:fiftyone.core.stages.Stage`
491 """
492 state = fos.StateDescription.from_dict(StateHandler.state)
493 state.filters = filters
494 state.selected_labels = []
495 state.selected = []
496 if state.view is not None:
497 view = state.view
498 else:
499 view = state.dataset
500
501 StateHandler.state = state.serialize()
502 for clients in PollingHandler.clients.values():
503 clients.update({"extended_statistics"})
504
505 await self.send_statistics(view, filters=filters)
506
507 @classmethod
508 async def on_page(cls, self, page, page_length=20):
509 """Sends a pagination response to the current client.
510
511 Args:
512 page: the page number
513 page_length (20): the number of items to return
514 """
515 state = fos.StateDescription.from_dict(StateHandler.state)
516 if state.view is not None:
517 view = state.view
518 elif state.dataset is not None:
519 view = state.dataset
520 else:
521 _write_message(
522 {"type": "page", "page": page, "results": [], "more": False},
523 only=self,
524 )
525 return
526
527 view = get_extended_view(view, state.filters, count_labels_tags=True)
528 view = view.skip((page - 1) * page_length)
529
530 if view.media_type == fom.VIDEO:
531 view = view.set_field("frames", F("frames")[0])
532
533 results, more = await _get_sample_data(
534 cls.sample_collection(), view, page_length, page
535 )
536
537 message = {
538 "type": "page",
539 "page": page,
540 "results": results,
541 "more": more,
542 }
543
544 _write_message(message, only=self)
545
546 @staticmethod
547 async def on_update(caller, state, ignore_polling_client=None):
548 """Event for state updates. Sends an update message to all active
549 clients, and statistics messages to active App clients.
550
551 Args:
552 state: a serialized :class:`fiftyone.core.state.StateDescription`
553 """
554 StateHandler.state = fos.StateDescription.from_dict(state).serialize()
555 active_handle = state["active_handle"]
556 global _notebook_clients
557 global _deactivated_clients
558 _deactivated_clients.discard(active_handle)
559
560 # ignore deactivated notebook cells
561 if (
562 active_handle
563 and caller in _notebook_clients
564 and _notebook_clients[caller] != active_handle
565 ):
566 return
567
568 for client, events in PollingHandler.clients.items():
569 if client in _notebook_clients:
570 uuid = _notebook_clients[client]
571
572 # deactivate the last active colab cell
573 if uuid != active_handle:
574 events.clear()
575 _deactivated_clients.add(uuid)
576 events.add("deactivate")
577 continue
578
579 if client == ignore_polling_client:
580 events.update({"statistics", "extended_statistics"})
581
582 events.update({"update", "statistics", "extended_statistics"})
583
584 awaitables = [
585 StateHandler.send_updates(),
586 ]
587 awaitables += StateHandler.get_statistics_awaitables()
588 asyncio.gather(*awaitables)
589
590 @staticmethod
591 async def on_set_selection(self, _ids):
592 """Event for setting the selected
593 :class:`fiftyone.core.samples.Sample` _ids
594
595 Args:
596 _ids: a list of sample _id
597 """
598 StateHandler.state["selected"] = _ids
599 await self.send_updates(ignore=self)
600
601 @staticmethod
602 async def on_clear_selection(self):
603 """Event for clearing the currently selected sample _ids.
604
605 Sends state updates to all active clients.
606 """
607 StateHandler.state["selected"] = []
608 await self.send_updates(ignore=self)
609
610 @staticmethod
611 async def on_set_selected_labels(self, selected_labels):
612 """Event for setting the entire selected objects list.
613
614 Args:
615 selected_labels: a list of selected labels
616 """
617 if not isinstance(selected_labels, list):
618 raise TypeError("selected_labels must be a list")
619
620 StateHandler.state["selected_labels"] = selected_labels
621 await self.send_updates(ignore=self)
622
623 @staticmethod
624 async def on_set_dataset(self, dataset_name):
625 """Event for setting the current dataset by name.
626
627 Args:
628 dataset_name: the dataset name
629 """
630 dataset = fod.load_dataset(dataset_name)
631 config = fos.StateDescription.from_dict(StateHandler.state).config
632 active_handle = StateHandler.state["active_handle"]
633 StateHandler.state = fos.StateDescription(
634 dataset=dataset, config=config, active_handle=active_handle
635 ).serialize()
636 await self.on_update(self, StateHandler.state)
637
638 @staticmethod
639 async def on_get_video_data(self, _id):
640 """Gets the frame labels for video samples.
641
642 Args:
643 _id: a sample _id
644 """
645 state = fos.StateDescription.from_dict(StateHandler.state)
646 if state.view is not None:
647 view = state.view
648 else:
649 view = state.dataset
650
651 result = await _get_video_data(
652 StateHandler.sample_collection(), state, view, [_id]
653 )
654 sample, frames, labels = result[0]
655 convert([labels])
656 convert([sample])
657 convert(frames)
658
659 fps = etav.get_frame_rate(sample["filepath"])
660 _write_message(
661 {
662 "type": "video_data-%s" % _id,
663 "frames": frames,
664 "labels": labels.serialize(),
665 "fps": fps,
666 },
667 only=self,
668 )
669
670 @staticmethod
671 async def on_tag(
672 caller, changes, target_labels=False, active_labels=None,
673 ):
674 state = fos.StateDescription.from_dict(StateHandler.state)
675 if state.view is not None:
676 view = state.view
677 else:
678 view = state.dataset
679
680 view = get_extended_view(view, state.filters)
681 if state.selected:
682 view = view.select(state.selected)
683
684 if target_labels:
685 fosu.change_label_tags(view, changes, label_fields=active_labels)
686 else:
687 fosu.change_sample_tags(view, changes)
688
689 StateHandler.state["refresh"] = not state.refresh
690 for clients in PollingHandler.clients.values():
691 clients.update({"update"})
692
693 await StateHandler.on_update(caller, StateHandler.state)
694
695 @staticmethod
696 async def on_all_tags(caller):
697 state = fos.StateDescription.from_dict(StateHandler.state)
698 if state.view is not None:
699 dataset = state.view._dataset
700 else:
701 dataset = state.dataset
702
703 if dataset is None:
704 label = []
705 sample = []
706 else:
707 (_, tag_aggs,) = fos.DatasetStatistics.get_label_aggregations(
708 dataset
709 )
710 results = await dataset._async_aggregate(
711 StateHandler.sample_collection(),
712 [foa.Distinct("tags")] + tag_aggs,
713 )
714 sample = results[0]
715
716 label = set()
717 for result in results[1:]:
718 label |= set(result.keys())
719
720 _write_message(
721 {"type": "all_tags", "sample": sample, "label": label}, only=caller
722 )
723
724 @staticmethod
725 async def on_save_filters(caller, add_stages=[], with_selected=False):
726 state = fos.StateDescription.from_dict(StateHandler.state)
727 if state.view is not None:
728 view = state.view
729 else:
730 view = state.dataset
731
732 view = get_extended_view(view, state.filters)
733
734 if with_selected:
735 if state.selected:
736 view = view.select(state.selected)
737 elif state.selected_labels:
738 view = view.select_labels(state.selected_labels)
739
740 for d in add_stages:
741 stage = fosg.ViewStage._from_dict(d)
742 view = view.add_stage(stage)
743
744 state.selected = []
745 state.selected_labels = []
746 state.view = view
747 state.filters = {}
748
749 await StateHandler.on_update(caller, state.serialize())
750
751 @staticmethod
752 async def on_tag_modal(
753 caller, changes, sample_id=None, labels=None,
754 ):
755 state = fos.StateDescription.from_dict(StateHandler.state)
756 if state.view is not None:
757 view = state.view
758 else:
759 view = state.dataset
760
761 if sample_id:
762 sample_ids = [sample_id]
763 tag_view = view.select(sample_id)
764 fosu.change_sample_tags(tag_view, changes)
765 else:
766 if state.selected_labels:
767 labels = state.selected_labels
768
769 sample_ids = list({label["sample_id"] for label in labels})
770 tag_view = view.select_labels(labels=labels)
771
772 fields = {label["field"] for label in labels}
773 fosu.change_label_tags(tag_view, changes, label_fields=fields)
774
775 for clients in PollingHandler.clients.values():
776 clients.update({"extended_statistics", "statistics"})
777
778 if isinstance(caller, PollingHandler):
779 await StateHandler.send_samples(sample_ids, only=caller)
780
781 awaitables = [StateHandler.send_samples(sample_ids)]
782 awaitables += StateHandler.get_statistics_awaitables()
783
784 asyncio.gather(*awaitables)
785
786 @staticmethod
787 async def on_selected_statistics(caller, active_labels=[]):
788 state = fos.StateDescription.from_dict(StateHandler.state)
789 if state.view is not None:
790 view = state.view
791 else:
792 view = state.dataset
793
794 view = get_extended_view(view, state.filters)
795 view = view.select(state.selected).select_fields(active_labels)
796
797 count_aggs, tag_aggs = fos.DatasetStatistics.get_label_aggregations(
798 view
799 )
800 results = await view._async_aggregate(
801 StateHandler.sample_collection(), count_aggs + tag_aggs
802 )
803
804 count = sum(results[: len(count_aggs)])
805
806 tags = defaultdict(int)
807 for result in results[len(count_aggs) :]:
808 for tag, num in result.items():
809 tags[tag] += num
810
811 _write_message(
812 {"type": "selected_statistics", "count": count, "tags": tags},
813 only=caller,
814 )
815
816 @classmethod
817 async def send_samples(cls, sample_ids, only=None):
818 state = fos.StateDescription.from_dict(StateHandler.state)
819 if state.view is not None:
820 view = state.view
821 else:
822 view = state.dataset
823
824 view = get_extended_view(view, state.filters, count_labels_tags=True)
825
826 col = cls.sample_collection()
827
828 if view.media_type == fom.VIDEO:
829 samples = await _get_video_data(col, state, view, sample_ids)
830 result = [
831 {"sample": s, "frames": f, "labels": l.serialize()}
832 for (s, f, l) in samples
833 ]
834 else:
835 view = view.select(sample_ids)
836 result, _ = await _get_sample_data(col, view, len(sample_ids), 1)
837
838 _write_message(
839 {"type": "samples_update", "samples": result}, app=True, only=only
840 )
841
842 @classmethod
843 def get_statistics_awaitables(cls, only=None):
844 """Gets statistic awaitables that will send statistics to the relevant
845 client(s) when executed
846
847 Args:
848 only (None): a client to restrict the messages to
849
850 Returns:
851 a list of coroutines
852 """
853 if StateHandler.state["dataset"] is None:
854 return []
855
856 state = fos.StateDescription.from_dict(StateHandler.state)
857 if state.view is not None:
858 view = state.view
859 else:
860 view = state.dataset
861
862 awaitables = [cls.send_statistics(view, only=only)]
863
864 awaitables.append(
865 cls.send_statistics(view, filters=state.filters, only=only)
866 )
867 return awaitables
868
869 @classmethod
870 async def send_updates(cls, ignore=None, only=None):
871 """Sends an update event to the all clients, exluding the ignore
872 client, if it is not None.
873
874 Args:
875 ignore (None): a client to not send the update to
876 only (None): a client to restrict the updates to
877 """
878 _write_message(
879 {"type": "update", "state": StateHandler.state},
880 ignore=ignore,
881 only=only,
882 )
883
884 @classmethod
885 async def send_statistics(cls, view, filters=None, only=None):
886 """Sends a statistics event given using the provided view to all App
887 clients, unless an only client is provided in which case it is only
888 sent to the that client.
889
890 Args:
891 view: a view
892 filters (None): filter stages to append to the view
893 only (None): a client to restrict the message to
894 """
895 base_view = view
896 data = {"main": [], "none": []}
897 if view is not None and (filters is None or len(filters)):
898 view = get_extended_view(view, filters)
899
900 stats = fos.DatasetStatistics(view)
901 aggs = stats.aggregations
902 exists_aggs = stats.exists_aggregations
903 num_aggs = len(aggs)
904
905 results = await view._async_aggregate(
906 cls.sample_collection(), aggs + exists_aggs
907 )
908 aggs_results = results[:num_aggs]
909 exists_results = results[num_aggs:]
910
911 for a, r, k in [
912 (aggs, aggs_results, "main"),
913 (exists_aggs, exists_results, "none"),
914 ]:
915 for agg, result in zip(a, r):
916 data[k].append(
917 {
918 "_CLS": agg.__class__.__name__,
919 "name": agg.field_name,
920 "result": result,
921 }
922 )
923
924 view = (
925 base_view._serialize()
926 if isinstance(base_view, fov.DatasetView)
927 else []
928 )
929
930 message = {
931 "type": "statistics",
932 "stats": data,
933 "view": view,
934 "filters": filters,
935 }
936
937 _write_message(message, app=True, only=only)
938
939 @classmethod
940 async def on_distinct(
941 cls, self, path, uuid=None, selected=[], search="", limit=10
942 ):
943 state = fos.StateDescription.from_dict(StateHandler.state)
944 results = None
945 col = cls.sample_collection()
946 if state.view is not None:
947 view = state.view
948 elif state.dataset is not None:
949 view = state.dataset
950 else:
951 results = []
952
953 view = _get_search_view(view, path, search, selected)
954
955 count, first = await view._async_aggregate(
956 col, foa.Distinct(path, _first=limit)
957 )
958
959 message = {
960 "type": uuid,
961 "count": count,
962 "results": first,
963 }
964 _write_message(message, app=True, only=self)
965
966 @classmethod
967 async def on_distributions(cls, self, group, omit=[]):
968 """Sends distribution data with respect to a group to the requesting
969 client.
970
971 Args:
972 group: the distribution group. Valid groups are 'labels', 'scalars',
973 and 'tags'.
974 """
975 state = fos.StateDescription.from_dict(StateHandler.state)
976 results = None
977 col = cls.sample_collection()
978 if state.view is not None:
979 view = state.view
980 elif state.dataset is not None:
981 view = state.dataset
982 else:
983 results = []
984
985 view = get_extended_view(view, state.filters)
986
987 if group == "label tags" and results is None:
988
989 def filter(field):
990 path = _label_filter(field)
991
992 if path is not None:
993 path = "%s.tags" % path
994
995 return path
996
997 aggs, fields = _count_values(filter, view)
998 results = await _gather_results(col, aggs, fields, view)
999
1000 elif group == "labels" and results is None:
1001
1002 def filter(field):
1003 path = _label_filter(field)
1004
1005 if path is not None:
1006 path = "%s.label" % path
1007
1008 return path
1009
1010 aggs, fields = _count_values(filter, view)
1011 results = await _gather_results(col, aggs, fields, view)
1012
1013 elif group == "sample tags" and results is None:
1014 aggs = [foa.CountValues("tags")]
1015 try:
1016 fields = [view.get_field_schema()["tags"]]
1017 results = await _gather_results(col, aggs, fields, view)
1018 except:
1019 results = []
1020
1021 elif results is None:
1022
1023 def filter(field):
1024 if (
1025 field.name in {"tags"}
1026 or field.name in omit
1027 or field.name.startswith("_")
1028 ):
1029 return None
1030
1031 if fos._meets_type(field, (fof.BooleanField, fof.StringField)):
1032 return field.name
1033
1034 return None
1035
1036 aggs, fields = _count_values(filter, view)
1037
1038 hist_aggs, hist_fields, ticks = await _numeric_histograms(
1039 col, view, view.get_field_schema()
1040 )
1041 aggs.extend(hist_aggs)
1042 fields.extend(hist_fields)
1043 results = await _gather_results(col, aggs, fields, view, ticks)
1044
1045 results = sorted(results, key=lambda i: i["name"])
1046 _write_message(
1047 {"type": "distributions", "results": results}, only=self
1048 )
1049
1050
1051 def _label_filter(field):
1052 path = None
1053 if isinstance(field, fof.EmbeddedDocumentField) and issubclass(
1054 field.document_type, fol.Label
1055 ):
1056 path = field.name
1057 if issubclass(field.document_type, fol._HasLabelList):
1058 path = "%s.%s" % (path, field.document_type._LABEL_LIST_FIELD,)
1059
1060 return path
1061
1062
1063 def _get_search_view(view, path, search, selected):
1064 search = _escape_regex_chars(search)
1065
1066 if search == "" and not selected:
1067 return view
1068
1069 if "." in path:
1070 fields = path.split(".")
1071 if view.media_type == fom.VIDEO and fields[0] == "frames":
1072 field = ".".join(fields[:2])
1073 else:
1074 field = fields[0]
1075
1076 vf = F("label")
1077 meth = lambda expr: view.filter_labels(field, expr)
1078 else:
1079 vf = F(path)
1080 meth = view.match
1081
1082 if search != "" and selected:
1083 expr = vf.re_match(search) & ~vf.is_in(selected)
1084 elif search != "":
1085 expr = vf.re_match(search)
1086 elif selected:
1087 expr = ~vf.is_in(selected)
1088
1089 return meth(expr)
1090
1091
1092 def _write_message(message, app=False, session=False, ignore=None, only=None):
1093 clients = StateHandler.app_clients if app else StateHandler.clients
1094 clients = _filter_deactivated_clients(clients)
1095
1096 if only:
1097 only.write_message(message)
1098 return
1099
1100 for client in clients:
1101 if session and client in StateHandler.app_clients:
1102 continue
1103
1104 if client == ignore:
1105 continue
1106
1107 client.write_message(message)
1108
1109
1110 def _filter_deactivated_clients(clients):
1111 global _notebook_clients
1112 global _deactivated_clients
1113 active_handle = StateHandler.state["active_handle"]
1114
1115 filtered = []
1116
1117 for client in clients:
1118 if client in _notebook_clients:
1119 uuid = _notebook_clients[client]
1120 if uuid != active_handle and uuid not in _deactivated_clients:
1121 _deactivated_clients.add(uuid)
1122 client.write_message({"type": "deactivate"})
1123
1124 if uuid != active_handle:
1125 continue
1126
1127 filtered.append(client)
1128
1129 return filtered
1130
1131
1132 _DEFAULT_NUM_HISTOGRAM_BINS = 25
1133
1134
1135 def _parse_histogram_values(result, field):
1136 counts, edges, other = result
1137 data = sorted(
1138 [
1139 {
1140 "key": round((k + edges[idx + 1]) / 2, 4),
1141 "count": v,
1142 "edges": (k, edges[idx + 1]),
1143 }
1144 for idx, (k, v) in enumerate(zip(edges, counts))
1145 ],
1146 key=lambda i: i["key"],
1147 )
1148 if (
1149 fos._meets_type(field, fof.IntField)
1150 and len(data) == _DEFAULT_NUM_HISTOGRAM_BINS
1151 ):
1152 for bin_ in data:
1153 bin_["edges"] = [math.ceil(e) for e in bin_["edges"]]
1154 bin_["key"] = math.ceil(bin_["key"])
1155 elif fos._meets_type(field, fof.IntField):
1156 for bin_ in data:
1157 del bin_["edges"]
1158
1159 if other > 0:
1160 data.append({"key": "None", "count": other})
1161
1162 return data
1163
1164
1165 def _parse_count_values(result, field):
1166 return sorted(
1167 [{"key": k, "count": v} for k, v in result.items()],
1168 key=lambda i: i["count"],
1169 reverse=True,
1170 )
1171
1172
1173 async def _gather_results(col, aggs, fields, view, ticks=None):
1174 response = await view._async_aggregate(col, aggs)
1175
1176 sorters = {
1177 foa.HistogramValues: _parse_histogram_values,
1178 foa.CountValues: _parse_count_values,
1179 }
1180
1181 results = []
1182 for idx, (result, agg) in enumerate(zip(response, aggs)):
1183 field = fields[idx]
1184 try:
1185 type_ = field.document_type.__name__
1186 cls = field.document_type
1187 except:
1188 type_ = field.__class__.__name__
1189 cls = None
1190
1191 name = agg.field_name
1192 if cls and issubclass(cls, fol.Label):
1193 if view.media_type == fom.VIDEO and name.startswith(
1194 view._FRAMES_PREFIX
1195 ):
1196 name = "".join(name.split(".")[:2])
1197 else:
1198 name = name.split(".")[0]
1199
1200 data = sorters[type(agg)](result, field)
1201 result_ticks = 0
1202 if type(agg) == foa.HistogramValues:
1203 result_ticks = ticks.pop(0)
1204 if result_ticks is None:
1205 result_ticks = []
1206 step = max(len(data) // 4, 1)
1207 for i in range(0, len(data), step):
1208 result_ticks.append(data[i]["key"])
1209
1210 if result[2] > 0 and len(data) and data[-1]["key"] != "None":
1211 result_ticks.append("None")
1212
1213 if data:
1214 results.append(
1215 {
1216 "data": data,
1217 "name": name,
1218 "ticks": result_ticks,
1219 "type": type_,
1220 }
1221 )
1222
1223 return results
1224
1225
1226 def _count_values(f, view):
1227 aggregations = []
1228 fields = []
1229 schemas = [(view.get_field_schema(), "")]
1230 if view.media_type == fom.VIDEO:
1231 schemas.append((view.get_frame_field_schema(), view._FRAMES_PREFIX))
1232
1233 for schema, prefix in schemas:
1234 for field in schema.values():
1235 path = f(field)
1236 if path is None:
1237 continue
1238
1239 fields.append(field)
1240 aggregations.append(foa.CountValues("%s%s" % (prefix, path)))
1241
1242 return aggregations, fields
1243
1244
1245 def _numeric_bounds(paths):
1246 return [foa.Bounds(path) for path in paths]
1247
1248
1249 async def _numeric_histograms(coll, view, schema, prefix=""):
1250 paths = []
1251 fields = []
1252 numerics = (fof.IntField, fof.FloatField)
1253 for name, field in schema.items():
1254 if prefix != "" and name == "frame_number":
1255 continue
1256
1257 if fos._meets_type(field, numerics):
1258 paths.append("%s%s" % (prefix, name))
1259 fields.append(field)
1260
1261 aggs = _numeric_bounds(paths)
1262 bounds = await view._async_aggregate(coll, aggs)
1263 aggregations = []
1264 ticks = []
1265 for range_, field, path in zip(bounds, fields, paths):
1266 bins = _DEFAULT_NUM_HISTOGRAM_BINS
1267 num_ticks = None
1268 if range_[0] == range_[1]:
1269 bins = 1
1270
1271 if range_ == (None, None):
1272 range_ = (0, 1)
1273 elif fos._meets_type(field, fof.IntField):
1274 delta = range_[1] - range_[0]
1275 range_ = (range_[0] - 0.5, range_[1] + 0.5)
1276 if delta < _DEFAULT_NUM_HISTOGRAM_BINS:
1277 bins = delta + 1
1278 num_ticks = 0
1279 else:
1280 range_ = (range_[0], range_[1] + 0.01)
1281
1282 ticks.append(num_ticks)
1283 aggregations.append(foa.HistogramValues(path, bins=bins, range=range_))
1284
1285 return aggregations, fields, ticks
1286
1287
1288 async def _get_sample_data(col, view, page_length, page):
1289 pipeline = view._pipeline()
1290
1291 samples = await foo.aggregate(col, pipeline).to_list(page_length + 1)
1292 convert(samples)
1293 more = False
1294 if len(samples) > page_length:
1295 samples = samples[:page_length]
1296 more = page + 1
1297
1298 results = [{"sample": s} for s in samples]
1299 for r in results:
1300 w, h = fosu.get_file_dimensions(r["sample"]["filepath"])
1301 r["width"] = w
1302 r["height"] = h
1303
1304 return results, more
1305
1306
1307 async def _get_video_data(col, state, view, _ids):
1308 view = view.select(_ids)
1309 pipeline = view._pipeline(attach_frames=True)
1310 results = []
1311 async for sample in col.aggregate(pipeline):
1312 frames = sample["frames"]
1313 if frames and frames[0]["frame_number"] == 1:
1314 sample["frames"] = frames[0]
1315 else:
1316 sample["frames"] = None
1317
1318 labels = _make_video_labels(state, view, sample, frames)
1319 results.append((sample, frames, labels))
1320
1321 return results
1322
1323
1324 def _make_frame_labels(name, label, frame_number, prefix=""):
1325 label = fol.ImageLabel.from_dict(label)
1326 labels = etav.VideoFrameLabels.from_image_labels(
1327 label.to_image_labels(name=prefix + name), frame_number,
1328 )
1329
1330 for obj in labels.objects:
1331 obj.frame_number = frame_number
1332
1333 for attr in labels.attributes():
1334 container = getattr(labels, attr)
1335
1336 if isinstance(container, etal.LabelsContainer):
1337 object_ids = _get_label_object_ids(label)
1338 assert len(container) == len(object_ids)
1339 for (obj, object_id) in zip(container, object_ids):
1340 # force _id to be serialized
1341 obj._id = object_id
1342 attrs = obj.attributes() + ["_id"]
1343 obj.attributes = lambda: attrs
1344
1345 return labels
1346
1347
1348 def _make_video_labels(state, view, sample, frames):
1349 labels = etav.VideoLabels()
1350 for frame_dict in frames:
1351 frame_number = frame_dict["frame_number"]
1352 frame_labels = etav.VideoFrameLabels(frame_number=frame_number)
1353 for k, v in frame_dict.items():
1354 if isinstance(v, dict) and "_cls" in v:
1355 field_labels = _make_frame_labels(
1356 k, v, frame_number, prefix=view._FRAMES_PREFIX
1357 )
1358 frame_labels.merge_labels(field_labels)
1359
1360 labels.add_frame(frame_labels)
1361
1362 if state.view is not None:
1363 dataset = state.view._dataset
1364 else:
1365 dataset = state.dataset
1366
1367 sample_schema = dataset.get_field_schema()
1368 for frame_number in range(1, etav.get_frame_count(sample["filepath"]) + 1):
1369 frame_labels = etav.VideoFrameLabels(frame_number=frame_number)
1370 for k, v in sample.items():
1371 if k not in sample_schema:
1372 continue
1373
1374 field = sample_schema[k]
1375 if not isinstance(field, fof.EmbeddedDocumentField):
1376 continue
1377
1378 if not issubclass(field.document_type, fol.Label):
1379 continue
1380
1381 field_labels = _make_frame_labels(k, v, frame_number)
1382 for obj in field_labels.objects:
1383 obj.frame_number = frame_number
1384
1385 frame_labels.merge_labels(field_labels)
1386
1387 labels.add_frame(frame_labels, overwrite=False)
1388
1389 return labels
1390
1391
1392 class FileHandler(tornado.web.StaticFileHandler):
1393 def set_headers(self):
1394 super().set_headers()
1395 self.set_header("Access-Control-Allow-Origin", "*")
1396 self.set_header("Access-Control-Allow-Headers", "x-requested-with")
1397 self.set_header("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS")
1398 self.set_header("content-length", self.get_content_size())
1399 self.set_header("x-colab-notebook-cache-control", "no-cache")
1400
1401
1402 class MediaHandler(FileHandler):
1403 @classmethod
1404 def get_absolute_path(cls, root, path):
1405 if os.name != "nt":
1406 path = os.path.join("/", path)
1407
1408 return path
1409
1410 def validate_absolute_path(self, root, absolute_path):
1411 if os.path.isdir(absolute_path) and self.default_filename is not None:
1412 if not self.request.path.endswith("/"):
1413 self.redirect(self.request.path + "/", permanent=True)
1414 return None
1415
1416 absolute_path = os.path.join(absolute_path, self.default_filename)
1417 if not os.path.exists(absolute_path):
1418 raise HTTPError(404)
1419
1420 if not os.path.isfile(absolute_path):
1421 raise HTTPError(403, "%s is not a file", self.path)
1422
1423 return absolute_path
1424
1425
1426 class Application(tornado.web.Application):
1427 """FiftyOne Tornado Application"""
1428
1429 def __init__(self, **settings):
1430 server_path = os.path.dirname(os.path.abspath(__file__))
1431 rel_web_path = "static"
1432 web_path = os.path.join(server_path, rel_web_path)
1433 handlers = [
1434 (r"/fiftyone", FiftyOneHandler),
1435 (r"/polling", PollingHandler),
1436 (r"/feedback", FeedbackHandler),
1437 (r"/filepath/(.*)", MediaHandler, {"path": ""},),
1438 (r"/notebook", NotebookHandler),
1439 (r"/stages", StagesHandler),
1440 (r"/state", StateHandler),
1441 (r"/reactivate", ReactivateHandler),
1442 (
1443 r"/(.*)",
1444 FileHandler,
1445 {"path": web_path, "default_filename": "index.html"},
1446 ),
1447 ]
1448 super().__init__(handlers, **settings)
1449
1450
1451 if __name__ == "__main__":
1452 parser = argparse.ArgumentParser()
1453 parser.add_argument("--port", type=int, default=fo.config.default_app_port)
1454 args = parser.parse_args()
1455 app = Application(debug=foc.DEV_INSTALL)
1456 app.listen(args.port)
1457 tornado.ioloop.IOLoop.current().start()
1458
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fiftyone/server/main.py b/fiftyone/server/main.py
--- a/fiftyone/server/main.py
+++ b/fiftyone/server/main.py
@@ -1398,6 +1398,12 @@
self.set_header("content-length", self.get_content_size())
self.set_header("x-colab-notebook-cache-control", "no-cache")
+ def get_content_type(self):
+ if self.absolute_path.endswith(".js"):
+ return "text/javascript"
+
+ return super().get_content_type()
+
class MediaHandler(FileHandler):
@classmethod
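A note on why this patch resolves the reported blank screen: `FileHandler` in the listing above subclasses `tornado.web.StaticFileHandler`, whose default `get_content_type()` resolves content types through the standard-library `mimetypes` module. On some Windows installations that lookup maps `.js` to `text/plain` (typically via a stale registry entry), which is exactly the header the browsers reject for module scripts. Overriding `get_content_type()` to answer `text/javascript` for `.js` files sidesteps that lookup for the affected scripts. The snippet below is only an illustration of the suspected root cause on an affected machine, not part of the fix:
```python
# Hypothetical illustration of the likely root cause: Tornado's StaticFileHandler
# asks the standard-library mimetypes module for the content type, and on some
# Windows machines the registry maps ".js" to "text/plain".
import mimetypes

print(mimetypes.guess_type("index.js"))
# Healthy install:  ('text/javascript', None) or ('application/javascript', None)
# Affected install: ('text/plain', None)  -- the header the patch above works around
```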
| {"golden_diff": "diff --git a/fiftyone/server/main.py b/fiftyone/server/main.py\n--- a/fiftyone/server/main.py\n+++ b/fiftyone/server/main.py\n@@ -1398,6 +1398,12 @@\n self.set_header(\"content-length\", self.get_content_size())\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n \n+ def get_content_type(self):\n+ if self.absolute_path.endswith(\".js\"):\n+ return \"text/javascript\"\n+\n+ return super().get_content_type()\n+\n \n class MediaHandler(FileHandler):\n @classmethod\n", "issue": "[BUG] APP non-JavaScript MIME type of \"text/plain\"\n### System information\r\n\r\n- **OS Platform and Distribution**: Windows 10 Pro 10.0.18362.239\r\n- **FiftyOne installed from (pip or source)**: pip\r\n- **FiftyOne version (run `fiftyone --version`)**: 0.9.3\r\n- **Python version**: both 3.8 | 3.9.2\r\n\r\n### Commands to reproduce\r\n\r\n```\r\nFollowing installation steps on https://voxel51.com/docs/fiftyone/getting_started/install.html\r\npip install fiftyone\r\n```\r\n\r\n### Describe the problem\r\n\r\nAfter installing the package (tested installing in both Anaconda environment and venv environment) and running the quickstart example, the app shows only blank screen with errors in console described in Other info / logs section.\r\nThe issue does not appear to be bound to specific settings with web browsers on the machine since the same error is given when running the fiftyone as remote on this machine and then accessing it from other machines, but rather by something with running web server as i could not reproduce this by installing fiftyone on other machines.\r\n\r\nAny idea what might go wrong ?\r\n\r\n### Code to reproduce issue\r\n\r\n```\r\nimport fiftyone as fo\r\nimport fiftyone.zoo as foz\r\n\r\ndataset = foz.load_zoo_dataset(\"quickstart\")\r\nsession = fo.launch_app(dataset)\r\n```\r\n\r\n### Other info / logs\r\n\r\nGoogle Chrome: `Failed to load module script: The server responded with a non-JavaScript MIME type of \"text/plain\". Strict MIME type checking is enforced for module scripts per HTML spec.` \r\nfor script \r\n`http://localhost:5151/_dist_/index.js`\r\n\r\nFirefox: `Loading module from \u201chttp://localhost:5151/_dist_/index.js\u201d was blocked because of a disallowed MIME type (\u201ctext/plain\u201d).`\r\n\r\n\r\n### What areas of FiftyOne does this bug affect?\r\n\r\n- [x] `App`: FiftyOne application issue\r\n- [ ] `Core`: Core `fiftyone` Python library issue\r\n- [ ] `Server`: Fiftyone server issue\r\n\r\n### Willingness to contribute\r\n\r\n- [ ] Yes. I can contribute a fix for this bug independently.\r\n- [x] Yes. I would be willing to contribute a fix for this bug with guidance\r\n from the FiftyOne community.\r\n- [ ] No. 
I cannot contribute a bug fix at this time.\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne Tornado server.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport argparse\nfrom collections import defaultdict\nimport math\nimport os\nimport traceback\n\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.iostream\nimport tornado.options\nimport tornado.web\nfrom tornado.web import HTTPError\nimport tornado.websocket\n\nimport eta.core.labels as etal\nimport eta.core.serial as etas\nimport eta.core.video as etav\n\nos.environ[\"FIFTYONE_SERVER\"] = \"1\"\n\nimport fiftyone as fo\nimport fiftyone.core.aggregations as foa\nimport fiftyone.constants as foc\nfrom fiftyone.core.expressions import ViewField as F, _escape_regex_chars\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.fields as fof\nimport fiftyone.core.labels as fol\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.service import DatabaseService\nfrom fiftyone.core.stages import _STAGES\nimport fiftyone.core.stages as fosg\nimport fiftyone.core.state as fos\nimport fiftyone.core.uid as fou\nimport fiftyone.core.view as fov\n\nfrom fiftyone.server.extended_view import get_extended_view\nfrom fiftyone.server.json_util import convert, FiftyOneJSONEncoder\nimport fiftyone.server.utils as fosu\n\n\n# connect to the existing DB service to initialize global port information\ndbs = DatabaseService()\ndbs.start()\ndb = foo.get_async_db_conn()\n\n\nclass RequestHandler(tornado.web.RequestHandler):\n \"\"\"\"Base class for HTTP request handlers\"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n self.write(self.get_response())\n\n def get_response(self):\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n raise NotImplementedError(\"subclass must implement get_response()\")\n\n\nclass FiftyOneHandler(RequestHandler):\n \"\"\"Returns the version info of the fiftyone being used\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n uid, _ = fou.get_user_id()\n isfile = os.path.isfile(foc.FEEDBACK_PATH)\n if isfile:\n submitted = etas.load_json(foc.FEEDBACK_PATH)[\"submitted\"]\n else:\n submitted = False\n\n return {\n \"version\": foc.VERSION,\n \"user_id\": uid,\n \"do_not_track\": fo.config.do_not_track,\n \"feedback\": {\"submitted\": submitted, \"minimized\": isfile},\n \"dev_install\": foc.DEV_INSTALL or foc.RC_INSTALL,\n }\n\n\nclass NotebookHandler(RequestHandler):\n \"\"\"Check that the requested handle exists on the server\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n\n response = self.get_response(handle_id)\n if response is None:\n raise tornado.web.HTTPError(status_code=404)\n\n self.write(response)\n\n @staticmethod\n def get_response(handle):\n \"\"\"Returns if the notebook handle exists on the server.\n\n Returns:\n the handle ID\n \"\"\"\n global _notebook_clients\n if handle in set(_notebook_clients.values()):\n return {\"exists\": True}\n\n\nclass ReactivateHandler(RequestHandler):\n \"\"\"Reactivates an IPython display handle\"\"\"\n\n async 
def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n self.write(self.get_response(handle_id))\n\n @staticmethod\n def get_response(handle_id):\n \"\"\"Returns on success\n\n Args:\n handle_id: a handle uuid\n \"\"\"\n StateHandler.state[\"active_handle\"] = handle_id\n for client in StateHandler.clients:\n client.write_message({\"type\": \"reactivate\", \"handle\": handle_id})\n\n return {}\n\n\nclass StagesHandler(RequestHandler):\n \"\"\"Returns the definitions of stages available to the App\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n return {\n \"stages\": [\n {\"name\": stage.__name__, \"params\": stage._params()}\n for stage in _STAGES\n ]\n }\n\n\nclass FeedbackHandler(RequestHandler):\n \"\"\"Returns whether the feedback button should be minimized\"\"\"\n\n def post(self):\n submitted = self.get_argument(\"submitted\", False)\n etas.write_json({\"submitted\": submitted}, foc.FEEDBACK_PATH)\n\n\ndef _catch_errors(func):\n async def wrapper(self, *args, **kwargs):\n try:\n StateHandler.prev_state = StateHandler.state\n result = await func(self, *args, **kwargs)\n return result\n except Exception:\n StateHandler.state = StateHandler.prev_state\n clients = list(StateHandler.clients)\n if isinstance(self, PollingHandler):\n clients.append(self)\n\n for client in clients:\n client.write_message(\n {\n \"type\": \"notification\",\n \"kind\": \"Server Error\",\n \"message\": (\n \"An exception has been raised by the server. Your session \"\n \"has been reverted to its previous state.\"\n ),\n \"session_items\": [traceback.format_exc()],\n \"app_items\": [\n \"A traceback has been printed to your Python shell.\"\n ],\n }\n )\n\n return wrapper\n\n\n_notebook_clients = {}\n_deactivated_clients = set()\n\n\nclass PollingHandler(tornado.web.RequestHandler):\n\n clients = defaultdict(set)\n screenshots = {}\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n\n @staticmethod\n def gather_messages(client):\n messages = [\n {\"type\": message} for message in PollingHandler.clients[client]\n ]\n PollingHandler.clients[client].clear()\n return messages\n\n @_catch_errors\n async def get(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n if client not in PollingHandler.clients:\n PollingHandler.clients[client].add(\"update\")\n PollingHandler.clients[client].add(\"statistics\")\n PollingHandler.clients[client].add(\"extended_statistics\")\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n\n @_catch_errors\n async def post(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n # pylint: disable=no-value-for-parameter\n mode = self.get_argument(\"mode\")\n message = StateHandler.loads(self.request.body)\n event = message.pop(\"type\")\n force_update = False\n if mode == \"push\":\n if event == \"as_app\":\n if message[\"notebook\"]:\n message[\"ignore\"] = client\n global _notebook_clients\n global _deactivated_clients\n StateHandler.state[\"active_handle\"] = message[\"handle\"]\n _deactivated_clients.discard(message[\"handle\"])\n _notebook_clients[client] = message[\"handle\"]\n event = \"update\"\n force_update = True\n message = {\"state\": 
StateHandler.state}\n\n if event in {\n \"distinct\",\n \"distributions\",\n \"page\",\n \"get_video_data\",\n \"all_tags\",\n \"selected_statistics\",\n \"tag_modal\",\n }:\n caller = self\n elif event in {\"capture\", \"update\"}:\n caller = client\n else:\n caller = StateHandler\n\n if event == \"refresh\":\n message[\"polling_client\"] = client\n\n if event == \"update\" and not force_update:\n message[\"ignore_polling_client\"] = client\n\n handle = getattr(StateHandler, \"on_%s\" % event)\n await handle(caller, **message)\n\n if caller == self:\n return\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n return\n\n if event == \"update\":\n self.write_message({\"type\": \"update\", \"state\": StateHandler.state})\n\n elif event == \"deactivate\":\n self.write_message({\"type\": \"deactivate\"})\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if event == \"statistics\":\n await StateHandler.send_statistics(view, only=self)\n\n elif event == \"extended_statistics\":\n await StateHandler.send_statistics(\n view, only=self, filters=state.filters\n )\n\n def write_message(self, message):\n message = StateHandler.dumps(message)\n self.write(message)\n\n\ndef _get_label_object_ids(label):\n \"\"\"Returns a list of all object IDs contained in the label.\n\n Args:\n label: an ImageLabel instance\n\n Returns:\n list of IDs as strings\n \"\"\"\n list_field_name = type(label).__name__.lower()\n if hasattr(label, \"id\"):\n return [label.id]\n\n if list_field_name in label:\n return [obj.id for obj in label[list_field_name]]\n\n raise TypeError(\"Cannot serialize label type: \" + str(type(label)))\n\n\nclass StateHandler(tornado.websocket.WebSocketHandler):\n \"\"\"WebSocket handler for bi-directional state communication.\n\n Attributes:\n app_clients: active App clients\n clients: active clients\n state: the current a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n prev_state: the previous a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n \"\"\"\n\n app_clients = set()\n clients = set()\n state = fos.StateDescription().serialize()\n prev_state = fos.StateDescription().serialize()\n\n @staticmethod\n def dumps(data):\n \"\"\"Serializes data to a JSON formatted :class:`str`.\n\n Args:\n data: serializable object\n\n Returns:\n :class:`str`\n \"\"\"\n return FiftyOneJSONEncoder.dumps(data)\n\n @staticmethod\n def loads(data):\n \"\"\"Deserialized data to an object.\n\n Args:\n data: :class:`str`, :class:`bytes`, or :class:`bytearray`\n\n Returns:\n an object\n \"\"\"\n return FiftyOneJSONEncoder.loads(data)\n\n @staticmethod\n def sample_collection():\n \"\"\"Getter for the current sample collection.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n return db[dataset._sample_collection_name]\n\n def write_message(self, message):\n \"\"\"Writes a message to the client.\n\n Args:\n message: a serializable object\n \"\"\"\n if message is None:\n return\n message = self.dumps(message)\n return super().write_message(message)\n\n def check_origin(self, origin):\n \"\"\"Accepts all origins.\n\n Returns:\n True\n \"\"\"\n return True\n\n def open(self):\n \"\"\"On open, add the client to the active clients set, and write the\n current state to the new client.\n \"\"\"\n StateHandler.clients.add(self)\n 
_write_message(\n {\"type\": \"update\", \"state\": StateHandler.state}, only=self\n )\n\n def on_close(self):\n \"\"\"On close, remove the client from the active clients set, and\n active App clients set (if applicable).\n \"\"\"\n StateHandler.clients.remove(self)\n StateHandler.app_clients.discard(self)\n if not StateHandler.app_clients:\n _write_message({\"type\": \"close\"}, session=True)\n\n @_catch_errors\n async def on_message(self, message):\n \"\"\"On message, call the associated event awaitable, with respect to\n the provided message type.\n\n Args:\n message: a serialized message\n \"\"\"\n message = self.loads(message)\n event = getattr(self, \"on_%s\" % message.pop(\"type\"))\n await event(self, **message)\n\n @staticmethod\n async def on_capture(self, src, width):\n global _notebook_clients\n _write_message(\n {\n \"type\": \"capture\",\n \"handle\": _notebook_clients[self],\n \"src\": src,\n \"width\": width,\n }\n )\n\n @staticmethod\n async def on_as_app(self, notebook=False, handle=None, ignore=None):\n \"\"\"Event for registering a client as an App.\"\"\"\n if isinstance(self, StateHandler):\n StateHandler.app_clients.add(self)\n\n global _notebook_clients\n if isinstance(self, StateHandler) and notebook:\n _notebook_clients[self] = handle\n\n if not isinstance(self, StateHandler):\n return\n\n awaitables = self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_refresh(self, polling_client=None):\n \"\"\"Event for refreshing an App client.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.refresh = not state.refresh\n StateHandler.state = state.serialize()\n\n if polling_client:\n PollingHandler.clients[polling_client].update(\n {\"update\", \"statistics\", \"extended_statistics\"}\n )\n else:\n awaitables = [self.send_updates(only=self)]\n awaitables += self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_filters_update(self, filters):\n \"\"\"Event for updating state filters. 
Sends an extended dataset\n statistics message to active App clients.\n\n Args:\n filters: a :class:`dict` mapping field path to a serialized\n :class:fiftyone.core.stages.Stage`\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.filters = filters\n state.selected_labels = []\n state.selected = []\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n StateHandler.state = state.serialize()\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\"})\n\n await self.send_statistics(view, filters=filters)\n\n @classmethod\n async def on_page(cls, self, page, page_length=20):\n \"\"\"Sends a pagination response to the current client.\n\n Args:\n page: the page number\n page_length (20): the number of items to return\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n _write_message(\n {\"type\": \"page\", \"page\": page, \"results\": [], \"more\": False},\n only=self,\n )\n return\n\n view = get_extended_view(view, state.filters, count_labels_tags=True)\n view = view.skip((page - 1) * page_length)\n\n if view.media_type == fom.VIDEO:\n view = view.set_field(\"frames\", F(\"frames\")[0])\n\n results, more = await _get_sample_data(\n cls.sample_collection(), view, page_length, page\n )\n\n message = {\n \"type\": \"page\",\n \"page\": page,\n \"results\": results,\n \"more\": more,\n }\n\n _write_message(message, only=self)\n\n @staticmethod\n async def on_update(caller, state, ignore_polling_client=None):\n \"\"\"Event for state updates. Sends an update message to all active\n clients, and statistics messages to active App clients.\n\n Args:\n state: a serialized :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n StateHandler.state = fos.StateDescription.from_dict(state).serialize()\n active_handle = state[\"active_handle\"]\n global _notebook_clients\n global _deactivated_clients\n _deactivated_clients.discard(active_handle)\n\n # ignore deactivated notebook cells\n if (\n active_handle\n and caller in _notebook_clients\n and _notebook_clients[caller] != active_handle\n ):\n return\n\n for client, events in PollingHandler.clients.items():\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n\n # deactivate the last active colab cell\n if uuid != active_handle:\n events.clear()\n _deactivated_clients.add(uuid)\n events.add(\"deactivate\")\n continue\n\n if client == ignore_polling_client:\n events.update({\"statistics\", \"extended_statistics\"})\n\n events.update({\"update\", \"statistics\", \"extended_statistics\"})\n\n awaitables = [\n StateHandler.send_updates(),\n ]\n awaitables += StateHandler.get_statistics_awaitables()\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_set_selection(self, _ids):\n \"\"\"Event for setting the selected\n :class:`fiftyone.core.samples.Sample` _ids\n\n Args:\n _ids: a list of sample _id\n \"\"\"\n StateHandler.state[\"selected\"] = _ids\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_clear_selection(self):\n \"\"\"Event for clearing the currently selected sample _ids.\n\n Sends state updates to all active clients.\n \"\"\"\n StateHandler.state[\"selected\"] = []\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_selected_labels(self, selected_labels):\n \"\"\"Event for setting the entire selected objects list.\n\n Args:\n selected_labels: a list of 
selected labels\n \"\"\"\n if not isinstance(selected_labels, list):\n raise TypeError(\"selected_labels must be a list\")\n\n StateHandler.state[\"selected_labels\"] = selected_labels\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_dataset(self, dataset_name):\n \"\"\"Event for setting the current dataset by name.\n\n Args:\n dataset_name: the dataset name\n \"\"\"\n dataset = fod.load_dataset(dataset_name)\n config = fos.StateDescription.from_dict(StateHandler.state).config\n active_handle = StateHandler.state[\"active_handle\"]\n StateHandler.state = fos.StateDescription(\n dataset=dataset, config=config, active_handle=active_handle\n ).serialize()\n await self.on_update(self, StateHandler.state)\n\n @staticmethod\n async def on_get_video_data(self, _id):\n \"\"\"Gets the frame labels for video samples.\n\n Args:\n _id: a sample _id\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n result = await _get_video_data(\n StateHandler.sample_collection(), state, view, [_id]\n )\n sample, frames, labels = result[0]\n convert([labels])\n convert([sample])\n convert(frames)\n\n fps = etav.get_frame_rate(sample[\"filepath\"])\n _write_message(\n {\n \"type\": \"video_data-%s\" % _id,\n \"frames\": frames,\n \"labels\": labels.serialize(),\n \"fps\": fps,\n },\n only=self,\n )\n\n @staticmethod\n async def on_tag(\n caller, changes, target_labels=False, active_labels=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n if state.selected:\n view = view.select(state.selected)\n\n if target_labels:\n fosu.change_label_tags(view, changes, label_fields=active_labels)\n else:\n fosu.change_sample_tags(view, changes)\n\n StateHandler.state[\"refresh\"] = not state.refresh\n for clients in PollingHandler.clients.values():\n clients.update({\"update\"})\n\n await StateHandler.on_update(caller, StateHandler.state)\n\n @staticmethod\n async def on_all_tags(caller):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n if dataset is None:\n label = []\n sample = []\n else:\n (_, tag_aggs,) = fos.DatasetStatistics.get_label_aggregations(\n dataset\n )\n results = await dataset._async_aggregate(\n StateHandler.sample_collection(),\n [foa.Distinct(\"tags\")] + tag_aggs,\n )\n sample = results[0]\n\n label = set()\n for result in results[1:]:\n label |= set(result.keys())\n\n _write_message(\n {\"type\": \"all_tags\", \"sample\": sample, \"label\": label}, only=caller\n )\n\n @staticmethod\n async def on_save_filters(caller, add_stages=[], with_selected=False):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n\n if with_selected:\n if state.selected:\n view = view.select(state.selected)\n elif state.selected_labels:\n view = view.select_labels(state.selected_labels)\n\n for d in add_stages:\n stage = fosg.ViewStage._from_dict(d)\n view = view.add_stage(stage)\n\n state.selected = []\n state.selected_labels = []\n state.view = view\n state.filters = {}\n\n await StateHandler.on_update(caller, state.serialize())\n\n @staticmethod\n async def on_tag_modal(\n caller, changes, sample_id=None, 
labels=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if sample_id:\n sample_ids = [sample_id]\n tag_view = view.select(sample_id)\n fosu.change_sample_tags(tag_view, changes)\n else:\n if state.selected_labels:\n labels = state.selected_labels\n\n sample_ids = list({label[\"sample_id\"] for label in labels})\n tag_view = view.select_labels(labels=labels)\n\n fields = {label[\"field\"] for label in labels}\n fosu.change_label_tags(tag_view, changes, label_fields=fields)\n\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\", \"statistics\"})\n\n if isinstance(caller, PollingHandler):\n await StateHandler.send_samples(sample_ids, only=caller)\n\n awaitables = [StateHandler.send_samples(sample_ids)]\n awaitables += StateHandler.get_statistics_awaitables()\n\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_selected_statistics(caller, active_labels=[]):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n view = view.select(state.selected).select_fields(active_labels)\n\n count_aggs, tag_aggs = fos.DatasetStatistics.get_label_aggregations(\n view\n )\n results = await view._async_aggregate(\n StateHandler.sample_collection(), count_aggs + tag_aggs\n )\n\n count = sum(results[: len(count_aggs)])\n\n tags = defaultdict(int)\n for result in results[len(count_aggs) :]:\n for tag, num in result.items():\n tags[tag] += num\n\n _write_message(\n {\"type\": \"selected_statistics\", \"count\": count, \"tags\": tags},\n only=caller,\n )\n\n @classmethod\n async def send_samples(cls, sample_ids, only=None):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters, count_labels_tags=True)\n\n col = cls.sample_collection()\n\n if view.media_type == fom.VIDEO:\n samples = await _get_video_data(col, state, view, sample_ids)\n result = [\n {\"sample\": s, \"frames\": f, \"labels\": l.serialize()}\n for (s, f, l) in samples\n ]\n else:\n view = view.select(sample_ids)\n result, _ = await _get_sample_data(col, view, len(sample_ids), 1)\n\n _write_message(\n {\"type\": \"samples_update\", \"samples\": result}, app=True, only=only\n )\n\n @classmethod\n def get_statistics_awaitables(cls, only=None):\n \"\"\"Gets statistic awaitables that will send statistics to the relevant\n client(s) when executed\n\n Args:\n only (None): a client to restrict the messages to\n\n Returns:\n a list of coroutines\n \"\"\"\n if StateHandler.state[\"dataset\"] is None:\n return []\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n awaitables = [cls.send_statistics(view, only=only)]\n\n awaitables.append(\n cls.send_statistics(view, filters=state.filters, only=only)\n )\n return awaitables\n\n @classmethod\n async def send_updates(cls, ignore=None, only=None):\n \"\"\"Sends an update event to the all clients, exluding the ignore\n client, if it is not None.\n\n Args:\n ignore (None): a client to not send the update to\n only (None): a client to restrict the updates to\n \"\"\"\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state},\n ignore=ignore,\n only=only,\n )\n\n @classmethod\n async def 
send_statistics(cls, view, filters=None, only=None):\n \"\"\"Sends a statistics event given using the provided view to all App\n clients, unless an only client is provided in which case it is only\n sent to the that client.\n\n Args:\n view: a view\n filters (None): filter stages to append to the view\n only (None): a client to restrict the message to\n \"\"\"\n base_view = view\n data = {\"main\": [], \"none\": []}\n if view is not None and (filters is None or len(filters)):\n view = get_extended_view(view, filters)\n\n stats = fos.DatasetStatistics(view)\n aggs = stats.aggregations\n exists_aggs = stats.exists_aggregations\n num_aggs = len(aggs)\n\n results = await view._async_aggregate(\n cls.sample_collection(), aggs + exists_aggs\n )\n aggs_results = results[:num_aggs]\n exists_results = results[num_aggs:]\n\n for a, r, k in [\n (aggs, aggs_results, \"main\"),\n (exists_aggs, exists_results, \"none\"),\n ]:\n for agg, result in zip(a, r):\n data[k].append(\n {\n \"_CLS\": agg.__class__.__name__,\n \"name\": agg.field_name,\n \"result\": result,\n }\n )\n\n view = (\n base_view._serialize()\n if isinstance(base_view, fov.DatasetView)\n else []\n )\n\n message = {\n \"type\": \"statistics\",\n \"stats\": data,\n \"view\": view,\n \"filters\": filters,\n }\n\n _write_message(message, app=True, only=only)\n\n @classmethod\n async def on_distinct(\n cls, self, path, uuid=None, selected=[], search=\"\", limit=10\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n results = None\n col = cls.sample_collection()\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n results = []\n\n view = _get_search_view(view, path, search, selected)\n\n count, first = await view._async_aggregate(\n col, foa.Distinct(path, _first=limit)\n )\n\n message = {\n \"type\": uuid,\n \"count\": count,\n \"results\": first,\n }\n _write_message(message, app=True, only=self)\n\n @classmethod\n async def on_distributions(cls, self, group, omit=[]):\n \"\"\"Sends distribution data with respect to a group to the requesting\n client.\n\n Args:\n group: the distribution group. 
Valid groups are 'labels', 'scalars',\n and 'tags'.\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n results = None\n col = cls.sample_collection()\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n results = []\n\n view = get_extended_view(view, state.filters)\n\n if group == \"label tags\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.tags\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(col, aggs, fields, view)\n\n elif group == \"labels\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.label\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(col, aggs, fields, view)\n\n elif group == \"sample tags\" and results is None:\n aggs = [foa.CountValues(\"tags\")]\n try:\n fields = [view.get_field_schema()[\"tags\"]]\n results = await _gather_results(col, aggs, fields, view)\n except:\n results = []\n\n elif results is None:\n\n def filter(field):\n if (\n field.name in {\"tags\"}\n or field.name in omit\n or field.name.startswith(\"_\")\n ):\n return None\n\n if fos._meets_type(field, (fof.BooleanField, fof.StringField)):\n return field.name\n\n return None\n\n aggs, fields = _count_values(filter, view)\n\n hist_aggs, hist_fields, ticks = await _numeric_histograms(\n col, view, view.get_field_schema()\n )\n aggs.extend(hist_aggs)\n fields.extend(hist_fields)\n results = await _gather_results(col, aggs, fields, view, ticks)\n\n results = sorted(results, key=lambda i: i[\"name\"])\n _write_message(\n {\"type\": \"distributions\", \"results\": results}, only=self\n )\n\n\ndef _label_filter(field):\n path = None\n if isinstance(field, fof.EmbeddedDocumentField) and issubclass(\n field.document_type, fol.Label\n ):\n path = field.name\n if issubclass(field.document_type, fol._HasLabelList):\n path = \"%s.%s\" % (path, field.document_type._LABEL_LIST_FIELD,)\n\n return path\n\n\ndef _get_search_view(view, path, search, selected):\n search = _escape_regex_chars(search)\n\n if search == \"\" and not selected:\n return view\n\n if \".\" in path:\n fields = path.split(\".\")\n if view.media_type == fom.VIDEO and fields[0] == \"frames\":\n field = \".\".join(fields[:2])\n else:\n field = fields[0]\n\n vf = F(\"label\")\n meth = lambda expr: view.filter_labels(field, expr)\n else:\n vf = F(path)\n meth = view.match\n\n if search != \"\" and selected:\n expr = vf.re_match(search) & ~vf.is_in(selected)\n elif search != \"\":\n expr = vf.re_match(search)\n elif selected:\n expr = ~vf.is_in(selected)\n\n return meth(expr)\n\n\ndef _write_message(message, app=False, session=False, ignore=None, only=None):\n clients = StateHandler.app_clients if app else StateHandler.clients\n clients = _filter_deactivated_clients(clients)\n\n if only:\n only.write_message(message)\n return\n\n for client in clients:\n if session and client in StateHandler.app_clients:\n continue\n\n if client == ignore:\n continue\n\n client.write_message(message)\n\n\ndef _filter_deactivated_clients(clients):\n global _notebook_clients\n global _deactivated_clients\n active_handle = StateHandler.state[\"active_handle\"]\n\n filtered = []\n\n for client in clients:\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n if uuid != active_handle and uuid not in _deactivated_clients:\n 
_deactivated_clients.add(uuid)\n client.write_message({\"type\": \"deactivate\"})\n\n if uuid != active_handle:\n continue\n\n filtered.append(client)\n\n return filtered\n\n\n_DEFAULT_NUM_HISTOGRAM_BINS = 25\n\n\ndef _parse_histogram_values(result, field):\n counts, edges, other = result\n data = sorted(\n [\n {\n \"key\": round((k + edges[idx + 1]) / 2, 4),\n \"count\": v,\n \"edges\": (k, edges[idx + 1]),\n }\n for idx, (k, v) in enumerate(zip(edges, counts))\n ],\n key=lambda i: i[\"key\"],\n )\n if (\n fos._meets_type(field, fof.IntField)\n and len(data) == _DEFAULT_NUM_HISTOGRAM_BINS\n ):\n for bin_ in data:\n bin_[\"edges\"] = [math.ceil(e) for e in bin_[\"edges\"]]\n bin_[\"key\"] = math.ceil(bin_[\"key\"])\n elif fos._meets_type(field, fof.IntField):\n for bin_ in data:\n del bin_[\"edges\"]\n\n if other > 0:\n data.append({\"key\": \"None\", \"count\": other})\n\n return data\n\n\ndef _parse_count_values(result, field):\n return sorted(\n [{\"key\": k, \"count\": v} for k, v in result.items()],\n key=lambda i: i[\"count\"],\n reverse=True,\n )\n\n\nasync def _gather_results(col, aggs, fields, view, ticks=None):\n response = await view._async_aggregate(col, aggs)\n\n sorters = {\n foa.HistogramValues: _parse_histogram_values,\n foa.CountValues: _parse_count_values,\n }\n\n results = []\n for idx, (result, agg) in enumerate(zip(response, aggs)):\n field = fields[idx]\n try:\n type_ = field.document_type.__name__\n cls = field.document_type\n except:\n type_ = field.__class__.__name__\n cls = None\n\n name = agg.field_name\n if cls and issubclass(cls, fol.Label):\n if view.media_type == fom.VIDEO and name.startswith(\n view._FRAMES_PREFIX\n ):\n name = \"\".join(name.split(\".\")[:2])\n else:\n name = name.split(\".\")[0]\n\n data = sorters[type(agg)](result, field)\n result_ticks = 0\n if type(agg) == foa.HistogramValues:\n result_ticks = ticks.pop(0)\n if result_ticks is None:\n result_ticks = []\n step = max(len(data) // 4, 1)\n for i in range(0, len(data), step):\n result_ticks.append(data[i][\"key\"])\n\n if result[2] > 0 and len(data) and data[-1][\"key\"] != \"None\":\n result_ticks.append(\"None\")\n\n if data:\n results.append(\n {\n \"data\": data,\n \"name\": name,\n \"ticks\": result_ticks,\n \"type\": type_,\n }\n )\n\n return results\n\n\ndef _count_values(f, view):\n aggregations = []\n fields = []\n schemas = [(view.get_field_schema(), \"\")]\n if view.media_type == fom.VIDEO:\n schemas.append((view.get_frame_field_schema(), view._FRAMES_PREFIX))\n\n for schema, prefix in schemas:\n for field in schema.values():\n path = f(field)\n if path is None:\n continue\n\n fields.append(field)\n aggregations.append(foa.CountValues(\"%s%s\" % (prefix, path)))\n\n return aggregations, fields\n\n\ndef _numeric_bounds(paths):\n return [foa.Bounds(path) for path in paths]\n\n\nasync def _numeric_histograms(coll, view, schema, prefix=\"\"):\n paths = []\n fields = []\n numerics = (fof.IntField, fof.FloatField)\n for name, field in schema.items():\n if prefix != \"\" and name == \"frame_number\":\n continue\n\n if fos._meets_type(field, numerics):\n paths.append(\"%s%s\" % (prefix, name))\n fields.append(field)\n\n aggs = _numeric_bounds(paths)\n bounds = await view._async_aggregate(coll, aggs)\n aggregations = []\n ticks = []\n for range_, field, path in zip(bounds, fields, paths):\n bins = _DEFAULT_NUM_HISTOGRAM_BINS\n num_ticks = None\n if range_[0] == range_[1]:\n bins = 1\n\n if range_ == (None, None):\n range_ = (0, 1)\n elif fos._meets_type(field, fof.IntField):\n delta = 
range_[1] - range_[0]\n range_ = (range_[0] - 0.5, range_[1] + 0.5)\n if delta < _DEFAULT_NUM_HISTOGRAM_BINS:\n bins = delta + 1\n num_ticks = 0\n else:\n range_ = (range_[0], range_[1] + 0.01)\n\n ticks.append(num_ticks)\n aggregations.append(foa.HistogramValues(path, bins=bins, range=range_))\n\n return aggregations, fields, ticks\n\n\nasync def _get_sample_data(col, view, page_length, page):\n pipeline = view._pipeline()\n\n samples = await foo.aggregate(col, pipeline).to_list(page_length + 1)\n convert(samples)\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n more = page + 1\n\n results = [{\"sample\": s} for s in samples]\n for r in results:\n w, h = fosu.get_file_dimensions(r[\"sample\"][\"filepath\"])\n r[\"width\"] = w\n r[\"height\"] = h\n\n return results, more\n\n\nasync def _get_video_data(col, state, view, _ids):\n view = view.select(_ids)\n pipeline = view._pipeline(attach_frames=True)\n results = []\n async for sample in col.aggregate(pipeline):\n frames = sample[\"frames\"]\n if frames and frames[0][\"frame_number\"] == 1:\n sample[\"frames\"] = frames[0]\n else:\n sample[\"frames\"] = None\n\n labels = _make_video_labels(state, view, sample, frames)\n results.append((sample, frames, labels))\n\n return results\n\n\ndef _make_frame_labels(name, label, frame_number, prefix=\"\"):\n label = fol.ImageLabel.from_dict(label)\n labels = etav.VideoFrameLabels.from_image_labels(\n label.to_image_labels(name=prefix + name), frame_number,\n )\n\n for obj in labels.objects:\n obj.frame_number = frame_number\n\n for attr in labels.attributes():\n container = getattr(labels, attr)\n\n if isinstance(container, etal.LabelsContainer):\n object_ids = _get_label_object_ids(label)\n assert len(container) == len(object_ids)\n for (obj, object_id) in zip(container, object_ids):\n # force _id to be serialized\n obj._id = object_id\n attrs = obj.attributes() + [\"_id\"]\n obj.attributes = lambda: attrs\n\n return labels\n\n\ndef _make_video_labels(state, view, sample, frames):\n labels = etav.VideoLabels()\n for frame_dict in frames:\n frame_number = frame_dict[\"frame_number\"]\n frame_labels = etav.VideoFrameLabels(frame_number=frame_number)\n for k, v in frame_dict.items():\n if isinstance(v, dict) and \"_cls\" in v:\n field_labels = _make_frame_labels(\n k, v, frame_number, prefix=view._FRAMES_PREFIX\n )\n frame_labels.merge_labels(field_labels)\n\n labels.add_frame(frame_labels)\n\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n sample_schema = dataset.get_field_schema()\n for frame_number in range(1, etav.get_frame_count(sample[\"filepath\"]) + 1):\n frame_labels = etav.VideoFrameLabels(frame_number=frame_number)\n for k, v in sample.items():\n if k not in sample_schema:\n continue\n\n field = sample_schema[k]\n if not isinstance(field, fof.EmbeddedDocumentField):\n continue\n\n if not issubclass(field.document_type, fol.Label):\n continue\n\n field_labels = _make_frame_labels(k, v, frame_number)\n for obj in field_labels.objects:\n obj.frame_number = frame_number\n\n frame_labels.merge_labels(field_labels)\n\n labels.add_frame(frame_labels, overwrite=False)\n\n return labels\n\n\nclass FileHandler(tornado.web.StaticFileHandler):\n def set_headers(self):\n super().set_headers()\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"GET, HEAD, OPTIONS\")\n 
self.set_header(\"content-length\", self.get_content_size())\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n\nclass MediaHandler(FileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n if os.name != \"nt\":\n path = os.path.join(\"/\", path)\n\n return path\n\n def validate_absolute_path(self, root, absolute_path):\n if os.path.isdir(absolute_path) and self.default_filename is not None:\n if not self.request.path.endswith(\"/\"):\n self.redirect(self.request.path + \"/\", permanent=True)\n return None\n\n absolute_path = os.path.join(absolute_path, self.default_filename)\n if not os.path.exists(absolute_path):\n raise HTTPError(404)\n\n if not os.path.isfile(absolute_path):\n raise HTTPError(403, \"%s is not a file\", self.path)\n\n return absolute_path\n\n\nclass Application(tornado.web.Application):\n \"\"\"FiftyOne Tornado Application\"\"\"\n\n def __init__(self, **settings):\n server_path = os.path.dirname(os.path.abspath(__file__))\n rel_web_path = \"static\"\n web_path = os.path.join(server_path, rel_web_path)\n handlers = [\n (r\"/fiftyone\", FiftyOneHandler),\n (r\"/polling\", PollingHandler),\n (r\"/feedback\", FeedbackHandler),\n (r\"/filepath/(.*)\", MediaHandler, {\"path\": \"\"},),\n (r\"/notebook\", NotebookHandler),\n (r\"/stages\", StagesHandler),\n (r\"/state\", StateHandler),\n (r\"/reactivate\", ReactivateHandler),\n (\n r\"/(.*)\",\n FileHandler,\n {\"path\": web_path, \"default_filename\": \"index.html\"},\n ),\n ]\n super().__init__(handlers, **settings)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=fo.config.default_app_port)\n args = parser.parse_args()\n app = Application(debug=foc.DEV_INSTALL)\n app.listen(args.port)\n tornado.ioloop.IOLoop.current().start()\n", "path": "fiftyone/server/main.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne Tornado server.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport argparse\nfrom collections import defaultdict\nimport math\nimport os\nimport traceback\n\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.iostream\nimport tornado.options\nimport tornado.web\nfrom tornado.web import HTTPError\nimport tornado.websocket\n\nimport eta.core.labels as etal\nimport eta.core.serial as etas\nimport eta.core.video as etav\n\nos.environ[\"FIFTYONE_SERVER\"] = \"1\"\n\nimport fiftyone as fo\nimport fiftyone.core.aggregations as foa\nimport fiftyone.constants as foc\nfrom fiftyone.core.expressions import ViewField as F, _escape_regex_chars\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.fields as fof\nimport fiftyone.core.labels as fol\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.service import DatabaseService\nfrom fiftyone.core.stages import _STAGES\nimport fiftyone.core.stages as fosg\nimport fiftyone.core.state as fos\nimport fiftyone.core.uid as fou\nimport fiftyone.core.view as fov\n\nfrom fiftyone.server.extended_view import get_extended_view\nfrom fiftyone.server.json_util import convert, FiftyOneJSONEncoder\nimport fiftyone.server.utils as fosu\n\n\n# connect to the existing DB service to initialize global port information\ndbs = DatabaseService()\ndbs.start()\ndb = foo.get_async_db_conn()\n\n\nclass RequestHandler(tornado.web.RequestHandler):\n \"\"\"\"Base class for HTTP request handlers\"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n 
self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n self.write(self.get_response())\n\n def get_response(self):\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n raise NotImplementedError(\"subclass must implement get_response()\")\n\n\nclass FiftyOneHandler(RequestHandler):\n \"\"\"Returns the version info of the fiftyone being used\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n uid, _ = fou.get_user_id()\n isfile = os.path.isfile(foc.FEEDBACK_PATH)\n if isfile:\n submitted = etas.load_json(foc.FEEDBACK_PATH)[\"submitted\"]\n else:\n submitted = False\n\n return {\n \"version\": foc.VERSION,\n \"user_id\": uid,\n \"do_not_track\": fo.config.do_not_track,\n \"feedback\": {\"submitted\": submitted, \"minimized\": isfile},\n \"dev_install\": foc.DEV_INSTALL or foc.RC_INSTALL,\n }\n\n\nclass NotebookHandler(RequestHandler):\n \"\"\"Check that the requested handle exists on the server\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n\n response = self.get_response(handle_id)\n if response is None:\n raise tornado.web.HTTPError(status_code=404)\n\n self.write(response)\n\n @staticmethod\n def get_response(handle):\n \"\"\"Returns if the notebook handle exists on the server.\n\n Returns:\n the handle ID\n \"\"\"\n global _notebook_clients\n if handle in set(_notebook_clients.values()):\n return {\"exists\": True}\n\n\nclass ReactivateHandler(RequestHandler):\n \"\"\"Reactivates an IPython display handle\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n self.write(self.get_response(handle_id))\n\n @staticmethod\n def get_response(handle_id):\n \"\"\"Returns on success\n\n Args:\n handle_id: a handle uuid\n \"\"\"\n StateHandler.state[\"active_handle\"] = handle_id\n for client in StateHandler.clients:\n client.write_message({\"type\": \"reactivate\", \"handle\": handle_id})\n\n return {}\n\n\nclass StagesHandler(RequestHandler):\n \"\"\"Returns the definitions of stages available to the App\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n return {\n \"stages\": [\n {\"name\": stage.__name__, \"params\": stage._params()}\n for stage in _STAGES\n ]\n }\n\n\nclass FeedbackHandler(RequestHandler):\n \"\"\"Returns whether the feedback button should be minimized\"\"\"\n\n def post(self):\n submitted = self.get_argument(\"submitted\", False)\n etas.write_json({\"submitted\": submitted}, foc.FEEDBACK_PATH)\n\n\ndef _catch_errors(func):\n async def wrapper(self, *args, **kwargs):\n try:\n StateHandler.prev_state = StateHandler.state\n result = await func(self, *args, **kwargs)\n return result\n except Exception:\n StateHandler.state = StateHandler.prev_state\n clients = list(StateHandler.clients)\n if isinstance(self, PollingHandler):\n clients.append(self)\n\n for client in clients:\n client.write_message(\n {\n \"type\": \"notification\",\n \"kind\": \"Server Error\",\n \"message\": (\n \"An exception has been raised by the server. 
Your session \"\n \"has been reverted to its previous state.\"\n ),\n \"session_items\": [traceback.format_exc()],\n \"app_items\": [\n \"A traceback has been printed to your Python shell.\"\n ],\n }\n )\n\n return wrapper\n\n\n_notebook_clients = {}\n_deactivated_clients = set()\n\n\nclass PollingHandler(tornado.web.RequestHandler):\n\n clients = defaultdict(set)\n screenshots = {}\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n\n @staticmethod\n def gather_messages(client):\n messages = [\n {\"type\": message} for message in PollingHandler.clients[client]\n ]\n PollingHandler.clients[client].clear()\n return messages\n\n @_catch_errors\n async def get(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n if client not in PollingHandler.clients:\n PollingHandler.clients[client].add(\"update\")\n PollingHandler.clients[client].add(\"statistics\")\n PollingHandler.clients[client].add(\"extended_statistics\")\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n\n @_catch_errors\n async def post(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n # pylint: disable=no-value-for-parameter\n mode = self.get_argument(\"mode\")\n message = StateHandler.loads(self.request.body)\n event = message.pop(\"type\")\n force_update = False\n if mode == \"push\":\n if event == \"as_app\":\n if message[\"notebook\"]:\n message[\"ignore\"] = client\n global _notebook_clients\n global _deactivated_clients\n StateHandler.state[\"active_handle\"] = message[\"handle\"]\n _deactivated_clients.discard(message[\"handle\"])\n _notebook_clients[client] = message[\"handle\"]\n event = \"update\"\n force_update = True\n message = {\"state\": StateHandler.state}\n\n if event in {\n \"distinct\",\n \"distributions\",\n \"page\",\n \"get_video_data\",\n \"all_tags\",\n \"selected_statistics\",\n \"tag_modal\",\n }:\n caller = self\n elif event in {\"capture\", \"update\"}:\n caller = client\n else:\n caller = StateHandler\n\n if event == \"refresh\":\n message[\"polling_client\"] = client\n\n if event == \"update\" and not force_update:\n message[\"ignore_polling_client\"] = client\n\n handle = getattr(StateHandler, \"on_%s\" % event)\n await handle(caller, **message)\n\n if caller == self:\n return\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n return\n\n if event == \"update\":\n self.write_message({\"type\": \"update\", \"state\": StateHandler.state})\n\n elif event == \"deactivate\":\n self.write_message({\"type\": \"deactivate\"})\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if event == \"statistics\":\n await StateHandler.send_statistics(view, only=self)\n\n elif event == \"extended_statistics\":\n await StateHandler.send_statistics(\n view, only=self, filters=state.filters\n )\n\n def write_message(self, message):\n message = StateHandler.dumps(message)\n self.write(message)\n\n\ndef _get_label_object_ids(label):\n \"\"\"Returns a list of all object IDs contained in the label.\n\n Args:\n label: an ImageLabel instance\n\n Returns:\n list of IDs as strings\n \"\"\"\n list_field_name = type(label).__name__.lower()\n if hasattr(label, 
\"id\"):\n return [label.id]\n\n if list_field_name in label:\n return [obj.id for obj in label[list_field_name]]\n\n raise TypeError(\"Cannot serialize label type: \" + str(type(label)))\n\n\nclass StateHandler(tornado.websocket.WebSocketHandler):\n \"\"\"WebSocket handler for bi-directional state communication.\n\n Attributes:\n app_clients: active App clients\n clients: active clients\n state: the current a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n prev_state: the previous a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n \"\"\"\n\n app_clients = set()\n clients = set()\n state = fos.StateDescription().serialize()\n prev_state = fos.StateDescription().serialize()\n\n @staticmethod\n def dumps(data):\n \"\"\"Serializes data to a JSON formatted :class:`str`.\n\n Args:\n data: serializable object\n\n Returns:\n :class:`str`\n \"\"\"\n return FiftyOneJSONEncoder.dumps(data)\n\n @staticmethod\n def loads(data):\n \"\"\"Deserialized data to an object.\n\n Args:\n data: :class:`str`, :class:`bytes`, or :class:`bytearray`\n\n Returns:\n an object\n \"\"\"\n return FiftyOneJSONEncoder.loads(data)\n\n @staticmethod\n def sample_collection():\n \"\"\"Getter for the current sample collection.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n return db[dataset._sample_collection_name]\n\n def write_message(self, message):\n \"\"\"Writes a message to the client.\n\n Args:\n message: a serializable object\n \"\"\"\n if message is None:\n return\n message = self.dumps(message)\n return super().write_message(message)\n\n def check_origin(self, origin):\n \"\"\"Accepts all origins.\n\n Returns:\n True\n \"\"\"\n return True\n\n def open(self):\n \"\"\"On open, add the client to the active clients set, and write the\n current state to the new client.\n \"\"\"\n StateHandler.clients.add(self)\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state}, only=self\n )\n\n def on_close(self):\n \"\"\"On close, remove the client from the active clients set, and\n active App clients set (if applicable).\n \"\"\"\n StateHandler.clients.remove(self)\n StateHandler.app_clients.discard(self)\n if not StateHandler.app_clients:\n _write_message({\"type\": \"close\"}, session=True)\n\n @_catch_errors\n async def on_message(self, message):\n \"\"\"On message, call the associated event awaitable, with respect to\n the provided message type.\n\n Args:\n message: a serialized message\n \"\"\"\n message = self.loads(message)\n event = getattr(self, \"on_%s\" % message.pop(\"type\"))\n await event(self, **message)\n\n @staticmethod\n async def on_capture(self, src, width):\n global _notebook_clients\n _write_message(\n {\n \"type\": \"capture\",\n \"handle\": _notebook_clients[self],\n \"src\": src,\n \"width\": width,\n }\n )\n\n @staticmethod\n async def on_as_app(self, notebook=False, handle=None, ignore=None):\n \"\"\"Event for registering a client as an App.\"\"\"\n if isinstance(self, StateHandler):\n StateHandler.app_clients.add(self)\n\n global _notebook_clients\n if isinstance(self, StateHandler) and notebook:\n _notebook_clients[self] = handle\n\n if not isinstance(self, StateHandler):\n return\n\n awaitables = self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_refresh(self, polling_client=None):\n \"\"\"Event for refreshing an App client.\"\"\"\n state = 
fos.StateDescription.from_dict(StateHandler.state)\n state.refresh = not state.refresh\n StateHandler.state = state.serialize()\n\n if polling_client:\n PollingHandler.clients[polling_client].update(\n {\"update\", \"statistics\", \"extended_statistics\"}\n )\n else:\n awaitables = [self.send_updates(only=self)]\n awaitables += self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_filters_update(self, filters):\n \"\"\"Event for updating state filters. Sends an extended dataset\n statistics message to active App clients.\n\n Args:\n filters: a :class:`dict` mapping field path to a serialized\n :class:fiftyone.core.stages.Stage`\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.filters = filters\n state.selected_labels = []\n state.selected = []\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n StateHandler.state = state.serialize()\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\"})\n\n await self.send_statistics(view, filters=filters)\n\n @classmethod\n async def on_page(cls, self, page, page_length=20):\n \"\"\"Sends a pagination response to the current client.\n\n Args:\n page: the page number\n page_length (20): the number of items to return\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n _write_message(\n {\"type\": \"page\", \"page\": page, \"results\": [], \"more\": False},\n only=self,\n )\n return\n\n view = get_extended_view(view, state.filters, count_labels_tags=True)\n view = view.skip((page - 1) * page_length)\n\n if view.media_type == fom.VIDEO:\n view = view.set_field(\"frames\", F(\"frames\")[0])\n\n results, more = await _get_sample_data(\n cls.sample_collection(), view, page_length, page\n )\n\n message = {\n \"type\": \"page\",\n \"page\": page,\n \"results\": results,\n \"more\": more,\n }\n\n _write_message(message, only=self)\n\n @staticmethod\n async def on_update(caller, state, ignore_polling_client=None):\n \"\"\"Event for state updates. 
Sends an update message to all active\n clients, and statistics messages to active App clients.\n\n Args:\n state: a serialized :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n StateHandler.state = fos.StateDescription.from_dict(state).serialize()\n active_handle = state[\"active_handle\"]\n global _notebook_clients\n global _deactivated_clients\n _deactivated_clients.discard(active_handle)\n\n # ignore deactivated notebook cells\n if (\n active_handle\n and caller in _notebook_clients\n and _notebook_clients[caller] != active_handle\n ):\n return\n\n for client, events in PollingHandler.clients.items():\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n\n # deactivate the last active colab cell\n if uuid != active_handle:\n events.clear()\n _deactivated_clients.add(uuid)\n events.add(\"deactivate\")\n continue\n\n if client == ignore_polling_client:\n events.update({\"statistics\", \"extended_statistics\"})\n\n events.update({\"update\", \"statistics\", \"extended_statistics\"})\n\n awaitables = [\n StateHandler.send_updates(),\n ]\n awaitables += StateHandler.get_statistics_awaitables()\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_set_selection(self, _ids):\n \"\"\"Event for setting the selected\n :class:`fiftyone.core.samples.Sample` _ids\n\n Args:\n _ids: a list of sample _id\n \"\"\"\n StateHandler.state[\"selected\"] = _ids\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_clear_selection(self):\n \"\"\"Event for clearing the currently selected sample _ids.\n\n Sends state updates to all active clients.\n \"\"\"\n StateHandler.state[\"selected\"] = []\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_selected_labels(self, selected_labels):\n \"\"\"Event for setting the entire selected objects list.\n\n Args:\n selected_labels: a list of selected labels\n \"\"\"\n if not isinstance(selected_labels, list):\n raise TypeError(\"selected_labels must be a list\")\n\n StateHandler.state[\"selected_labels\"] = selected_labels\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_dataset(self, dataset_name):\n \"\"\"Event for setting the current dataset by name.\n\n Args:\n dataset_name: the dataset name\n \"\"\"\n dataset = fod.load_dataset(dataset_name)\n config = fos.StateDescription.from_dict(StateHandler.state).config\n active_handle = StateHandler.state[\"active_handle\"]\n StateHandler.state = fos.StateDescription(\n dataset=dataset, config=config, active_handle=active_handle\n ).serialize()\n await self.on_update(self, StateHandler.state)\n\n @staticmethod\n async def on_get_video_data(self, _id):\n \"\"\"Gets the frame labels for video samples.\n\n Args:\n _id: a sample _id\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n result = await _get_video_data(\n StateHandler.sample_collection(), state, view, [_id]\n )\n sample, frames, labels = result[0]\n convert([labels])\n convert([sample])\n convert(frames)\n\n fps = etav.get_frame_rate(sample[\"filepath\"])\n _write_message(\n {\n \"type\": \"video_data-%s\" % _id,\n \"frames\": frames,\n \"labels\": labels.serialize(),\n \"fps\": fps,\n },\n only=self,\n )\n\n @staticmethod\n async def on_tag(\n caller, changes, target_labels=False, active_labels=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = 
get_extended_view(view, state.filters)\n if state.selected:\n view = view.select(state.selected)\n\n if target_labels:\n fosu.change_label_tags(view, changes, label_fields=active_labels)\n else:\n fosu.change_sample_tags(view, changes)\n\n StateHandler.state[\"refresh\"] = not state.refresh\n for clients in PollingHandler.clients.values():\n clients.update({\"update\"})\n\n await StateHandler.on_update(caller, StateHandler.state)\n\n @staticmethod\n async def on_all_tags(caller):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n if dataset is None:\n label = []\n sample = []\n else:\n (_, tag_aggs,) = fos.DatasetStatistics.get_label_aggregations(\n dataset\n )\n results = await dataset._async_aggregate(\n StateHandler.sample_collection(),\n [foa.Distinct(\"tags\")] + tag_aggs,\n )\n sample = results[0]\n\n label = set()\n for result in results[1:]:\n label |= set(result.keys())\n\n _write_message(\n {\"type\": \"all_tags\", \"sample\": sample, \"label\": label}, only=caller\n )\n\n @staticmethod\n async def on_save_filters(caller, add_stages=[], with_selected=False):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n\n if with_selected:\n if state.selected:\n view = view.select(state.selected)\n elif state.selected_labels:\n view = view.select_labels(state.selected_labels)\n\n for d in add_stages:\n stage = fosg.ViewStage._from_dict(d)\n view = view.add_stage(stage)\n\n state.selected = []\n state.selected_labels = []\n state.view = view\n state.filters = {}\n\n await StateHandler.on_update(caller, state.serialize())\n\n @staticmethod\n async def on_tag_modal(\n caller, changes, sample_id=None, labels=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if sample_id:\n sample_ids = [sample_id]\n tag_view = view.select(sample_id)\n fosu.change_sample_tags(tag_view, changes)\n else:\n if state.selected_labels:\n labels = state.selected_labels\n\n sample_ids = list({label[\"sample_id\"] for label in labels})\n tag_view = view.select_labels(labels=labels)\n\n fields = {label[\"field\"] for label in labels}\n fosu.change_label_tags(tag_view, changes, label_fields=fields)\n\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\", \"statistics\"})\n\n if isinstance(caller, PollingHandler):\n await StateHandler.send_samples(sample_ids, only=caller)\n\n awaitables = [StateHandler.send_samples(sample_ids)]\n awaitables += StateHandler.get_statistics_awaitables()\n\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_selected_statistics(caller, active_labels=[]):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n view = view.select(state.selected).select_fields(active_labels)\n\n count_aggs, tag_aggs = fos.DatasetStatistics.get_label_aggregations(\n view\n )\n results = await view._async_aggregate(\n StateHandler.sample_collection(), count_aggs + tag_aggs\n )\n\n count = sum(results[: len(count_aggs)])\n\n tags = defaultdict(int)\n for result in results[len(count_aggs) :]:\n for tag, num in result.items():\n tags[tag] += num\n\n _write_message(\n {\"type\": 
\"selected_statistics\", \"count\": count, \"tags\": tags},\n only=caller,\n )\n\n @classmethod\n async def send_samples(cls, sample_ids, only=None):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters, count_labels_tags=True)\n\n col = cls.sample_collection()\n\n if view.media_type == fom.VIDEO:\n samples = await _get_video_data(col, state, view, sample_ids)\n result = [\n {\"sample\": s, \"frames\": f, \"labels\": l.serialize()}\n for (s, f, l) in samples\n ]\n else:\n view = view.select(sample_ids)\n result, _ = await _get_sample_data(col, view, len(sample_ids), 1)\n\n _write_message(\n {\"type\": \"samples_update\", \"samples\": result}, app=True, only=only\n )\n\n @classmethod\n def get_statistics_awaitables(cls, only=None):\n \"\"\"Gets statistic awaitables that will send statistics to the relevant\n client(s) when executed\n\n Args:\n only (None): a client to restrict the messages to\n\n Returns:\n a list of coroutines\n \"\"\"\n if StateHandler.state[\"dataset\"] is None:\n return []\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n awaitables = [cls.send_statistics(view, only=only)]\n\n awaitables.append(\n cls.send_statistics(view, filters=state.filters, only=only)\n )\n return awaitables\n\n @classmethod\n async def send_updates(cls, ignore=None, only=None):\n \"\"\"Sends an update event to the all clients, exluding the ignore\n client, if it is not None.\n\n Args:\n ignore (None): a client to not send the update to\n only (None): a client to restrict the updates to\n \"\"\"\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state},\n ignore=ignore,\n only=only,\n )\n\n @classmethod\n async def send_statistics(cls, view, filters=None, only=None):\n \"\"\"Sends a statistics event given using the provided view to all App\n clients, unless an only client is provided in which case it is only\n sent to the that client.\n\n Args:\n view: a view\n filters (None): filter stages to append to the view\n only (None): a client to restrict the message to\n \"\"\"\n base_view = view\n data = {\"main\": [], \"none\": []}\n if view is not None and (filters is None or len(filters)):\n view = get_extended_view(view, filters)\n\n stats = fos.DatasetStatistics(view)\n aggs = stats.aggregations\n exists_aggs = stats.exists_aggregations\n num_aggs = len(aggs)\n\n results = await view._async_aggregate(\n cls.sample_collection(), aggs + exists_aggs\n )\n aggs_results = results[:num_aggs]\n exists_results = results[num_aggs:]\n\n for a, r, k in [\n (aggs, aggs_results, \"main\"),\n (exists_aggs, exists_results, \"none\"),\n ]:\n for agg, result in zip(a, r):\n data[k].append(\n {\n \"_CLS\": agg.__class__.__name__,\n \"name\": agg.field_name,\n \"result\": result,\n }\n )\n\n view = (\n base_view._serialize()\n if isinstance(base_view, fov.DatasetView)\n else []\n )\n\n message = {\n \"type\": \"statistics\",\n \"stats\": data,\n \"view\": view,\n \"filters\": filters,\n }\n\n _write_message(message, app=True, only=only)\n\n @classmethod\n async def on_distinct(\n cls, self, path, uuid=None, selected=[], search=\"\", limit=10\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n results = None\n col = cls.sample_collection()\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n 
results = []\n\n view = _get_search_view(view, path, search, selected)\n\n count, first = await view._async_aggregate(\n col, foa.Distinct(path, _first=limit)\n )\n\n message = {\n \"type\": uuid,\n \"count\": count,\n \"results\": first,\n }\n _write_message(message, app=True, only=self)\n\n @classmethod\n async def on_distributions(cls, self, group, omit=[]):\n \"\"\"Sends distribution data with respect to a group to the requesting\n client.\n\n Args:\n group: the distribution group. Valid groups are 'labels', 'scalars',\n and 'tags'.\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n results = None\n col = cls.sample_collection()\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n results = []\n\n view = get_extended_view(view, state.filters)\n\n if group == \"label tags\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.tags\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(col, aggs, fields, view)\n\n elif group == \"labels\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.label\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(col, aggs, fields, view)\n\n elif group == \"sample tags\" and results is None:\n aggs = [foa.CountValues(\"tags\")]\n try:\n fields = [view.get_field_schema()[\"tags\"]]\n results = await _gather_results(col, aggs, fields, view)\n except:\n results = []\n\n elif results is None:\n\n def filter(field):\n if (\n field.name in {\"tags\"}\n or field.name in omit\n or field.name.startswith(\"_\")\n ):\n return None\n\n if fos._meets_type(field, (fof.BooleanField, fof.StringField)):\n return field.name\n\n return None\n\n aggs, fields = _count_values(filter, view)\n\n hist_aggs, hist_fields, ticks = await _numeric_histograms(\n col, view, view.get_field_schema()\n )\n aggs.extend(hist_aggs)\n fields.extend(hist_fields)\n results = await _gather_results(col, aggs, fields, view, ticks)\n\n results = sorted(results, key=lambda i: i[\"name\"])\n _write_message(\n {\"type\": \"distributions\", \"results\": results}, only=self\n )\n\n\ndef _label_filter(field):\n path = None\n if isinstance(field, fof.EmbeddedDocumentField) and issubclass(\n field.document_type, fol.Label\n ):\n path = field.name\n if issubclass(field.document_type, fol._HasLabelList):\n path = \"%s.%s\" % (path, field.document_type._LABEL_LIST_FIELD,)\n\n return path\n\n\ndef _get_search_view(view, path, search, selected):\n search = _escape_regex_chars(search)\n\n if search == \"\" and not selected:\n return view\n\n if \".\" in path:\n fields = path.split(\".\")\n if view.media_type == fom.VIDEO and fields[0] == \"frames\":\n field = \".\".join(fields[:2])\n else:\n field = fields[0]\n\n vf = F(\"label\")\n meth = lambda expr: view.filter_labels(field, expr)\n else:\n vf = F(path)\n meth = view.match\n\n if search != \"\" and selected:\n expr = vf.re_match(search) & ~vf.is_in(selected)\n elif search != \"\":\n expr = vf.re_match(search)\n elif selected:\n expr = ~vf.is_in(selected)\n\n return meth(expr)\n\n\ndef _write_message(message, app=False, session=False, ignore=None, only=None):\n clients = StateHandler.app_clients if app else StateHandler.clients\n clients = _filter_deactivated_clients(clients)\n\n if only:\n only.write_message(message)\n return\n\n for client in clients:\n 
if session and client in StateHandler.app_clients:\n continue\n\n if client == ignore:\n continue\n\n client.write_message(message)\n\n\ndef _filter_deactivated_clients(clients):\n global _notebook_clients\n global _deactivated_clients\n active_handle = StateHandler.state[\"active_handle\"]\n\n filtered = []\n\n for client in clients:\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n if uuid != active_handle and uuid not in _deactivated_clients:\n _deactivated_clients.add(uuid)\n client.write_message({\"type\": \"deactivate\"})\n\n if uuid != active_handle:\n continue\n\n filtered.append(client)\n\n return filtered\n\n\n_DEFAULT_NUM_HISTOGRAM_BINS = 25\n\n\ndef _parse_histogram_values(result, field):\n counts, edges, other = result\n data = sorted(\n [\n {\n \"key\": round((k + edges[idx + 1]) / 2, 4),\n \"count\": v,\n \"edges\": (k, edges[idx + 1]),\n }\n for idx, (k, v) in enumerate(zip(edges, counts))\n ],\n key=lambda i: i[\"key\"],\n )\n if (\n fos._meets_type(field, fof.IntField)\n and len(data) == _DEFAULT_NUM_HISTOGRAM_BINS\n ):\n for bin_ in data:\n bin_[\"edges\"] = [math.ceil(e) for e in bin_[\"edges\"]]\n bin_[\"key\"] = math.ceil(bin_[\"key\"])\n elif fos._meets_type(field, fof.IntField):\n for bin_ in data:\n del bin_[\"edges\"]\n\n if other > 0:\n data.append({\"key\": \"None\", \"count\": other})\n\n return data\n\n\ndef _parse_count_values(result, field):\n return sorted(\n [{\"key\": k, \"count\": v} for k, v in result.items()],\n key=lambda i: i[\"count\"],\n reverse=True,\n )\n\n\nasync def _gather_results(col, aggs, fields, view, ticks=None):\n response = await view._async_aggregate(col, aggs)\n\n sorters = {\n foa.HistogramValues: _parse_histogram_values,\n foa.CountValues: _parse_count_values,\n }\n\n results = []\n for idx, (result, agg) in enumerate(zip(response, aggs)):\n field = fields[idx]\n try:\n type_ = field.document_type.__name__\n cls = field.document_type\n except:\n type_ = field.__class__.__name__\n cls = None\n\n name = agg.field_name\n if cls and issubclass(cls, fol.Label):\n if view.media_type == fom.VIDEO and name.startswith(\n view._FRAMES_PREFIX\n ):\n name = \"\".join(name.split(\".\")[:2])\n else:\n name = name.split(\".\")[0]\n\n data = sorters[type(agg)](result, field)\n result_ticks = 0\n if type(agg) == foa.HistogramValues:\n result_ticks = ticks.pop(0)\n if result_ticks is None:\n result_ticks = []\n step = max(len(data) // 4, 1)\n for i in range(0, len(data), step):\n result_ticks.append(data[i][\"key\"])\n\n if result[2] > 0 and len(data) and data[-1][\"key\"] != \"None\":\n result_ticks.append(\"None\")\n\n if data:\n results.append(\n {\n \"data\": data,\n \"name\": name,\n \"ticks\": result_ticks,\n \"type\": type_,\n }\n )\n\n return results\n\n\ndef _count_values(f, view):\n aggregations = []\n fields = []\n schemas = [(view.get_field_schema(), \"\")]\n if view.media_type == fom.VIDEO:\n schemas.append((view.get_frame_field_schema(), view._FRAMES_PREFIX))\n\n for schema, prefix in schemas:\n for field in schema.values():\n path = f(field)\n if path is None:\n continue\n\n fields.append(field)\n aggregations.append(foa.CountValues(\"%s%s\" % (prefix, path)))\n\n return aggregations, fields\n\n\ndef _numeric_bounds(paths):\n return [foa.Bounds(path) for path in paths]\n\n\nasync def _numeric_histograms(coll, view, schema, prefix=\"\"):\n paths = []\n fields = []\n numerics = (fof.IntField, fof.FloatField)\n for name, field in schema.items():\n if prefix != \"\" and name == \"frame_number\":\n continue\n\n if 
fos._meets_type(field, numerics):\n paths.append(\"%s%s\" % (prefix, name))\n fields.append(field)\n\n aggs = _numeric_bounds(paths)\n bounds = await view._async_aggregate(coll, aggs)\n aggregations = []\n ticks = []\n for range_, field, path in zip(bounds, fields, paths):\n bins = _DEFAULT_NUM_HISTOGRAM_BINS\n num_ticks = None\n if range_[0] == range_[1]:\n bins = 1\n\n if range_ == (None, None):\n range_ = (0, 1)\n elif fos._meets_type(field, fof.IntField):\n delta = range_[1] - range_[0]\n range_ = (range_[0] - 0.5, range_[1] + 0.5)\n if delta < _DEFAULT_NUM_HISTOGRAM_BINS:\n bins = delta + 1\n num_ticks = 0\n else:\n range_ = (range_[0], range_[1] + 0.01)\n\n ticks.append(num_ticks)\n aggregations.append(foa.HistogramValues(path, bins=bins, range=range_))\n\n return aggregations, fields, ticks\n\n\nasync def _get_sample_data(col, view, page_length, page):\n pipeline = view._pipeline()\n\n samples = await foo.aggregate(col, pipeline).to_list(page_length + 1)\n convert(samples)\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n more = page + 1\n\n results = [{\"sample\": s} for s in samples]\n for r in results:\n w, h = fosu.get_file_dimensions(r[\"sample\"][\"filepath\"])\n r[\"width\"] = w\n r[\"height\"] = h\n\n return results, more\n\n\nasync def _get_video_data(col, state, view, _ids):\n view = view.select(_ids)\n pipeline = view._pipeline(attach_frames=True)\n results = []\n async for sample in col.aggregate(pipeline):\n frames = sample[\"frames\"]\n if frames and frames[0][\"frame_number\"] == 1:\n sample[\"frames\"] = frames[0]\n else:\n sample[\"frames\"] = None\n\n labels = _make_video_labels(state, view, sample, frames)\n results.append((sample, frames, labels))\n\n return results\n\n\ndef _make_frame_labels(name, label, frame_number, prefix=\"\"):\n label = fol.ImageLabel.from_dict(label)\n labels = etav.VideoFrameLabels.from_image_labels(\n label.to_image_labels(name=prefix + name), frame_number,\n )\n\n for obj in labels.objects:\n obj.frame_number = frame_number\n\n for attr in labels.attributes():\n container = getattr(labels, attr)\n\n if isinstance(container, etal.LabelsContainer):\n object_ids = _get_label_object_ids(label)\n assert len(container) == len(object_ids)\n for (obj, object_id) in zip(container, object_ids):\n # force _id to be serialized\n obj._id = object_id\n attrs = obj.attributes() + [\"_id\"]\n obj.attributes = lambda: attrs\n\n return labels\n\n\ndef _make_video_labels(state, view, sample, frames):\n labels = etav.VideoLabels()\n for frame_dict in frames:\n frame_number = frame_dict[\"frame_number\"]\n frame_labels = etav.VideoFrameLabels(frame_number=frame_number)\n for k, v in frame_dict.items():\n if isinstance(v, dict) and \"_cls\" in v:\n field_labels = _make_frame_labels(\n k, v, frame_number, prefix=view._FRAMES_PREFIX\n )\n frame_labels.merge_labels(field_labels)\n\n labels.add_frame(frame_labels)\n\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n sample_schema = dataset.get_field_schema()\n for frame_number in range(1, etav.get_frame_count(sample[\"filepath\"]) + 1):\n frame_labels = etav.VideoFrameLabels(frame_number=frame_number)\n for k, v in sample.items():\n if k not in sample_schema:\n continue\n\n field = sample_schema[k]\n if not isinstance(field, fof.EmbeddedDocumentField):\n continue\n\n if not issubclass(field.document_type, fol.Label):\n continue\n\n field_labels = _make_frame_labels(k, v, frame_number)\n for obj in field_labels.objects:\n 
obj.frame_number = frame_number\n\n frame_labels.merge_labels(field_labels)\n\n labels.add_frame(frame_labels, overwrite=False)\n\n return labels\n\n\nclass FileHandler(tornado.web.StaticFileHandler):\n def set_headers(self):\n super().set_headers()\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"GET, HEAD, OPTIONS\")\n self.set_header(\"content-length\", self.get_content_size())\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n def get_content_type(self):\n if self.absolute_path.endswith(\".js\"):\n return \"text/javascript\"\n\n return super().get_content_type()\n\n\nclass MediaHandler(FileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n if os.name != \"nt\":\n path = os.path.join(\"/\", path)\n\n return path\n\n def validate_absolute_path(self, root, absolute_path):\n if os.path.isdir(absolute_path) and self.default_filename is not None:\n if not self.request.path.endswith(\"/\"):\n self.redirect(self.request.path + \"/\", permanent=True)\n return None\n\n absolute_path = os.path.join(absolute_path, self.default_filename)\n if not os.path.exists(absolute_path):\n raise HTTPError(404)\n\n if not os.path.isfile(absolute_path):\n raise HTTPError(403, \"%s is not a file\", self.path)\n\n return absolute_path\n\n\nclass Application(tornado.web.Application):\n \"\"\"FiftyOne Tornado Application\"\"\"\n\n def __init__(self, **settings):\n server_path = os.path.dirname(os.path.abspath(__file__))\n rel_web_path = \"static\"\n web_path = os.path.join(server_path, rel_web_path)\n handlers = [\n (r\"/fiftyone\", FiftyOneHandler),\n (r\"/polling\", PollingHandler),\n (r\"/feedback\", FeedbackHandler),\n (r\"/filepath/(.*)\", MediaHandler, {\"path\": \"\"},),\n (r\"/notebook\", NotebookHandler),\n (r\"/stages\", StagesHandler),\n (r\"/state\", StateHandler),\n (r\"/reactivate\", ReactivateHandler),\n (\n r\"/(.*)\",\n FileHandler,\n {\"path\": web_path, \"default_filename\": \"index.html\"},\n ),\n ]\n super().__init__(handlers, **settings)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=fo.config.default_app_port)\n args = parser.parse_args()\n app = Application(debug=foc.DEV_INSTALL)\n app.listen(args.port)\n tornado.ioloop.IOLoop.current().start()\n", "path": "fiftyone/server/main.py"}]} |
gh_patches_debug_1396 | rasdani/github-patches | git_diff | holoviz__panel-5608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve error on invalid serve URL
The following simple app:
```python
import panel as pn
pn.extension()
pn.serve({"/spam/alot/": "Spam!"})
```
fails like this:
```
Launching server at http://localhost:49227
2023-10-07 14:01:16,537 404 GET /spam/static/js/bokeh.min.js?v=42698ba71b55a4634cf5e5295f093fe1788c0e0490180931b0805da09afea8a1250e1dc5138d8a90816cd53eb0749cc3eb145e7d9599734fcfb037e9ec223efd (::1) 0.42ms
2023-10-07 14:01:16,540 404 GET /spam/static/js/bokeh-gl.min.js?v=bb6b0349fc67ef87c67e017bb1124d47d870b3e58a6eece0f2a2ebd0485412e64def06faf8764c9157add17d41de5fe4dd33e9126b17c11cdf7bd6691d68f379 (::1) 0.31ms
2023-10-07 14:01:16,540 404 GET /spam/static/extensions/panel/panel.min.js?v=fb7e691caef7a59ca25d1fd9580b7a7d13146fd012a16bf047f631f7bf8a1326 (::1) 0.35ms
2023-10-07 14:01:16,541 404 GET /spam/static/js/bokeh-widgets.min.js?v=ddb73df6eae93f1df236ed7e56f58298ebb768a4ed15b068d68a2c492100a71fbf50245d6e4516a69de4f3665bf8cef661d4b1dcf756eea3804c58169fb0fb15 (::1) 0.43ms
2023-10-07 14:01:16,541 404 GET /spam/static/js/bokeh-tables.min.js?v=2627454bb0563a61287a353dcdbd4870dcd6f4cda89abca94325f271f910e04044091d74651ab12316c9b6260cd4d7c3765be89ac62e4e35166ea4ca9c564c6c (::1) 0.26ms
2023-10-07 14:01:16,548 WebSocket connection opened
2023-10-07 14:01:16,548 ServerConnection created
2023-10-07 14:01:16,569 404 GET /spam/static/extensions/panel/images/favicon.ico (::1) 0.19ms
```
As pointed out by @philippjfr in the thread https://github.com/holoviz/panel/issues/5576#issuecomment-1751694610, the problem is the trailing slash `/`; after removing it, the app works as intended.
I think if it's (a) really the case that Bokeh can never serve such URLs and (b) there is a robust way and clear place to put the URL validation logic, then it would be nice if you could give a better error message here, e.g. something like a "ValueError: invalid URL - trailing slash / not supported".
--- END ISSUE ---
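As a rough illustration of the validation the issue asks for, here is a minimal sketch. The helper name `_validate_slug` and its placement are assumptions made purely for illustration; they are not part of the Panel codebase, and the actual fix belongs wherever the served slugs are processed.
```python
def _validate_slug(slug: str) -> str:
    # Hypothetical helper (name and location are assumptions, not Panel API):
    # reject app slugs with a trailing slash, which Bokeh cannot route.
    if slug != '/' and slug.endswith('/'):
        raise ValueError(
            f"Invalid URL: trailing slash '/' used for {slug!r} not supported."
        )
    return slug
```
A check along these lines, applied to each key of the apps dictionary before the Bokeh server is constructed, would turn the silent 404s into an immediate, descriptive error.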
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/io/server.py`
Content:
```
1 """
2 Utilities for creating bokeh Server instances.
3 """
4 from __future__ import annotations
5
6 import asyncio
7 import datetime as dt
8 import gc
9 import html
10 import importlib
11 import inspect
12 import logging
13 import os
14 import pathlib
15 import signal
16 import sys
17 import threading
18 import traceback
19 import uuid
20 import weakref
21
22 from collections import OrderedDict
23 from contextlib import contextmanager
24 from functools import partial, wraps
25 from html import escape
26 from types import FunctionType, MethodType
27 from typing import (
28 TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional, Union,
29 )
30 from urllib.parse import urljoin, urlparse
31
32 import bokeh
33 import bokeh.command.util
34 import param
35 import tornado
36
37 # Bokeh imports
38 from bokeh.application import Application as BkApplication
39 from bokeh.application.handlers.code import (
40 CodeHandler, _monkeypatch_io, patch_curdoc,
41 )
42 from bokeh.application.handlers.function import FunctionHandler
43 from bokeh.core.json_encoder import serialize_json
44 from bokeh.core.templates import AUTOLOAD_JS, FILE, MACROS
45 from bokeh.core.validation import silence
46 from bokeh.core.validation.warnings import EMPTY_LAYOUT
47 from bokeh.embed.bundle import Script
48 from bokeh.embed.elements import script_for_render_items
49 from bokeh.embed.util import RenderItem
50 from bokeh.embed.wrappers import wrap_in_script_tag
51 from bokeh.io import curdoc
52 from bokeh.models import CustomJS
53 from bokeh.server.server import Server as BokehServer
54 from bokeh.server.urls import per_app_patterns, toplevel_patterns
55 from bokeh.server.views.autoload_js_handler import (
56 AutoloadJsHandler as BkAutoloadJsHandler,
57 )
58 from bokeh.server.views.doc_handler import DocHandler as BkDocHandler
59 from bokeh.server.views.root_handler import RootHandler as BkRootHandler
60 from bokeh.server.views.static_handler import StaticHandler
61 from bokeh.util.serialization import make_id
62 from bokeh.util.token import (
63 generate_jwt_token, generate_session_id, get_token_payload,
64 )
65 # Tornado imports
66 from tornado.ioloop import IOLoop
67 from tornado.web import (
68 HTTPError, RequestHandler, StaticFileHandler, authenticated,
69 )
70 from tornado.wsgi import WSGIContainer
71
72 # Internal imports
73 from ..config import config
74 from ..util import edit_readonly, fullpath
75 from ..util.warnings import warn
76 from .document import init_doc, unlocked, with_lock # noqa
77 from .liveness import LivenessHandler
78 from .loading import LOADING_INDICATOR_CSS_CLASS
79 from .logging import (
80 LOG_SESSION_CREATED, LOG_SESSION_DESTROYED, LOG_SESSION_LAUNCHING,
81 )
82 from .markdown import build_single_handler_application
83 from .profile import profile_ctx
84 from .reload import autoreload_watcher
85 from .resources import (
86 BASE_TEMPLATE, CDN_DIST, COMPONENT_PATH, ERROR_TEMPLATE, LOCAL_DIST,
87 Resources, _env, bundle_resources, patch_model_css, resolve_custom_path,
88 )
89 from .state import set_curdoc, state
90
91 logger = logging.getLogger(__name__)
92
93 if TYPE_CHECKING:
94 from bokeh.bundle import Bundle
95 from bokeh.core.types import ID
96 from bokeh.document.document import DocJson, Document
97 from bokeh.server.contexts import BokehSessionContext
98 from bokeh.server.session import ServerSession
99 from jinja2 import Template
100
101 from ..template.base import BaseTemplate
102 from ..viewable import Viewable, Viewer
103 from .location import Location
104
105 TViewable = Union[Viewable, Viewer, BaseTemplate]
106 TViewableFuncOrPath = Union[TViewable, Callable[[], TViewable], os.PathLike, str]
107
108 #---------------------------------------------------------------------
109 # Private API
110 #---------------------------------------------------------------------
111
112 INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html")
113 DEFAULT_TITLE = "Panel Application"
114
115 def _origin_url(url: str) -> str:
116 if url.startswith("http"):
117 url = url.split("//")[1]
118 return url
119
120 def _server_url(url: str, port: int) -> str:
121 if url.startswith("http"):
122 return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
123 else:
124 return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
125
126 def _eval_panel(
127 panel: TViewableFuncOrPath, server_id: str, title: str,
128 location: bool | Location, admin: bool, doc: Document
129 ):
130 from ..pane import panel as as_panel
131 from ..template import BaseTemplate
132
133 if config.global_loading_spinner:
134 doc.js_on_event(
135 'document_ready', CustomJS(code=f"""
136 const body = document.getElementsByTagName('body')[0]
137 body.classList.remove({LOADING_INDICATOR_CSS_CLASS!r}, {config.loading_spinner!r})
138 """)
139 )
140
141 # Set up instrumentation for logging sessions
142 logger.info(LOG_SESSION_LAUNCHING, id(doc))
143 def _log_session_destroyed(session_context):
144 logger.info(LOG_SESSION_DESTROYED, id(doc))
145 doc.on_session_destroyed(_log_session_destroyed)
146
147 with set_curdoc(doc):
148 if isinstance(panel, (FunctionType, MethodType)):
149 panel = panel()
150 if isinstance(panel, BaseTemplate):
151 doc = panel._modify_doc(server_id, title, doc, location)
152 else:
153 doc = as_panel(panel)._modify_doc(server_id, title, doc, location)
154 return doc
155
156 def async_execute(func: Callable[..., None]) -> None:
157 """
158 Wrap async event loop scheduling to ensure that with_lock flag
159 is propagated from function to partial wrapping it.
160 """
161 if not state.curdoc or not state.curdoc.session_context:
162 ioloop = IOLoop.current()
163 event_loop = ioloop.asyncio_loop # type: ignore
164 wrapper = state._handle_exception_wrapper(func)
165 if event_loop.is_running():
166 ioloop.add_callback(wrapper)
167 else:
168 event_loop.run_until_complete(wrapper())
169 return
170
171 if isinstance(func, partial) and hasattr(func.func, 'lock'):
172 unlock = not func.func.lock # type: ignore
173 else:
174 unlock = not getattr(func, 'lock', False)
175 curdoc = state.curdoc
176 @wraps(func)
177 async def wrapper(*args, **kw):
178 with set_curdoc(curdoc):
179 try:
180 return await func(*args, **kw)
181 except Exception as e:
182 state._handle_exception(e)
183 if unlock:
184 wrapper.nolock = True # type: ignore
185 state.curdoc.add_next_tick_callback(wrapper)
186
187 param.parameterized.async_executor = async_execute
188
189 def _initialize_session_info(session_context: 'BokehSessionContext'):
190 from ..config import config
191 session_id = session_context.id
192 sessions = state.session_info['sessions']
193 history = -1 if config._admin else config.session_history
194 if not config._admin and (history == 0 or session_id in sessions):
195 return
196
197 state.session_info['total'] += 1
198 if history > 0 and len(sessions) >= history:
199 old_history = list(sessions.items())
200 sessions = OrderedDict(old_history[-(history-1):])
201 state.session_info['sessions'] = sessions
202 sessions[session_id] = {
203 'launched': dt.datetime.now().timestamp(),
204 'started': None,
205 'rendered': None,
206 'ended': None,
207 'user_agent': session_context.request.headers.get('User-Agent')
208 }
209 state.param.trigger('session_info')
210
211 state._on_session_created_internal.append(_initialize_session_info)
212
213 #---------------------------------------------------------------------
214 # Bokeh patches
215 #---------------------------------------------------------------------
216
217
218 def html_page_for_render_items(
219 bundle: Bundle | tuple[str, str], docs_json: dict[ID, DocJson],
220 render_items: list[RenderItem], title: str, template: Template | str | None = None,
221 template_variables: dict[str, Any] = {}
222 ) -> str:
223 """
224 Render an HTML page from a template and Bokeh render items.
225
226 Arguments
227 ---------
228 bundle (tuple):
229 A tuple containing (bokehjs, bokehcss)
230 docs_json (JSON-like):
231 Serialized Bokeh Document
232 render_items (RenderItems)
233 Specific items to render from the document and where
234 title (str or None)
235 A title for the HTML page. If None, DEFAULT_TITLE is used
236 template (str or Template or None, optional) :
237 A Template to be used for the HTML page. If None, FILE is used.
238 template_variables (dict, optional):
239 Any Additional variables to pass to the template
240
241 Returns
242 -------
243 str
244 """
245 if title is None:
246 title = DEFAULT_TITLE
247
248 bokeh_js, bokeh_css = bundle
249
250 json_id = make_id()
251 json = escape(serialize_json(docs_json), quote=False)
252 json = wrap_in_script_tag(json, "application/json", json_id)
253
254 script = wrap_in_script_tag(script_for_render_items(json_id, render_items))
255
256 context = template_variables.copy()
257
258 context.update(dict(
259 title = title,
260 bokeh_js = bokeh_js,
261 bokeh_css = bokeh_css,
262 plot_script = json + script,
263 docs = render_items,
264 base = BASE_TEMPLATE,
265 macros = MACROS,
266 ))
267
268 if len(render_items) == 1:
269 context["doc"] = context["docs"][0]
270 context["roots"] = context["doc"].roots
271
272 if template is None:
273 template = BASE_TEMPLATE
274 elif isinstance(template, str):
275 template = _env.from_string("{% extends base %}\n" + template)
276
277 html = template.render(context)
278 return html
279
280 def server_html_page_for_session(
281 session: 'ServerSession',
282 resources: 'Resources',
283 title: str,
284 token: str | None = None,
285 template: str | Template = BASE_TEMPLATE,
286 template_variables: Optional[Dict[str, Any]] = None,
287 ) -> str:
288
289 # ALERT: Replace with better approach before Bokeh 3.x compatible release
290 if resources.mode == 'server':
291 dist_url = f'{state.rel_path}/{LOCAL_DIST}' if state.rel_path else LOCAL_DIST
292 else:
293 dist_url = CDN_DIST
294
295 doc = session.document
296 doc._template_variables['theme_name'] = config.theme
297 doc._template_variables['dist_url'] = dist_url
298 for root in doc.roots:
299 patch_model_css(root, dist_url=dist_url)
300
301 render_item = RenderItem(
302 token = token or session.token,
303 roots = doc.roots,
304 use_for_title = False,
305 )
306
307 if template_variables is None:
308 template_variables = {}
309
310 if template is FILE:
311 template = BASE_TEMPLATE
312
313 with set_curdoc(doc):
314 bundle = bundle_resources(doc.roots, resources)
315 html = html_page_for_render_items(
316 bundle, {}, [render_item], title, template=template,
317 template_variables=template_variables
318 )
319 if config.global_loading_spinner:
320 html = html.replace(
321 '<body>', f'<body class="{LOADING_INDICATOR_CSS_CLASS} pn-{config.loading_spinner}">'
322 )
323 return html
324
325
326 def autoload_js_script(doc, resources, token, element_id, app_path, absolute_url, absolute=False):
327 resources = Resources.from_bokeh(resources, absolute=absolute)
328 bundle = bundle_resources(doc.roots, resources)
329
330 render_items = [RenderItem(token=token, elementid=element_id, use_for_title=False)]
331 bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))
332
333 return AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)
334
335 def destroy_document(self, session):
336 """
337 Override for Document.destroy() without calling gc.collect directly.
338 The gc.collect() call is scheduled as a task, ensuring that when
339 multiple documents are destroyed in quick succession we do not
340 schedule excessive garbage collection.
341 """
342 if session is not None:
343 self.remove_on_change(session)
344
345 del self._roots
346 del self._theme
347 del self._template
348 self._session_context = None
349
350 self.callbacks.destroy()
351 self.models.destroy()
352 self.modules.destroy()
353
354 # Clear periodic callbacks
355 for cb in state._periodic.get(self, []):
356 cb.stop()
357
358 # Clean up pn.state to avoid tasks getting executed on dead session
359 for attr in dir(state):
360 # _param_watchers is deprecated in Param 2.0 and will raise a warning
361 if not attr.startswith('_') or attr == "_param_watchers":
362 continue
363 state_obj = getattr(state, attr)
364 if isinstance(state_obj, weakref.WeakKeyDictionary) and self in state_obj:
365 del state_obj[self]
366
367 # Schedule GC
368 at = dt.datetime.now() + dt.timedelta(seconds=5)
369 state.schedule_task('gc.collect', gc.collect, at=at)
370
371 del self.destroy
372
373 # Patch Server to attach task factory to asyncio loop and handle Admin server context
374 class Server(BokehServer):
375
376 def __init__(self, *args, **kwargs):
377 super().__init__(*args, **kwargs)
378 if state._admin_context:
379 state._admin_context._loop = self._loop
380
381 def start(self) -> None:
382 super().start()
383 if state._admin_context:
384 self._loop.add_callback(state._admin_context.run_load_hook)
385
386 def stop(self, wait: bool = True) -> None:
387 super().stop(wait=wait)
388 if state._admin_context:
389 state._admin_context.run_unload_hook()
390
391 bokeh.server.server.Server = Server
392
393
394 # Patch Application to handle session callbacks
395 class Application(BkApplication):
396
397 def __init__(self, *args, **kwargs):
398 self._admin = kwargs.pop('admin', None)
399 super().__init__(*args, **kwargs)
400
401 async def on_session_created(self, session_context):
402 with set_curdoc(session_context._document):
403 if self._admin is not None:
404 config._admin = self._admin
405 for cb in state._on_session_created_internal+state._on_session_created:
406 cb(session_context)
407 await super().on_session_created(session_context)
408
409 def initialize_document(self, doc):
410 super().initialize_document(doc)
411 if doc in state._templates and doc not in state._templates[doc]._documents:
412 template = state._templates[doc]
413 with set_curdoc(doc):
414 template.server_doc(title=template.title, location=True, doc=doc)
415
416 bokeh.command.util.Application = Application # type: ignore
417
418 class SessionPrefixHandler:
419
420 @contextmanager
421 def _session_prefix(self):
422 prefix = self.request.uri.replace(self.application_context._url, '')
423 if not prefix.endswith('/'):
424 prefix += '/'
425 base_url = urljoin('/', prefix)
426 rel_path = '/'.join(['..'] * self.application_context._url.strip('/').count('/'))
427 old_url, old_rel = state.base_url, state.rel_path
428
429 # Handle autoload.js absolute paths
430 abs_url = self.get_argument('bokeh-absolute-url', default=None)
431 if abs_url is not None:
432 rel_path = abs_url.replace(self.application_context._url, '')
433
434 with edit_readonly(state):
435 state.base_url = base_url
436 state.rel_path = rel_path
437 try:
438 yield
439 finally:
440 with edit_readonly(state):
441 state.base_url = old_url
442 state.rel_path = old_rel
443
444 class LoginUrlMixin:
445 """
446 Overrides the AuthRequestHandler.get_login_url implementation to
447 correctly handle prefixes.
448 """
449
450 def get_login_url(self):
451 ''' Delegates to``get_login_url`` method of the auth provider, or the
452 ``login_url`` attribute.
453
454 '''
455 if self.application.auth_provider.get_login_url is not None:
456 return '.' + self.application.auth_provider.get_login_url(self)
457 if self.application.auth_provider.login_url is not None:
458 return '.' + self.application.auth_provider.login_url
459 raise RuntimeError('login_url or get_login_url() must be supplied when authentication hooks are enabled')
460
461
462 # Patch Bokeh DocHandler URL
463 class DocHandler(LoginUrlMixin, BkDocHandler, SessionPrefixHandler):
464
465 @authenticated
466 async def get_session(self):
467 from ..config import config
468 path = self.request.path
469 session = None
470 if config.reuse_sessions and path in state._session_key_funcs:
471 key = state._session_key_funcs[path](self.request)
472 session = state._sessions.get(key)
473 if session is None:
474 session = await super().get_session()
475 with set_curdoc(session.document):
476 if config.reuse_sessions:
477 key_func = config.session_key_func or (lambda r: (r.path, r.arguments.get('theme', [b'default'])[0].decode('utf-8')))
478 state._session_key_funcs[path] = key_func
479 key = key_func(self.request)
480 state._sessions[key] = session
481 session.block_expiration()
482 return session
483
484 @authenticated
485 async def get(self, *args, **kwargs):
486 app = self.application
487 with self._session_prefix():
488 key_func = state._session_key_funcs.get(self.request.path, lambda r: r.path)
489 old_request = key_func(self.request) in state._sessions
490 session = await self.get_session()
491 if old_request and state._sessions.get(key_func(self.request)) is session:
492 session_id = generate_session_id(
493 secret_key=self.application.secret_key,
494 signed=self.application.sign_sessions
495 )
496 payload = get_token_payload(session.token)
497 del payload['session_expiry']
498 token = generate_jwt_token(
499 session_id,
500 secret_key=app.secret_key,
501 signed=app.sign_sessions,
502 expiration=app.session_token_expiration,
503 extra_payload=payload
504 )
505 else:
506 token = session.token
507 logger.info(LOG_SESSION_CREATED, id(session.document))
508 with set_curdoc(session.document):
509 resources = Resources.from_bokeh(self.application.resources())
510 auth_cb = config.authorize_callback
511 authorized = False
512 if auth_cb:
513 auth_cb = config.authorize_callback
514 auth_params = inspect.signature(auth_cb).parameters
515 if len(auth_params) == 1:
516 auth_args = (state.user_info,)
517 elif len(auth_params) == 2:
518 auth_args = (state.user_info, self.request.path,)
519 else:
520 raise RuntimeError(
521 'Authorization callback must accept either 1) a single argument '
522 'which is the user name or 2) two arguments which includes the '
523 'user name and the url path the user is trying to access.'
524 )
525 auth_error = f'{state.user} is not authorized to access this application.'
526 try:
527 authorized = auth_cb(*auth_args)
528 if isinstance(authorized, str):
529 self.redirect(authorized)
530 return
531 elif not authorized:
532 auth_error = (
533 f'Authorization callback errored. Could not validate user name "{state.user}" '
534 f'for the given app "{self.request.path}".'
535 )
536 if authorized:
537 auth_error = None
538 except Exception:
539 auth_error = f'Authorization callback errored. Could not validate user {state.user}.'
540 else:
541 authorized = True
542
543 if authorized:
544 page = server_html_page_for_session(
545 session, resources=resources, title=session.document.title,
546 token=token, template=session.document.template,
547 template_variables=session.document.template_variables,
548 )
549 else:
550 if config.auth_template:
551 with open(config.auth_template) as f:
552 template = _env.from_string(f.read())
553 else:
554 template = ERROR_TEMPLATE
555 page = template.render(
556 npm_cdn=config.npm_cdn,
557 title='Panel: Authorization Error',
558 error_type='Authorization Error',
559 error='User is not authorized.',
560 error_msg=auth_error
561 )
562 self.set_header("Content-Type", 'text/html')
563 self.write(page)
564
565 per_app_patterns[0] = (r'/?', DocHandler)
566
567 # Patch Bokeh Autoload handler
568 class AutoloadJsHandler(BkAutoloadJsHandler, SessionPrefixHandler):
569 ''' Implements a custom Tornado handler for the autoload JS chunk
570
571 '''
572
573 async def get(self, *args, **kwargs) -> None:
574 element_id = self.get_argument("bokeh-autoload-element", default=None)
575 if not element_id:
576 self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
577 return
578
579 app_path = self.get_argument("bokeh-app-path", default="/")
580 absolute_url = self.get_argument("bokeh-absolute-url", default=None)
581
582 if absolute_url:
583 server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))
584 else:
585 server_url = None
586
587 with self._session_prefix():
588 session = await self.get_session()
589 with set_curdoc(session.document):
590 resources = Resources.from_bokeh(
591 self.application.resources(server_url), absolute=True
592 )
593 js = autoload_js_script(
594 session.document, resources, session.token, element_id,
595 app_path, absolute_url, absolute=True
596 )
597
598 self.set_header("Content-Type", 'application/javascript')
599 self.write(js)
600
601 per_app_patterns[3] = (r'/autoload.js', AutoloadJsHandler)
602
603 class RootHandler(LoginUrlMixin, BkRootHandler):
604 """
605 Custom RootHandler that provides the CDN_DIST directory as a
606 template variable.
607 """
608
609 def render(self, *args, **kwargs):
610 kwargs['PANEL_CDN'] = CDN_DIST
611 return super().render(*args, **kwargs)
612
613 toplevel_patterns[0] = (r'/?', RootHandler)
614 bokeh.server.tornado.RootHandler = RootHandler
615
616
617 class ComponentResourceHandler(StaticFileHandler):
618 """
619 A handler that serves local resources relative to a Python module.
620 The handler resolves a specific Panel component by module reference
621 and name, then resolves an attribute on that component to check
622 if it contains the requested resource path.
623
624 /<endpoint>/<module>/<class>/<attribute>/<path>
625 """
626
627 _resource_attrs = [
628 '__css__', '__javascript__', '__js_module__', '__javascript_modules__', '_resources',
629 '_css', '_js', 'base_css', 'css', '_stylesheets', 'modifiers'
630 ]
631
632 def initialize(self, path: Optional[str] = None, default_filename: Optional[str] = None):
633 self.root = path
634 self.default_filename = default_filename
635
636 def parse_url_path(self, path: str) -> str:
637 """
638 Resolves the resource the URL pattern refers to.
639 """
640 parts = path.split('/')
641 if len(parts) < 4:
642 raise HTTPError(400, 'Malformed URL')
643 mod, cls, rtype, *subpath = parts
644 try:
645 module = importlib.import_module(mod)
646 except ModuleNotFoundError:
647 raise HTTPError(404, 'Module not found')
648 try:
649 component = getattr(module, cls)
650 except AttributeError:
651 raise HTTPError(404, 'Component not found')
652
653 # May only access resources listed in specific attributes
654 if rtype not in self._resource_attrs:
655 raise HTTPError(403, 'Requested resource type not valid.')
656
657 try:
658 resources = getattr(component, rtype)
659 except AttributeError:
660 raise HTTPError(404, 'Resource type not found')
661
662 # Handle template resources
663 if rtype == '_resources':
664 rtype = subpath[0]
665 subpath = subpath[1:]
666 if rtype not in resources:
667 raise HTTPError(404, 'Resource type not found')
668 resources = resources[rtype]
669 rtype = f'_resources/{rtype}'
670 elif rtype == 'modifiers':
671 resources = [
672 st for rs in resources.values() for st in rs.get('stylesheets', [])
673 if isinstance(st, str)
674 ]
675
676 if isinstance(resources, dict):
677 resources = list(resources.values())
678 elif isinstance(resources, (str, pathlib.PurePath)):
679 resources = [resources]
680 resources = [
681 str(resolve_custom_path(component, resource, relative=True)).replace(os.path.sep, '/')
682 for resource in resources
683 ]
684
685 rel_path = '/'.join(subpath)
686
687 # Important: May only access resources explicitly listed on the component
688 # Otherwise this potentially exposes all files to the web
689 if rel_path not in resources:
690 raise HTTPError(403, 'Requested resource was not listed.')
691
692 if not module.__file__:
693 raise HTTPError(404, 'Requested module does not reference a file.')
694
695 return str(pathlib.Path(module.__file__).parent / rel_path)
696
697 @classmethod
698 def get_absolute_path(cls, root: str, path: str) -> str:
699 return path
700
701 def validate_absolute_path(self, root: str, absolute_path: str) -> str:
702 if not os.path.exists(absolute_path):
703 raise HTTPError(404)
704 if not os.path.isfile(absolute_path):
705 raise HTTPError(403, "%s is not a file", self.path)
706 return absolute_path
707
708
709 def modify_document(self, doc: 'Document'):
710 from bokeh.io.doc import set_curdoc as bk_set_curdoc
711
712 from ..config import config
713
714 logger.info(LOG_SESSION_LAUNCHING, id(doc))
715
716 if config.autoreload:
717 path = self._runner.path
718 argv = self._runner._argv
719 handler = type(self)(filename=path, argv=argv)
720 self._runner = handler._runner
721
722 module = self._runner.new_module()
723
724 # If no module was returned it means the code runner has some permanent
725 # unfixable problem, e.g. the configured source code has a syntax error
726 if module is None:
727 return
728
729 # One reason modules are stored is to prevent the module
730 # from being gc'd before the document is. A symptom of a
731 # gc'd module is that its globals become None. Additionally
732 # stored modules are used to provide correct paths to
733 # custom models resolver.
734 sys.modules[module.__name__] = module
735 doc.modules._modules.append(module)
736
737 try:
738 old_doc = curdoc()
739 except RuntimeError:
740 old_doc = None
741 bk_set_curdoc(doc)
742
743 if config.autoreload:
744 set_curdoc(doc)
745 state.onload(autoreload_watcher)
746
747 sessions = []
748
749 try:
750 def post_check():
751 newdoc = curdoc()
752 # Do not let curdoc track modules when autoreload is enabled
753 # otherwise it will erroneously complain that there is
754 # a memory leak
755 if config.autoreload:
756 newdoc.modules._modules = []
757
758 # script is supposed to edit the doc not replace it
759 if newdoc is not doc:
760 raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
761
762 def handle_exception(handler, e):
763 from bokeh.application.handlers.handler import handle_exception
764
765 from ..pane import Alert
766
767 # Clean up
768 del sys.modules[module.__name__]
769
770 if hasattr(doc, 'modules'):
771 doc.modules._modules.remove(module)
772 else:
773 doc._modules.remove(module)
774 bokeh.application.handlers.code_runner.handle_exception = handle_exception
775 tb = html.escape(traceback.format_exc()).replace('\033[1m', '<b>').replace('\033[0m', '</b>')
776
777 # Serve error
778 e_msg = str(e).replace('\033[1m', '<b>').replace('\033[0m', '</b>')
779 Alert(
780 f'<b>{type(e).__name__}</b>: {e_msg}\n<pre style="overflow-y: auto">{tb}</pre>',
781 alert_type='danger', margin=5, sizing_mode='stretch_width'
782 ).servable()
783
784 if config.autoreload:
785 bokeh.application.handlers.code_runner.handle_exception = handle_exception
786
787 state._launching.append(doc)
788 with _monkeypatch_io(self._loggers):
789 with patch_curdoc(doc):
790 with profile_ctx(config.profiler) as sessions:
791 self._runner.run(module, post_check)
792
793 def _log_session_destroyed(session_context):
794 logger.info(LOG_SESSION_DESTROYED, id(doc))
795
796 doc.on_session_destroyed(_log_session_destroyed)
797 doc.destroy = partial(destroy_document, doc) # type: ignore
798 finally:
799 state._launching.remove(doc)
800 if config.profiler:
801 try:
802 path = doc.session_context.request.path
803 state._profiles[(path, config.profiler)] += sessions
804 state.param.trigger('_profiles')
805 except Exception:
806 pass
807 if old_doc is not None:
808 bk_set_curdoc(old_doc)
809
810 CodeHandler.modify_document = modify_document # type: ignore
811
812 # Copied from bokeh 2.4.0, to fix directly in bokeh at some point.
813 def create_static_handler(prefix, key, app):
814 # patch
815 key = '/__patchedroot' if key == '/' else key
816
817 route = prefix
818 route += "/static/(.*)" if key == "/" else key + "/static/(.*)"
819 if app.static_path is not None:
820 return (route, StaticFileHandler, {"path" : app.static_path})
821 return (route, StaticHandler, {})
822
823 bokeh.server.tornado.create_static_handler = create_static_handler
824
825 #---------------------------------------------------------------------
826 # Async patches
827 #---------------------------------------------------------------------
828
829 # Bokeh 2.4.x patches the asyncio event loop policy but Tornado 6.1
830 # support the WindowsProactorEventLoopPolicy so we restore it,
831 # unless we detect we are running on jupyter_server.
832 if (
833 sys.platform == 'win32' and
834 sys.version_info[:3] >= (3, 8, 0) and
835 tornado.version_info >= (6, 1) and
836 type(asyncio.get_event_loop_policy()) is asyncio.WindowsSelectorEventLoopPolicy and
837 (('jupyter_server' not in sys.modules and
838 'jupyter_client' not in sys.modules) or
839 'pytest' in sys.modules)
840 ):
841 asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
842
843 #---------------------------------------------------------------------
844 # Public API
845 #---------------------------------------------------------------------
846
847 def serve(
848 panels: TViewableFuncOrPath | Mapping[str, TViewableFuncOrPath],
849 port: int = 0,
850 address: Optional[str] = None,
851 websocket_origin: Optional[str | list[str]] = None,
852 loop: Optional[IOLoop] = None,
853 show: bool = True,
854 start: bool = True,
855 title: Optional[str] = None,
856 verbose: bool = True,
857 location: bool = True,
858 threaded: bool = False,
859 admin: bool = False,
860 **kwargs
861 ) -> StoppableThread | Server:
862 """
863 Allows serving one or more panel objects on a single server.
864 The panels argument should be either a Panel object or a function
865 returning a Panel object or a dictionary of these two. If a
866 dictionary is supplied the keys represent the slugs at which
867 each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`
868 will serve apps at /app and /app2 on the server.
869
870 Reference: https://panel.holoviz.org/user_guide/Server_Configuration.html#serving-multiple-apps
871
872 Arguments
873 ---------
874 panel: Viewable, function or {str: Viewable or function}
875 A Panel object, a function returning a Panel object or a
876 dictionary mapping from the URL slug to either.
877 port: int (optional, default=0)
878 Allows specifying a specific port
879 address : str
880 The address the server should listen on for HTTP requests.
881 websocket_origin: str or list(str) (optional)
882 A list of hosts that can connect to the websocket.
883
884 This is typically required when embedding a server app in
885 an external web site.
886
887 If None, "localhost" is used.
888 loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
889 The tornado IOLoop to run the Server on
890 show : boolean (optional, default=True)
891 Whether to open the server in a new browser tab on start
892 start : boolean(optional, default=True)
893 Whether to start the Server
894 title: str or {str: str} (optional, default=None)
895 An HTML title for the application or a dictionary mapping
896 from the URL slug to a customized title
897 verbose: boolean (optional, default=True)
898 Whether to print the address and port
899 location : boolean or panel.io.location.Location
900 Whether to create a Location component to observe and
901 set the URL location.
902 threaded: boolean (default=False)
903 Whether to start the server on a new Thread
904 admin: boolean (default=False)
905 Whether to enable the admin panel
906 kwargs: dict
907 Additional keyword arguments to pass to Server instance
908 """
909 # Empty layout are valid and the Bokeh warning is silenced as usually
910 # not relevant to Panel users.
911 silence(EMPTY_LAYOUT, True)
912 kwargs = dict(kwargs, **dict(
913 port=port, address=address, websocket_origin=websocket_origin,
914 loop=loop, show=show, start=start, title=title, verbose=verbose,
915 location=location, admin=admin
916 ))
917 if threaded:
918 kwargs['loop'] = loop = IOLoop(make_current=False) if loop is None else loop
919 server = StoppableThread(
920 target=get_server, io_loop=loop, args=(panels,), kwargs=kwargs
921 )
922 server_id = kwargs.get('server_id', uuid.uuid4().hex)
923 state._threads[server_id] = server
924 server.start()
925 else:
926 return get_server(panels, **kwargs)
927 return server
928
929
930 class ProxyFallbackHandler(RequestHandler):
931 """A `RequestHandler` that wraps another HTTP server callback and
932 proxies the subpath.
933 """
934
935 def initialize(self, fallback, proxy=None):
936 self.fallback = fallback
937 self.proxy = proxy
938
939 def prepare(self):
940 if self.proxy:
941 self.request.path = self.request.path.replace(self.proxy, '')
942 self.fallback(self.request)
943 self._finished = True
944 self.on_finish()
945
946
947 def get_static_routes(static_dirs):
948 """
949 Returns a list of tornado routes of StaticFileHandlers given a
950 dictionary of slugs and file paths to serve.
951 """
952 patterns = []
953 for slug, path in static_dirs.items():
954 if not slug.startswith('/'):
955 slug = '/' + slug
956 if slug == '/static':
957 raise ValueError("Static file route may not use /static "
958 "this is reserved for internal use.")
959 path = fullpath(path)
960 if not os.path.isdir(path):
961 raise ValueError("Cannot serve non-existent path %s" % path)
962 patterns.append(
963 (r"%s/(.*)" % slug, StaticFileHandler, {"path": path})
964 )
965 patterns.append((
966 f'/{COMPONENT_PATH}(.*)', ComponentResourceHandler, {}
967 ))
968 return patterns
969
970 def get_server(
971 panel: TViewableFuncOrPath | Mapping[str, TViewableFuncOrPath],
972 port: int = 0,
973 address: Optional[str] = None,
974 websocket_origin: Optional[str | list[str]] = None,
975 loop: Optional[IOLoop] = None,
976 show: bool = False,
977 start: bool = False,
978 title: bool = None,
979 verbose: bool = False,
980 location: bool | Location = True,
981 admin: bool = False,
982 static_dirs: Mapping[str, str] = {},
983 basic_auth: str = None,
984 oauth_provider: Optional[str] = None,
985 oauth_key: Optional[str] = None,
986 oauth_secret: Optional[str] = None,
987 oauth_redirect_uri: Optional[str] = None,
988 oauth_extra_params: Mapping[str, str] = {},
989 oauth_error_template: Optional[str] = None,
990 cookie_secret: Optional[str] = None,
991 oauth_encryption_key: Optional[str] = None,
992 logout_template: Optional[str] = None,
993 session_history: Optional[int] = None,
994 liveness: bool | str = False,
995 **kwargs
996 ) -> Server:
997 """
998 Returns a Server instance with this panel attached as the root
999 app.
1000
1001 Arguments
1002 ---------
1003 panel: Viewable, function or {str: Viewable}
1004 A Panel object, a function returning a Panel object or a
1005 dictionary mapping from the URL slug to either.
1006 port: int (optional, default=0)
1007 Allows specifying a specific port
1008 address : str
1009 The address the server should listen on for HTTP requests.
1010 websocket_origin: str or list(str) (optional)
1011 A list of hosts that can connect to the websocket.
1012
1013 This is typically required when embedding a server app in
1014 an external web site.
1015
1016 If None, "localhost" is used.
1017 loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
1018 The tornado IOLoop to run the Server on.
1019 show : boolean (optional, default=False)
1020 Whether to open the server in a new browser tab on start.
1021 start : boolean(optional, default=False)
1022 Whether to start the Server.
1023 title : str or {str: str} (optional, default=None)
1024 An HTML title for the application or a dictionary mapping
1025 from the URL slug to a customized title.
1026 verbose: boolean (optional, default=False)
1027 Whether to report the address and port.
1028 location : boolean or panel.io.location.Location
1029 Whether to create a Location component to observe and
1030 set the URL location.
1031 admin: boolean (default=False)
1032 Whether to enable the admin panel
1033 static_dirs: dict (optional, default={})
1034 A dictionary of routes and local paths to serve as static file
1035 directories on those routes.
1036 basic_auth: str (optional, default=None)
1037 Password or filepath to use with basic auth provider.
1038 oauth_provider: str
1039 One of the available OAuth providers
1040 oauth_key: str (optional, default=None)
1041 The public OAuth identifier
1042 oauth_secret: str (optional, default=None)
1043 The client secret for the OAuth provider
1044 oauth_redirect_uri: Optional[str] = None,
1045 Overrides the default OAuth redirect URI
1046 oauth_extra_params: dict (optional, default={})
1047 Additional information for the OAuth provider
1048 oauth_error_template: str (optional, default=None)
1049 Jinja2 template used when displaying authentication errors.
1050 cookie_secret: str (optional, default=None)
1051 A random secret string to sign cookies (required for OAuth)
1052 oauth_encryption_key: str (optional, default=False)
1053 A random encryption key used for encrypting OAuth user
1054 information and access tokens.
1055 logout_template: str (optional, default=None)
1056 Jinja2 template served when viewing the logout endpoint when
1057 authentication is enabled.
1058 session_history: int (optional, default=None)
1059 The amount of session history to accumulate. If set to non-zero
1060 and non-None value will launch a REST endpoint at
1061 /rest/session_info, which returns information about the session
1062 history.
1063 liveness: bool | str (optional, default=False)
1064 Whether to add a liveness endpoint. If a string is provided
1065 then this will be used as the endpoint, otherwise the endpoint
1066 will be hosted at /liveness.
1067 kwargs: dict
1068 Additional keyword arguments to pass to Server instance.
1069
1070 Returns
1071 -------
1072 server : panel.io.server.Server
1073 Bokeh Server instance running this panel
1074 """
1075 from ..config import config
1076 from .rest import REST_PROVIDERS
1077
1078 server_id = kwargs.pop('server_id', uuid.uuid4().hex)
1079 kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
1080 if isinstance(panel, dict):
1081 apps = {}
1082 for slug, app in panel.items():
1083 if isinstance(title, dict):
1084 try:
1085 title_ = title[slug]
1086 except KeyError:
1087 raise KeyError(
1088 "Keys of the title dictionary and of the apps "
1089 f"dictionary must match. No {slug} key found in the "
1090 "title dictionary.")
1091 else:
1092 title_ = title
1093 slug = slug if slug.startswith('/') else '/'+slug
1094 if 'flask' in sys.modules:
1095 from flask import Flask
1096 if isinstance(app, Flask):
1097 wsgi = WSGIContainer(app)
1098 if slug == '/':
1099 raise ValueError('Flask apps must be served on a subpath.')
1100 if not slug.endswith('/'):
1101 slug += '/'
1102 extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
1103 dict(fallback=wsgi, proxy=slug)))
1104 continue
1105 if isinstance(app, pathlib.Path):
1106 app = str(app) # enables serving apps from Paths
1107 if (isinstance(app, str) and (app.endswith(".py") or app.endswith(".ipynb") or app.endswith('.md'))
1108 and os.path.isfile(app)):
1109 apps[slug] = app = build_single_handler_application(app)
1110 app._admin = admin
1111 elif isinstance(app, BkApplication):
1112 apps[slug] = app
1113 else:
1114 handler = FunctionHandler(partial(_eval_panel, app, server_id, title_, location, admin))
1115 apps[slug] = Application(handler, admin=admin)
1116 else:
1117 if isinstance(panel, pathlib.Path):
1118 panel = str(panel) # enables serving apps from Paths
1119 if (isinstance(panel, str) and (panel.endswith(".py") or panel.endswith(".ipynb") or panel.endswith('.md'))
1120 and os.path.isfile(panel)):
1121 apps = {'/': build_single_handler_application(panel)}
1122 else:
1123 handler = FunctionHandler(partial(_eval_panel, panel, server_id, title, location, admin))
1124 apps = {'/': Application(handler, admin=admin)}
1125
1126 if admin:
1127 if '/admin' in apps:
1128 raise ValueError(
1129 'Cannot enable admin panel because another app is being served '
1130 'on the /admin endpoint'
1131 )
1132 from .admin import admin_panel
1133 admin_handler = FunctionHandler(admin_panel)
1134 apps['/admin'] = Application(admin_handler)
1135
1136 extra_patterns += get_static_routes(static_dirs)
1137
1138 if session_history is not None:
1139 config.session_history = session_history
1140 if config.session_history != 0:
1141 pattern = REST_PROVIDERS['param']([], 'rest')
1142 extra_patterns.extend(pattern)
1143 state.publish('session_info', state, ['session_info'])
1144
1145 if liveness:
1146 liveness_endpoint = 'liveness' if isinstance(liveness, bool) else liveness
1147 extra_patterns += [(r"/%s" % liveness_endpoint, LivenessHandler, dict(applications=apps))]
1148
1149 opts = dict(kwargs)
1150 if loop:
1151 asyncio.set_event_loop(loop.asyncio_loop)
1152 opts['io_loop'] = loop
1153 elif opts.get('num_procs', 1) == 1:
1154 opts['io_loop'] = IOLoop.current()
1155
1156 if 'index' not in opts:
1157 opts['index'] = INDEX_HTML
1158
1159 if address is not None:
1160 opts['address'] = address
1161
1162 if websocket_origin:
1163 if not isinstance(websocket_origin, list):
1164 websocket_origin = [websocket_origin]
1165 opts['allow_websocket_origin'] = websocket_origin
1166
1167 # Configure OAuth
1168 from ..config import config
1169 server_config = {}
1170 if basic_auth:
1171 from ..auth import BasicProvider
1172 server_config['basic_auth'] = basic_auth
1173 basic_login_template = kwargs.pop('basic_login_template', None)
1174 opts['auth_provider'] = BasicProvider(
1175 basic_login_template,
1176 logout_template=logout_template
1177 )
1178 elif oauth_provider:
1179 from ..auth import OAuthProvider
1180 config.oauth_provider = oauth_provider # type: ignore
1181 opts['auth_provider'] = OAuthProvider(
1182 error_template=oauth_error_template,
1183 logout_template=logout_template
1184 )
1185 if oauth_key:
1186 config.oauth_key = oauth_key # type: ignore
1187 if oauth_secret:
1188 config.oauth_secret = oauth_secret # type: ignore
1189 if oauth_extra_params:
1190 config.oauth_extra_params = oauth_extra_params # type: ignore
1191 if cookie_secret:
1192 config.cookie_secret = cookie_secret # type: ignore
1193 if oauth_redirect_uri:
1194 config.oauth_redirect_uri = oauth_redirect_uri # type: ignore
1195 opts['cookie_secret'] = config.cookie_secret
1196
1197 server = Server(apps, port=port, **opts)
1198 if verbose:
1199 address = server.address or 'localhost'
1200 url = f"http://{address}:{server.port}{server.prefix}"
1201 print(f"Launching server at {url}")
1202
1203 state._servers[server_id] = (server, panel, [])
1204 state._server_config[server._tornado] = server_config
1205
1206 if show:
1207 def show_callback():
1208 server.show('/login' if config.oauth_provider else '/')
1209 server.io_loop.add_callback(show_callback)
1210
1211 def sig_exit(*args, **kwargs):
1212 server.io_loop.add_callback_from_signal(do_stop)
1213
1214 def do_stop(*args, **kwargs):
1215 server.io_loop.stop()
1216
1217 try:
1218 signal.signal(signal.SIGINT, sig_exit)
1219 except ValueError:
1220 pass # Can't use signal on a thread
1221
1222 if start:
1223 server.start()
1224 try:
1225 server.io_loop.start()
1226 except RuntimeError:
1227 pass
1228 except TypeError:
1229 warn(
1230 "IOLoop couldn't be started. Ensure it is started by "
1231 "process invoking the panel.io.server.serve."
1232 )
1233 return server
1234
1235
1236 class StoppableThread(threading.Thread):
1237 """Thread class with a stop() method."""
1238
1239 def __init__(self, io_loop: IOLoop, **kwargs):
1240 super().__init__(**kwargs)
1241 self.io_loop = io_loop
1242
1243 def run(self) -> None:
1244 if hasattr(self, '_target'):
1245 target, args, kwargs = self._target, self._args, self._kwargs # type: ignore
1246 else:
1247 target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs # type: ignore
1248 if not target:
1249 return
1250 bokeh_server = None
1251 try:
1252 bokeh_server = target(*args, **kwargs)
1253 finally:
1254 if isinstance(bokeh_server, Server):
1255 try:
1256 bokeh_server.stop()
1257 except Exception:
1258 pass
1259 if hasattr(self, '_target'):
1260 del self._target, self._args, self._kwargs # type: ignore
1261 else:
1262 del self._Thread__target, self._Thread__args, self._Thread__kwargs # type: ignore
1263
1264 def stop(self) -> None:
1265 self.io_loop.add_callback(self.io_loop.stop)
1266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/io/server.py b/panel/io/server.py
--- a/panel/io/server.py
+++ b/panel/io/server.py
@@ -1080,6 +1080,8 @@
if isinstance(panel, dict):
apps = {}
for slug, app in panel.items():
+ if slug.endswith('/') and not slug == '/':
+ raise ValueError(f"Invalid URL: trailing slash '/' used for {slug!r} not supported.")
if isinstance(title, dict):
try:
title_ = title[slug]
| {"golden_diff": "diff --git a/panel/io/server.py b/panel/io/server.py\n--- a/panel/io/server.py\n+++ b/panel/io/server.py\n@@ -1080,6 +1080,8 @@\n if isinstance(panel, dict):\n apps = {}\n for slug, app in panel.items():\n+ if slug.endswith('/') and not slug == '/':\n+ raise ValueError(f\"Invalid URL: trailing slash '/' used for {slug!r} not supported.\")\n if isinstance(title, dict):\n try:\n title_ = title[slug]\n", "issue": "Improve error on invalid serve URL\nThe following simple app:\r\n\r\n```python\r\nimport panel as pn\r\npn.extension()\r\npn.serve({\"/spam/alot/\": \"Spam!\"})\r\n```\r\n\r\nfails like this:\r\n```\r\nLaunching server at http://localhost:49227\r\n2023-10-07 14:01:16,537 404 GET /spam/static/js/bokeh.min.js?v=42698ba71b55a4634cf5e5295f093fe1788c0e0490180931b0805da09afea8a1250e1dc5138d8a90816cd53eb0749cc3eb145e7d9599734fcfb037e9ec223efd (::1) 0.42ms\r\n2023-10-07 14:01:16,540 404 GET /spam/static/js/bokeh-gl.min.js?v=bb6b0349fc67ef87c67e017bb1124d47d870b3e58a6eece0f2a2ebd0485412e64def06faf8764c9157add17d41de5fe4dd33e9126b17c11cdf7bd6691d68f379 (::1) 0.31ms\r\n2023-10-07 14:01:16,540 404 GET /spam/static/extensions/panel/panel.min.js?v=fb7e691caef7a59ca25d1fd9580b7a7d13146fd012a16bf047f631f7bf8a1326 (::1) 0.35ms\r\n2023-10-07 14:01:16,541 404 GET /spam/static/js/bokeh-widgets.min.js?v=ddb73df6eae93f1df236ed7e56f58298ebb768a4ed15b068d68a2c492100a71fbf50245d6e4516a69de4f3665bf8cef661d4b1dcf756eea3804c58169fb0fb15 (::1) 0.43ms\r\n2023-10-07 14:01:16,541 404 GET /spam/static/js/bokeh-tables.min.js?v=2627454bb0563a61287a353dcdbd4870dcd6f4cda89abca94325f271f910e04044091d74651ab12316c9b6260cd4d7c3765be89ac62e4e35166ea4ca9c564c6c (::1) 0.26ms\r\n2023-10-07 14:01:16,548 WebSocket connection opened\r\n2023-10-07 14:01:16,548 ServerConnection created\r\n2023-10-07 14:01:16,569 404 GET /spam/static/extensions/panel/images/favicon.ico (::1) 0.19ms\r\n```\r\n\r\nAs pointed out by @philippjfr in the thread https://github.com/holoviz/panel/issues/5576#issuecomment-1751694610 the problem is the trailing slash `/`, after removing it the app works as intended.\r\n\r\nI think if it's (a) really the case that Bokeh can never serve such URLs and (b) there is a robust way and clear place to put the URL validation logic, then it would be nice if you could give a better error massage here, e.g. 
something like a \"ValueError: invalid URL - trailing slash / not supported\".\n", "before_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport datetime as dt\nimport gc\nimport html\nimport importlib\nimport inspect\nimport logging\nimport os\nimport pathlib\nimport signal\nimport sys\nimport threading\nimport traceback\nimport uuid\nimport weakref\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom functools import partial, wraps\nfrom html import escape\nfrom types import FunctionType, MethodType\nfrom typing import (\n TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional, Union,\n)\nfrom urllib.parse import urljoin, urlparse\n\nimport bokeh\nimport bokeh.command.util\nimport param\nimport tornado\n\n# Bokeh imports\nfrom bokeh.application import Application as BkApplication\nfrom bokeh.application.handlers.code import (\n CodeHandler, _monkeypatch_io, patch_curdoc,\n)\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.core.json_encoder import serialize_json\nfrom bokeh.core.templates import AUTOLOAD_JS, FILE, MACROS\nfrom bokeh.core.validation import silence\nfrom bokeh.core.validation.warnings import EMPTY_LAYOUT\nfrom bokeh.embed.bundle import Script\nfrom bokeh.embed.elements import script_for_render_items\nfrom bokeh.embed.util import RenderItem\nfrom bokeh.embed.wrappers import wrap_in_script_tag\nfrom bokeh.io import curdoc\nfrom bokeh.models import CustomJS\nfrom bokeh.server.server import Server as BokehServer\nfrom bokeh.server.urls import per_app_patterns, toplevel_patterns\nfrom bokeh.server.views.autoload_js_handler import (\n AutoloadJsHandler as BkAutoloadJsHandler,\n)\nfrom bokeh.server.views.doc_handler import DocHandler as BkDocHandler\nfrom bokeh.server.views.root_handler import RootHandler as BkRootHandler\nfrom bokeh.server.views.static_handler import StaticHandler\nfrom bokeh.util.serialization import make_id\nfrom bokeh.util.token import (\n generate_jwt_token, generate_session_id, get_token_payload,\n)\n# Tornado imports\nfrom tornado.ioloop import IOLoop\nfrom tornado.web import (\n HTTPError, RequestHandler, StaticFileHandler, authenticated,\n)\nfrom tornado.wsgi import WSGIContainer\n\n# Internal imports\nfrom ..config import config\nfrom ..util import edit_readonly, fullpath\nfrom ..util.warnings import warn\nfrom .document import init_doc, unlocked, with_lock # noqa\nfrom .liveness import LivenessHandler\nfrom .loading import LOADING_INDICATOR_CSS_CLASS\nfrom .logging import (\n LOG_SESSION_CREATED, LOG_SESSION_DESTROYED, LOG_SESSION_LAUNCHING,\n)\nfrom .markdown import build_single_handler_application\nfrom .profile import profile_ctx\nfrom .reload import autoreload_watcher\nfrom .resources import (\n BASE_TEMPLATE, CDN_DIST, COMPONENT_PATH, ERROR_TEMPLATE, LOCAL_DIST,\n Resources, _env, bundle_resources, patch_model_css, resolve_custom_path,\n)\nfrom .state import set_curdoc, state\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from bokeh.bundle import Bundle\n from bokeh.core.types import ID\n from bokeh.document.document import DocJson, Document\n from bokeh.server.contexts import BokehSessionContext\n from bokeh.server.session import ServerSession\n from jinja2 import Template\n\n from ..template.base import BaseTemplate\n from ..viewable import Viewable, Viewer\n from .location import Location\n\n TViewable = Union[Viewable, Viewer, BaseTemplate]\n TViewableFuncOrPath = Union[TViewable, 
Callable[[], TViewable], os.PathLike, str]\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\nINDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', \"index.html\")\nDEFAULT_TITLE = \"Panel Application\"\n\ndef _origin_url(url: str) -> str:\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\ndef _server_url(url: str, port: int) -> str:\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\ndef _eval_panel(\n panel: TViewableFuncOrPath, server_id: str, title: str,\n location: bool | Location, admin: bool, doc: Document\n):\n from ..pane import panel as as_panel\n from ..template import BaseTemplate\n\n if config.global_loading_spinner:\n doc.js_on_event(\n 'document_ready', CustomJS(code=f\"\"\"\n const body = document.getElementsByTagName('body')[0]\n body.classList.remove({LOADING_INDICATOR_CSS_CLASS!r}, {config.loading_spinner!r})\n \"\"\")\n )\n\n # Set up instrumentation for logging sessions\n logger.info(LOG_SESSION_LAUNCHING, id(doc))\n def _log_session_destroyed(session_context):\n logger.info(LOG_SESSION_DESTROYED, id(doc))\n doc.on_session_destroyed(_log_session_destroyed)\n\n with set_curdoc(doc):\n if isinstance(panel, (FunctionType, MethodType)):\n panel = panel()\n if isinstance(panel, BaseTemplate):\n doc = panel._modify_doc(server_id, title, doc, location)\n else:\n doc = as_panel(panel)._modify_doc(server_id, title, doc, location)\n return doc\n\ndef async_execute(func: Callable[..., None]) -> None:\n \"\"\"\n Wrap async event loop scheduling to ensure that with_lock flag\n is propagated from function to partial wrapping it.\n \"\"\"\n if not state.curdoc or not state.curdoc.session_context:\n ioloop = IOLoop.current()\n event_loop = ioloop.asyncio_loop # type: ignore\n wrapper = state._handle_exception_wrapper(func)\n if event_loop.is_running():\n ioloop.add_callback(wrapper)\n else:\n event_loop.run_until_complete(wrapper())\n return\n\n if isinstance(func, partial) and hasattr(func.func, 'lock'):\n unlock = not func.func.lock # type: ignore\n else:\n unlock = not getattr(func, 'lock', False)\n curdoc = state.curdoc\n @wraps(func)\n async def wrapper(*args, **kw):\n with set_curdoc(curdoc):\n try:\n return await func(*args, **kw)\n except Exception as e:\n state._handle_exception(e)\n if unlock:\n wrapper.nolock = True # type: ignore\n state.curdoc.add_next_tick_callback(wrapper)\n\nparam.parameterized.async_executor = async_execute\n\ndef _initialize_session_info(session_context: 'BokehSessionContext'):\n from ..config import config\n session_id = session_context.id\n sessions = state.session_info['sessions']\n history = -1 if config._admin else config.session_history\n if not config._admin and (history == 0 or session_id in sessions):\n return\n\n state.session_info['total'] += 1\n if history > 0 and len(sessions) >= history:\n old_history = list(sessions.items())\n sessions = OrderedDict(old_history[-(history-1):])\n state.session_info['sessions'] = sessions\n sessions[session_id] = {\n 'launched': dt.datetime.now().timestamp(),\n 'started': None,\n 'rendered': None,\n 'ended': None,\n 'user_agent': session_context.request.headers.get('User-Agent')\n }\n 
state.param.trigger('session_info')\n\nstate._on_session_created_internal.append(_initialize_session_info)\n\n#---------------------------------------------------------------------\n# Bokeh patches\n#---------------------------------------------------------------------\n\n\ndef html_page_for_render_items(\n bundle: Bundle | tuple[str, str], docs_json: dict[ID, DocJson],\n render_items: list[RenderItem], title: str, template: Template | str | None = None,\n template_variables: dict[str, Any] = {}\n) -> str:\n \"\"\"\n Render an HTML page from a template and Bokeh render items.\n\n Arguments\n ---------\n bundle (tuple):\n A tuple containing (bokehjs, bokehcss)\n docs_json (JSON-like):\n Serialized Bokeh Document\n render_items (RenderItems)\n Specific items to render from the document and where\n title (str or None)\n A title for the HTML page. If None, DEFAULT_TITLE is used\n template (str or Template or None, optional) :\n A Template to be used for the HTML page. If None, FILE is used.\n template_variables (dict, optional):\n Any Additional variables to pass to the template\n\n Returns\n -------\n str\n \"\"\"\n if title is None:\n title = DEFAULT_TITLE\n\n bokeh_js, bokeh_css = bundle\n\n json_id = make_id()\n json = escape(serialize_json(docs_json), quote=False)\n json = wrap_in_script_tag(json, \"application/json\", json_id)\n\n script = wrap_in_script_tag(script_for_render_items(json_id, render_items))\n\n context = template_variables.copy()\n\n context.update(dict(\n title = title,\n bokeh_js = bokeh_js,\n bokeh_css = bokeh_css,\n plot_script = json + script,\n docs = render_items,\n base = BASE_TEMPLATE,\n macros = MACROS,\n ))\n\n if len(render_items) == 1:\n context[\"doc\"] = context[\"docs\"][0]\n context[\"roots\"] = context[\"doc\"].roots\n\n if template is None:\n template = BASE_TEMPLATE\n elif isinstance(template, str):\n template = _env.from_string(\"{% extends base %}\\n\" + template)\n\n html = template.render(context)\n return html\n\ndef server_html_page_for_session(\n session: 'ServerSession',\n resources: 'Resources',\n title: str,\n token: str | None = None,\n template: str | Template = BASE_TEMPLATE,\n template_variables: Optional[Dict[str, Any]] = None,\n) -> str:\n\n # ALERT: Replace with better approach before Bokeh 3.x compatible release\n if resources.mode == 'server':\n dist_url = f'{state.rel_path}/{LOCAL_DIST}' if state.rel_path else LOCAL_DIST\n else:\n dist_url = CDN_DIST\n\n doc = session.document\n doc._template_variables['theme_name'] = config.theme\n doc._template_variables['dist_url'] = dist_url\n for root in doc.roots:\n patch_model_css(root, dist_url=dist_url)\n\n render_item = RenderItem(\n token = token or session.token,\n roots = doc.roots,\n use_for_title = False,\n )\n\n if template_variables is None:\n template_variables = {}\n\n if template is FILE:\n template = BASE_TEMPLATE\n\n with set_curdoc(doc):\n bundle = bundle_resources(doc.roots, resources)\n html = html_page_for_render_items(\n bundle, {}, [render_item], title, template=template,\n template_variables=template_variables\n )\n if config.global_loading_spinner:\n html = html.replace(\n '<body>', f'<body class=\"{LOADING_INDICATOR_CSS_CLASS} pn-{config.loading_spinner}\">'\n )\n return html\n\n\ndef autoload_js_script(doc, resources, token, element_id, app_path, absolute_url, absolute=False):\n resources = Resources.from_bokeh(resources, absolute=absolute)\n bundle = bundle_resources(doc.roots, resources)\n\n render_items = [RenderItem(token=token, elementid=element_id, 
use_for_title=False)]\n bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))\n\n return AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)\n\ndef destroy_document(self, session):\n \"\"\"\n Override for Document.destroy() without calling gc.collect directly.\n The gc.collect() call is scheduled as a task, ensuring that when\n multiple documents are destroyed in quick succession we do not\n schedule excessive garbage collection.\n \"\"\"\n if session is not None:\n self.remove_on_change(session)\n\n del self._roots\n del self._theme\n del self._template\n self._session_context = None\n\n self.callbacks.destroy()\n self.models.destroy()\n self.modules.destroy()\n\n # Clear periodic callbacks\n for cb in state._periodic.get(self, []):\n cb.stop()\n\n # Clean up pn.state to avoid tasks getting executed on dead session\n for attr in dir(state):\n # _param_watchers is deprecated in Param 2.0 and will raise a warning\n if not attr.startswith('_') or attr == \"_param_watchers\":\n continue\n state_obj = getattr(state, attr)\n if isinstance(state_obj, weakref.WeakKeyDictionary) and self in state_obj:\n del state_obj[self]\n\n # Schedule GC\n at = dt.datetime.now() + dt.timedelta(seconds=5)\n state.schedule_task('gc.collect', gc.collect, at=at)\n\n del self.destroy\n\n# Patch Server to attach task factory to asyncio loop and handle Admin server context\nclass Server(BokehServer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if state._admin_context:\n state._admin_context._loop = self._loop\n\n def start(self) -> None:\n super().start()\n if state._admin_context:\n self._loop.add_callback(state._admin_context.run_load_hook)\n\n def stop(self, wait: bool = True) -> None:\n super().stop(wait=wait)\n if state._admin_context:\n state._admin_context.run_unload_hook()\n\nbokeh.server.server.Server = Server\n\n\n# Patch Application to handle session callbacks\nclass Application(BkApplication):\n\n def __init__(self, *args, **kwargs):\n self._admin = kwargs.pop('admin', None)\n super().__init__(*args, **kwargs)\n\n async def on_session_created(self, session_context):\n with set_curdoc(session_context._document):\n if self._admin is not None:\n config._admin = self._admin\n for cb in state._on_session_created_internal+state._on_session_created:\n cb(session_context)\n await super().on_session_created(session_context)\n\n def initialize_document(self, doc):\n super().initialize_document(doc)\n if doc in state._templates and doc not in state._templates[doc]._documents:\n template = state._templates[doc]\n with set_curdoc(doc):\n template.server_doc(title=template.title, location=True, doc=doc)\n\nbokeh.command.util.Application = Application # type: ignore\n\nclass SessionPrefixHandler:\n\n @contextmanager\n def _session_prefix(self):\n prefix = self.request.uri.replace(self.application_context._url, '')\n if not prefix.endswith('/'):\n prefix += '/'\n base_url = urljoin('/', prefix)\n rel_path = '/'.join(['..'] * self.application_context._url.strip('/').count('/'))\n old_url, old_rel = state.base_url, state.rel_path\n\n # Handle autoload.js absolute paths\n abs_url = self.get_argument('bokeh-absolute-url', default=None)\n if abs_url is not None:\n rel_path = abs_url.replace(self.application_context._url, '')\n\n with edit_readonly(state):\n state.base_url = base_url\n state.rel_path = rel_path\n try:\n yield\n finally:\n with edit_readonly(state):\n state.base_url = old_url\n state.rel_path = old_rel\n\nclass 
LoginUrlMixin:\n \"\"\"\n Overrides the AuthRequestHandler.get_login_url implementation to\n correctly handle prefixes.\n \"\"\"\n\n def get_login_url(self):\n ''' Delegates to``get_login_url`` method of the auth provider, or the\n ``login_url`` attribute.\n\n '''\n if self.application.auth_provider.get_login_url is not None:\n return '.' + self.application.auth_provider.get_login_url(self)\n if self.application.auth_provider.login_url is not None:\n return '.' + self.application.auth_provider.login_url\n raise RuntimeError('login_url or get_login_url() must be supplied when authentication hooks are enabled')\n\n\n# Patch Bokeh DocHandler URL\nclass DocHandler(LoginUrlMixin, BkDocHandler, SessionPrefixHandler):\n\n @authenticated\n async def get_session(self):\n from ..config import config\n path = self.request.path\n session = None\n if config.reuse_sessions and path in state._session_key_funcs:\n key = state._session_key_funcs[path](self.request)\n session = state._sessions.get(key)\n if session is None:\n session = await super().get_session()\n with set_curdoc(session.document):\n if config.reuse_sessions:\n key_func = config.session_key_func or (lambda r: (r.path, r.arguments.get('theme', [b'default'])[0].decode('utf-8')))\n state._session_key_funcs[path] = key_func\n key = key_func(self.request)\n state._sessions[key] = session\n session.block_expiration()\n return session\n\n @authenticated\n async def get(self, *args, **kwargs):\n app = self.application\n with self._session_prefix():\n key_func = state._session_key_funcs.get(self.request.path, lambda r: r.path)\n old_request = key_func(self.request) in state._sessions\n session = await self.get_session()\n if old_request and state._sessions.get(key_func(self.request)) is session:\n session_id = generate_session_id(\n secret_key=self.application.secret_key,\n signed=self.application.sign_sessions\n )\n payload = get_token_payload(session.token)\n del payload['session_expiry']\n token = generate_jwt_token(\n session_id,\n secret_key=app.secret_key,\n signed=app.sign_sessions,\n expiration=app.session_token_expiration,\n extra_payload=payload\n )\n else:\n token = session.token\n logger.info(LOG_SESSION_CREATED, id(session.document))\n with set_curdoc(session.document):\n resources = Resources.from_bokeh(self.application.resources())\n auth_cb = config.authorize_callback\n authorized = False\n if auth_cb:\n auth_cb = config.authorize_callback\n auth_params = inspect.signature(auth_cb).parameters\n if len(auth_params) == 1:\n auth_args = (state.user_info,)\n elif len(auth_params) == 2:\n auth_args = (state.user_info, self.request.path,)\n else:\n raise RuntimeError(\n 'Authorization callback must accept either 1) a single argument '\n 'which is the user name or 2) two arguments which includes the '\n 'user name and the url path the user is trying to access.'\n )\n auth_error = f'{state.user} is not authorized to access this application.'\n try:\n authorized = auth_cb(*auth_args)\n if isinstance(authorized, str):\n self.redirect(authorized)\n return\n elif not authorized:\n auth_error = (\n f'Authorization callback errored. Could not validate user name \"{state.user}\" '\n f'for the given app \"{self.request.path}\".'\n )\n if authorized:\n auth_error = None\n except Exception:\n auth_error = f'Authorization callback errored. 
Could not validate user {state.user}.'\n else:\n authorized = True\n\n if authorized:\n page = server_html_page_for_session(\n session, resources=resources, title=session.document.title,\n token=token, template=session.document.template,\n template_variables=session.document.template_variables,\n )\n else:\n if config.auth_template:\n with open(config.auth_template) as f:\n template = _env.from_string(f.read())\n else:\n template = ERROR_TEMPLATE\n page = template.render(\n npm_cdn=config.npm_cdn,\n title='Panel: Authorization Error',\n error_type='Authorization Error',\n error='User is not authorized.',\n error_msg=auth_error\n )\n self.set_header(\"Content-Type\", 'text/html')\n self.write(page)\n\nper_app_patterns[0] = (r'/?', DocHandler)\n\n# Patch Bokeh Autoload handler\nclass AutoloadJsHandler(BkAutoloadJsHandler, SessionPrefixHandler):\n ''' Implements a custom Tornado handler for the autoload JS chunk\n\n '''\n\n async def get(self, *args, **kwargs) -> None:\n element_id = self.get_argument(\"bokeh-autoload-element\", default=None)\n if not element_id:\n self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')\n return\n\n app_path = self.get_argument(\"bokeh-app-path\", default=\"/\")\n absolute_url = self.get_argument(\"bokeh-absolute-url\", default=None)\n\n if absolute_url:\n server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))\n else:\n server_url = None\n\n with self._session_prefix():\n session = await self.get_session()\n with set_curdoc(session.document):\n resources = Resources.from_bokeh(\n self.application.resources(server_url), absolute=True\n )\n js = autoload_js_script(\n session.document, resources, session.token, element_id,\n app_path, absolute_url, absolute=True\n )\n\n self.set_header(\"Content-Type\", 'application/javascript')\n self.write(js)\n\nper_app_patterns[3] = (r'/autoload.js', AutoloadJsHandler)\n\nclass RootHandler(LoginUrlMixin, BkRootHandler):\n \"\"\"\n Custom RootHandler that provides the CDN_DIST directory as a\n template variable.\n \"\"\"\n\n def render(self, *args, **kwargs):\n kwargs['PANEL_CDN'] = CDN_DIST\n return super().render(*args, **kwargs)\n\ntoplevel_patterns[0] = (r'/?', RootHandler)\nbokeh.server.tornado.RootHandler = RootHandler\n\n\nclass ComponentResourceHandler(StaticFileHandler):\n \"\"\"\n A handler that serves local resources relative to a Python module.\n The handler resolves a specific Panel component by module reference\n and name, then resolves an attribute on that component to check\n if it contains the requested resource path.\n\n /<endpoint>/<module>/<class>/<attribute>/<path>\n \"\"\"\n\n _resource_attrs = [\n '__css__', '__javascript__', '__js_module__', '__javascript_modules__', '_resources',\n '_css', '_js', 'base_css', 'css', '_stylesheets', 'modifiers'\n ]\n\n def initialize(self, path: Optional[str] = None, default_filename: Optional[str] = None):\n self.root = path\n self.default_filename = default_filename\n\n def parse_url_path(self, path: str) -> str:\n \"\"\"\n Resolves the resource the URL pattern refers to.\n \"\"\"\n parts = path.split('/')\n if len(parts) < 4:\n raise HTTPError(400, 'Malformed URL')\n mod, cls, rtype, *subpath = parts\n try:\n module = importlib.import_module(mod)\n except ModuleNotFoundError:\n raise HTTPError(404, 'Module not found')\n try:\n component = getattr(module, cls)\n except AttributeError:\n raise HTTPError(404, 'Component not found')\n\n # May only access resources listed in specific attributes\n if rtype not in 
self._resource_attrs:\n raise HTTPError(403, 'Requested resource type not valid.')\n\n try:\n resources = getattr(component, rtype)\n except AttributeError:\n raise HTTPError(404, 'Resource type not found')\n\n # Handle template resources\n if rtype == '_resources':\n rtype = subpath[0]\n subpath = subpath[1:]\n if rtype not in resources:\n raise HTTPError(404, 'Resource type not found')\n resources = resources[rtype]\n rtype = f'_resources/{rtype}'\n elif rtype == 'modifiers':\n resources = [\n st for rs in resources.values() for st in rs.get('stylesheets', [])\n if isinstance(st, str)\n ]\n\n if isinstance(resources, dict):\n resources = list(resources.values())\n elif isinstance(resources, (str, pathlib.PurePath)):\n resources = [resources]\n resources = [\n str(resolve_custom_path(component, resource, relative=True)).replace(os.path.sep, '/')\n for resource in resources\n ]\n\n rel_path = '/'.join(subpath)\n\n # Important: May only access resources explicitly listed on the component\n # Otherwise this potentially exposes all files to the web\n if rel_path not in resources:\n raise HTTPError(403, 'Requested resource was not listed.')\n\n if not module.__file__:\n raise HTTPError(404, 'Requested module does not reference a file.')\n\n return str(pathlib.Path(module.__file__).parent / rel_path)\n\n @classmethod\n def get_absolute_path(cls, root: str, path: str) -> str:\n return path\n\n def validate_absolute_path(self, root: str, absolute_path: str) -> str:\n if not os.path.exists(absolute_path):\n raise HTTPError(404)\n if not os.path.isfile(absolute_path):\n raise HTTPError(403, \"%s is not a file\", self.path)\n return absolute_path\n\n\ndef modify_document(self, doc: 'Document'):\n from bokeh.io.doc import set_curdoc as bk_set_curdoc\n\n from ..config import config\n\n logger.info(LOG_SESSION_LAUNCHING, id(doc))\n\n if config.autoreload:\n path = self._runner.path\n argv = self._runner._argv\n handler = type(self)(filename=path, argv=argv)\n self._runner = handler._runner\n\n module = self._runner.new_module()\n\n # If no module was returned it means the code runner has some permanent\n # unfixable problem, e.g. the configured source code has a syntax error\n if module is None:\n return\n\n # One reason modules are stored is to prevent the module\n # from being gc'd before the document is. A symptom of a\n # gc'd module is that its globals become None. 
Additionally\n # stored modules are used to provide correct paths to\n # custom models resolver.\n sys.modules[module.__name__] = module\n doc.modules._modules.append(module)\n\n try:\n old_doc = curdoc()\n except RuntimeError:\n old_doc = None\n bk_set_curdoc(doc)\n\n if config.autoreload:\n set_curdoc(doc)\n state.onload(autoreload_watcher)\n\n sessions = []\n\n try:\n def post_check():\n newdoc = curdoc()\n # Do not let curdoc track modules when autoreload is enabled\n # otherwise it will erroneously complain that there is\n # a memory leak\n if config.autoreload:\n newdoc.modules._modules = []\n\n # script is supposed to edit the doc not replace it\n if newdoc is not doc:\n raise RuntimeError(\"%s at '%s' replaced the output document\" % (self._origin, self._runner.path))\n\n def handle_exception(handler, e):\n from bokeh.application.handlers.handler import handle_exception\n\n from ..pane import Alert\n\n # Clean up\n del sys.modules[module.__name__]\n\n if hasattr(doc, 'modules'):\n doc.modules._modules.remove(module)\n else:\n doc._modules.remove(module)\n bokeh.application.handlers.code_runner.handle_exception = handle_exception\n tb = html.escape(traceback.format_exc()).replace('\\033[1m', '<b>').replace('\\033[0m', '</b>')\n\n # Serve error\n e_msg = str(e).replace('\\033[1m', '<b>').replace('\\033[0m', '</b>')\n Alert(\n f'<b>{type(e).__name__}</b>: {e_msg}\\n<pre style=\"overflow-y: auto\">{tb}</pre>',\n alert_type='danger', margin=5, sizing_mode='stretch_width'\n ).servable()\n\n if config.autoreload:\n bokeh.application.handlers.code_runner.handle_exception = handle_exception\n\n state._launching.append(doc)\n with _monkeypatch_io(self._loggers):\n with patch_curdoc(doc):\n with profile_ctx(config.profiler) as sessions:\n self._runner.run(module, post_check)\n\n def _log_session_destroyed(session_context):\n logger.info(LOG_SESSION_DESTROYED, id(doc))\n\n doc.on_session_destroyed(_log_session_destroyed)\n doc.destroy = partial(destroy_document, doc) # type: ignore\n finally:\n state._launching.remove(doc)\n if config.profiler:\n try:\n path = doc.session_context.request.path\n state._profiles[(path, config.profiler)] += sessions\n state.param.trigger('_profiles')\n except Exception:\n pass\n if old_doc is not None:\n bk_set_curdoc(old_doc)\n\nCodeHandler.modify_document = modify_document # type: ignore\n\n# Copied from bokeh 2.4.0, to fix directly in bokeh at some point.\ndef create_static_handler(prefix, key, app):\n # patch\n key = '/__patchedroot' if key == '/' else key\n\n route = prefix\n route += \"/static/(.*)\" if key == \"/\" else key + \"/static/(.*)\"\n if app.static_path is not None:\n return (route, StaticFileHandler, {\"path\" : app.static_path})\n return (route, StaticHandler, {})\n\nbokeh.server.tornado.create_static_handler = create_static_handler\n\n#---------------------------------------------------------------------\n# Async patches\n#---------------------------------------------------------------------\n\n# Bokeh 2.4.x patches the asyncio event loop policy but Tornado 6.1\n# support the WindowsProactorEventLoopPolicy so we restore it,\n# unless we detect we are running on jupyter_server.\nif (\n sys.platform == 'win32' and\n sys.version_info[:3] >= (3, 8, 0) and\n tornado.version_info >= (6, 1) and\n type(asyncio.get_event_loop_policy()) is asyncio.WindowsSelectorEventLoopPolicy and\n (('jupyter_server' not in sys.modules and\n 'jupyter_client' not in sys.modules) or\n 'pytest' in sys.modules)\n):\n 
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef serve(\n panels: TViewableFuncOrPath | Mapping[str, TViewableFuncOrPath],\n port: int = 0,\n address: Optional[str] = None,\n websocket_origin: Optional[str | list[str]] = None,\n loop: Optional[IOLoop] = None,\n show: bool = True,\n start: bool = True,\n title: Optional[str] = None,\n verbose: bool = True,\n location: bool = True,\n threaded: bool = False,\n admin: bool = False,\n **kwargs\n) -> StoppableThread | Server:\n \"\"\"\n Allows serving one or more panel objects on a single server.\n The panels argument should be either a Panel object or a function\n returning a Panel object or a dictionary of these two. If a\n dictionary is supplied the keys represent the slugs at which\n each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`\n will serve apps at /app and /app2 on the server.\n\n Reference: https://panel.holoviz.org/user_guide/Server_Configuration.html#serving-multiple-apps\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable or function}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n address : str\n The address the server should listen on for HTTP requests.\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=True)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=True)\n Whether to start the Server\n title: str or {str: str} (optional, default=None)\n An HTML title for the application or a dictionary mapping\n from the URL slug to a customized title\n verbose: boolean (optional, default=True)\n Whether to print the address and port\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n threaded: boolean (default=False)\n Whether to start the server on a new Thread\n admin: boolean (default=False)\n Whether to enable the admin panel\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n \"\"\"\n # Empty layout are valid and the Bokeh warning is silenced as usually\n # not relevant to Panel users.\n silence(EMPTY_LAYOUT, True)\n kwargs = dict(kwargs, **dict(\n port=port, address=address, websocket_origin=websocket_origin,\n loop=loop, show=show, start=start, title=title, verbose=verbose,\n location=location, admin=admin\n ))\n if threaded:\n kwargs['loop'] = loop = IOLoop(make_current=False) if loop is None else loop\n server = StoppableThread(\n target=get_server, io_loop=loop, args=(panels,), kwargs=kwargs\n )\n server_id = kwargs.get('server_id', uuid.uuid4().hex)\n state._threads[server_id] = server\n server.start()\n else:\n return get_server(panels, **kwargs)\n return server\n\n\nclass ProxyFallbackHandler(RequestHandler):\n \"\"\"A `RequestHandler` that wraps another HTTP server callback and\n proxies the subpath.\n \"\"\"\n\n def initialize(self, fallback, proxy=None):\n self.fallback = fallback\n self.proxy = proxy\n\n def 
prepare(self):\n if self.proxy:\n self.request.path = self.request.path.replace(self.proxy, '')\n self.fallback(self.request)\n self._finished = True\n self.on_finish()\n\n\ndef get_static_routes(static_dirs):\n \"\"\"\n Returns a list of tornado routes of StaticFileHandlers given a\n dictionary of slugs and file paths to serve.\n \"\"\"\n patterns = []\n for slug, path in static_dirs.items():\n if not slug.startswith('/'):\n slug = '/' + slug\n if slug == '/static':\n raise ValueError(\"Static file route may not use /static \"\n \"this is reserved for internal use.\")\n path = fullpath(path)\n if not os.path.isdir(path):\n raise ValueError(\"Cannot serve non-existent path %s\" % path)\n patterns.append(\n (r\"%s/(.*)\" % slug, StaticFileHandler, {\"path\": path})\n )\n patterns.append((\n f'/{COMPONENT_PATH}(.*)', ComponentResourceHandler, {}\n ))\n return patterns\n\ndef get_server(\n panel: TViewableFuncOrPath | Mapping[str, TViewableFuncOrPath],\n port: int = 0,\n address: Optional[str] = None,\n websocket_origin: Optional[str | list[str]] = None,\n loop: Optional[IOLoop] = None,\n show: bool = False,\n start: bool = False,\n title: bool = None,\n verbose: bool = False,\n location: bool | Location = True,\n admin: bool = False,\n static_dirs: Mapping[str, str] = {},\n basic_auth: str = None,\n oauth_provider: Optional[str] = None,\n oauth_key: Optional[str] = None,\n oauth_secret: Optional[str] = None,\n oauth_redirect_uri: Optional[str] = None,\n oauth_extra_params: Mapping[str, str] = {},\n oauth_error_template: Optional[str] = None,\n cookie_secret: Optional[str] = None,\n oauth_encryption_key: Optional[str] = None,\n logout_template: Optional[str] = None,\n session_history: Optional[int] = None,\n liveness: bool | str = False,\n **kwargs\n) -> Server:\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n address : str\n The address the server should listen on for HTTP requests.\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on.\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start.\n start : boolean(optional, default=False)\n Whether to start the Server.\n title : str or {str: str} (optional, default=None)\n An HTML title for the application or a dictionary mapping\n from the URL slug to a customized title.\n verbose: boolean (optional, default=False)\n Whether to report the address and port.\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n admin: boolean (default=False)\n Whether to enable the admin panel\n static_dirs: dict (optional, default={})\n A dictionary of routes and local paths to serve as static file\n directories on those routes.\n basic_auth: str (optional, default=None)\n Password or filepath to use with basic auth provider.\n oauth_provider: str\n One of the available OAuth providers\n oauth_key: str (optional, default=None)\n The public OAuth identifier\n oauth_secret: str 
(optional, default=None)\n The client secret for the OAuth provider\n oauth_redirect_uri: Optional[str] = None,\n Overrides the default OAuth redirect URI\n oauth_extra_params: dict (optional, default={})\n Additional information for the OAuth provider\n oauth_error_template: str (optional, default=None)\n Jinja2 template used when displaying authentication errors.\n cookie_secret: str (optional, default=None)\n A random secret string to sign cookies (required for OAuth)\n oauth_encryption_key: str (optional, default=False)\n A random encryption key used for encrypting OAuth user\n information and access tokens.\n logout_template: str (optional, default=None)\n Jinja2 template served when viewing the logout endpoint when\n authentication is enabled.\n session_history: int (optional, default=None)\n The amount of session history to accumulate. If set to non-zero\n and non-None value will launch a REST endpoint at\n /rest/session_info, which returns information about the session\n history.\n liveness: bool | str (optional, default=False)\n Whether to add a liveness endpoint. If a string is provided\n then this will be used as the endpoint, otherwise the endpoint\n will be hosted at /liveness.\n kwargs: dict\n Additional keyword arguments to pass to Server instance.\n\n Returns\n -------\n server : panel.io.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from ..config import config\n from .rest import REST_PROVIDERS\n\n server_id = kwargs.pop('server_id', uuid.uuid4().hex)\n kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])\n if isinstance(panel, dict):\n apps = {}\n for slug, app in panel.items():\n if isinstance(title, dict):\n try:\n title_ = title[slug]\n except KeyError:\n raise KeyError(\n \"Keys of the title dictionary and of the apps \"\n f\"dictionary must match. 
No {slug} key found in the \"\n \"title dictionary.\")\n else:\n title_ = title\n slug = slug if slug.startswith('/') else '/'+slug\n if 'flask' in sys.modules:\n from flask import Flask\n if isinstance(app, Flask):\n wsgi = WSGIContainer(app)\n if slug == '/':\n raise ValueError('Flask apps must be served on a subpath.')\n if not slug.endswith('/'):\n slug += '/'\n extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,\n dict(fallback=wsgi, proxy=slug)))\n continue\n if isinstance(app, pathlib.Path):\n app = str(app) # enables serving apps from Paths\n if (isinstance(app, str) and (app.endswith(\".py\") or app.endswith(\".ipynb\") or app.endswith('.md'))\n and os.path.isfile(app)):\n apps[slug] = app = build_single_handler_application(app)\n app._admin = admin\n elif isinstance(app, BkApplication):\n apps[slug] = app\n else:\n handler = FunctionHandler(partial(_eval_panel, app, server_id, title_, location, admin))\n apps[slug] = Application(handler, admin=admin)\n else:\n if isinstance(panel, pathlib.Path):\n panel = str(panel) # enables serving apps from Paths\n if (isinstance(panel, str) and (panel.endswith(\".py\") or panel.endswith(\".ipynb\") or panel.endswith('.md'))\n and os.path.isfile(panel)):\n apps = {'/': build_single_handler_application(panel)}\n else:\n handler = FunctionHandler(partial(_eval_panel, panel, server_id, title, location, admin))\n apps = {'/': Application(handler, admin=admin)}\n\n if admin:\n if '/admin' in apps:\n raise ValueError(\n 'Cannot enable admin panel because another app is being served '\n 'on the /admin endpoint'\n )\n from .admin import admin_panel\n admin_handler = FunctionHandler(admin_panel)\n apps['/admin'] = Application(admin_handler)\n\n extra_patterns += get_static_routes(static_dirs)\n\n if session_history is not None:\n config.session_history = session_history\n if config.session_history != 0:\n pattern = REST_PROVIDERS['param']([], 'rest')\n extra_patterns.extend(pattern)\n state.publish('session_info', state, ['session_info'])\n\n if liveness:\n liveness_endpoint = 'liveness' if isinstance(liveness, bool) else liveness\n extra_patterns += [(r\"/%s\" % liveness_endpoint, LivenessHandler, dict(applications=apps))]\n\n opts = dict(kwargs)\n if loop:\n asyncio.set_event_loop(loop.asyncio_loop)\n opts['io_loop'] = loop\n elif opts.get('num_procs', 1) == 1:\n opts['io_loop'] = IOLoop.current()\n\n if 'index' not in opts:\n opts['index'] = INDEX_HTML\n\n if address is not None:\n opts['address'] = address\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n # Configure OAuth\n from ..config import config\n server_config = {}\n if basic_auth:\n from ..auth import BasicProvider\n server_config['basic_auth'] = basic_auth\n basic_login_template = kwargs.pop('basic_login_template', None)\n opts['auth_provider'] = BasicProvider(\n basic_login_template,\n logout_template=logout_template\n )\n elif oauth_provider:\n from ..auth import OAuthProvider\n config.oauth_provider = oauth_provider # type: ignore\n opts['auth_provider'] = OAuthProvider(\n error_template=oauth_error_template,\n logout_template=logout_template\n )\n if oauth_key:\n config.oauth_key = oauth_key # type: ignore\n if oauth_secret:\n config.oauth_secret = oauth_secret # type: ignore\n if oauth_extra_params:\n config.oauth_extra_params = oauth_extra_params # type: ignore\n if cookie_secret:\n config.cookie_secret = cookie_secret # type: ignore\n if oauth_redirect_uri:\n 
config.oauth_redirect_uri = oauth_redirect_uri # type: ignore\n opts['cookie_secret'] = config.cookie_secret\n\n server = Server(apps, port=port, **opts)\n if verbose:\n address = server.address or 'localhost'\n url = f\"http://{address}:{server.port}{server.prefix}\"\n print(f\"Launching server at {url}\")\n\n state._servers[server_id] = (server, panel, [])\n state._server_config[server._tornado] = server_config\n\n if show:\n def show_callback():\n server.show('/login' if config.oauth_provider else '/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n except TypeError:\n warn(\n \"IOLoop couldn't be started. Ensure it is started by \"\n \"process invoking the panel.io.server.serve.\"\n )\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop: IOLoop, **kwargs):\n super().__init__(**kwargs)\n self.io_loop = io_loop\n\n def run(self) -> None:\n if hasattr(self, '_target'):\n target, args, kwargs = self._target, self._args, self._kwargs # type: ignore\n else:\n target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs # type: ignore\n if not target:\n return\n bokeh_server = None\n try:\n bokeh_server = target(*args, **kwargs)\n finally:\n if isinstance(bokeh_server, Server):\n try:\n bokeh_server.stop()\n except Exception:\n pass\n if hasattr(self, '_target'):\n del self._target, self._args, self._kwargs # type: ignore\n else:\n del self._Thread__target, self._Thread__args, self._Thread__kwargs # type: ignore\n\n def stop(self) -> None:\n self.io_loop.add_callback(self.io_loop.stop)\n", "path": "panel/io/server.py"}], "after_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport datetime as dt\nimport gc\nimport html\nimport importlib\nimport inspect\nimport logging\nimport os\nimport pathlib\nimport signal\nimport sys\nimport threading\nimport traceback\nimport uuid\nimport weakref\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom functools import partial, wraps\nfrom html import escape\nfrom types import FunctionType, MethodType\nfrom typing import (\n TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional, Union,\n)\nfrom urllib.parse import urljoin, urlparse\n\nimport bokeh\nimport bokeh.command.util\nimport param\nimport tornado\n\n# Bokeh imports\nfrom bokeh.application import Application as BkApplication\nfrom bokeh.application.handlers.code import (\n CodeHandler, _monkeypatch_io, patch_curdoc,\n)\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.core.json_encoder import serialize_json\nfrom bokeh.core.templates import AUTOLOAD_JS, FILE, MACROS\nfrom bokeh.core.validation import silence\nfrom bokeh.core.validation.warnings import EMPTY_LAYOUT\nfrom bokeh.embed.bundle import Script\nfrom bokeh.embed.elements import script_for_render_items\nfrom bokeh.embed.util import RenderItem\nfrom bokeh.embed.wrappers import wrap_in_script_tag\nfrom bokeh.io import curdoc\nfrom bokeh.models import CustomJS\nfrom bokeh.server.server import Server as BokehServer\nfrom bokeh.server.urls import 
per_app_patterns, toplevel_patterns\nfrom bokeh.server.views.autoload_js_handler import (\n AutoloadJsHandler as BkAutoloadJsHandler,\n)\nfrom bokeh.server.views.doc_handler import DocHandler as BkDocHandler\nfrom bokeh.server.views.root_handler import RootHandler as BkRootHandler\nfrom bokeh.server.views.static_handler import StaticHandler\nfrom bokeh.util.serialization import make_id\nfrom bokeh.util.token import (\n generate_jwt_token, generate_session_id, get_token_payload,\n)\n# Tornado imports\nfrom tornado.ioloop import IOLoop\nfrom tornado.web import (\n HTTPError, RequestHandler, StaticFileHandler, authenticated,\n)\nfrom tornado.wsgi import WSGIContainer\n\n# Internal imports\nfrom ..config import config\nfrom ..util import edit_readonly, fullpath\nfrom ..util.warnings import warn\nfrom .document import init_doc, unlocked, with_lock # noqa\nfrom .liveness import LivenessHandler\nfrom .loading import LOADING_INDICATOR_CSS_CLASS\nfrom .logging import (\n LOG_SESSION_CREATED, LOG_SESSION_DESTROYED, LOG_SESSION_LAUNCHING,\n)\nfrom .markdown import build_single_handler_application\nfrom .profile import profile_ctx\nfrom .reload import autoreload_watcher\nfrom .resources import (\n BASE_TEMPLATE, CDN_DIST, COMPONENT_PATH, ERROR_TEMPLATE, LOCAL_DIST,\n Resources, _env, bundle_resources, patch_model_css, resolve_custom_path,\n)\nfrom .state import set_curdoc, state\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from bokeh.bundle import Bundle\n from bokeh.core.types import ID\n from bokeh.document.document import DocJson, Document\n from bokeh.server.contexts import BokehSessionContext\n from bokeh.server.session import ServerSession\n from jinja2 import Template\n\n from ..template.base import BaseTemplate\n from ..viewable import Viewable, Viewer\n from .location import Location\n\n TViewable = Union[Viewable, Viewer, BaseTemplate]\n TViewableFuncOrPath = Union[TViewable, Callable[[], TViewable], os.PathLike, str]\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\nINDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', \"index.html\")\nDEFAULT_TITLE = \"Panel Application\"\n\ndef _origin_url(url: str) -> str:\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\ndef _server_url(url: str, port: int) -> str:\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\ndef _eval_panel(\n panel: TViewableFuncOrPath, server_id: str, title: str,\n location: bool | Location, admin: bool, doc: Document\n):\n from ..pane import panel as as_panel\n from ..template import BaseTemplate\n\n if config.global_loading_spinner:\n doc.js_on_event(\n 'document_ready', CustomJS(code=f\"\"\"\n const body = document.getElementsByTagName('body')[0]\n body.classList.remove({LOADING_INDICATOR_CSS_CLASS!r}, {config.loading_spinner!r})\n \"\"\")\n )\n\n # Set up instrumentation for logging sessions\n logger.info(LOG_SESSION_LAUNCHING, id(doc))\n def _log_session_destroyed(session_context):\n logger.info(LOG_SESSION_DESTROYED, id(doc))\n doc.on_session_destroyed(_log_session_destroyed)\n\n with set_curdoc(doc):\n if isinstance(panel, (FunctionType, MethodType)):\n panel = panel()\n if isinstance(panel, BaseTemplate):\n doc = panel._modify_doc(server_id, title, doc, location)\n else:\n doc = as_panel(panel)._modify_doc(server_id, title, doc, 
location)\n return doc\n\ndef async_execute(func: Callable[..., None]) -> None:\n \"\"\"\n Wrap async event loop scheduling to ensure that with_lock flag\n is propagated from function to partial wrapping it.\n \"\"\"\n if not state.curdoc or not state.curdoc.session_context:\n ioloop = IOLoop.current()\n event_loop = ioloop.asyncio_loop # type: ignore\n wrapper = state._handle_exception_wrapper(func)\n if event_loop.is_running():\n ioloop.add_callback(wrapper)\n else:\n event_loop.run_until_complete(wrapper())\n return\n\n if isinstance(func, partial) and hasattr(func.func, 'lock'):\n unlock = not func.func.lock # type: ignore\n else:\n unlock = not getattr(func, 'lock', False)\n curdoc = state.curdoc\n @wraps(func)\n async def wrapper(*args, **kw):\n with set_curdoc(curdoc):\n try:\n return await func(*args, **kw)\n except Exception as e:\n state._handle_exception(e)\n if unlock:\n wrapper.nolock = True # type: ignore\n state.curdoc.add_next_tick_callback(wrapper)\n\nparam.parameterized.async_executor = async_execute\n\ndef _initialize_session_info(session_context: 'BokehSessionContext'):\n from ..config import config\n session_id = session_context.id\n sessions = state.session_info['sessions']\n history = -1 if config._admin else config.session_history\n if not config._admin and (history == 0 or session_id in sessions):\n return\n\n state.session_info['total'] += 1\n if history > 0 and len(sessions) >= history:\n old_history = list(sessions.items())\n sessions = OrderedDict(old_history[-(history-1):])\n state.session_info['sessions'] = sessions\n sessions[session_id] = {\n 'launched': dt.datetime.now().timestamp(),\n 'started': None,\n 'rendered': None,\n 'ended': None,\n 'user_agent': session_context.request.headers.get('User-Agent')\n }\n state.param.trigger('session_info')\n\nstate._on_session_created_internal.append(_initialize_session_info)\n\n#---------------------------------------------------------------------\n# Bokeh patches\n#---------------------------------------------------------------------\n\n\ndef html_page_for_render_items(\n bundle: Bundle | tuple[str, str], docs_json: dict[ID, DocJson],\n render_items: list[RenderItem], title: str, template: Template | str | None = None,\n template_variables: dict[str, Any] = {}\n) -> str:\n \"\"\"\n Render an HTML page from a template and Bokeh render items.\n\n Arguments\n ---------\n bundle (tuple):\n A tuple containing (bokehjs, bokehcss)\n docs_json (JSON-like):\n Serialized Bokeh Document\n render_items (RenderItems)\n Specific items to render from the document and where\n title (str or None)\n A title for the HTML page. If None, DEFAULT_TITLE is used\n template (str or Template or None, optional) :\n A Template to be used for the HTML page. 
If None, FILE is used.\n template_variables (dict, optional):\n Any Additional variables to pass to the template\n\n Returns\n -------\n str\n \"\"\"\n if title is None:\n title = DEFAULT_TITLE\n\n bokeh_js, bokeh_css = bundle\n\n json_id = make_id()\n json = escape(serialize_json(docs_json), quote=False)\n json = wrap_in_script_tag(json, \"application/json\", json_id)\n\n script = wrap_in_script_tag(script_for_render_items(json_id, render_items))\n\n context = template_variables.copy()\n\n context.update(dict(\n title = title,\n bokeh_js = bokeh_js,\n bokeh_css = bokeh_css,\n plot_script = json + script,\n docs = render_items,\n base = BASE_TEMPLATE,\n macros = MACROS,\n ))\n\n if len(render_items) == 1:\n context[\"doc\"] = context[\"docs\"][0]\n context[\"roots\"] = context[\"doc\"].roots\n\n if template is None:\n template = BASE_TEMPLATE\n elif isinstance(template, str):\n template = _env.from_string(\"{% extends base %}\\n\" + template)\n\n html = template.render(context)\n return html\n\ndef server_html_page_for_session(\n session: 'ServerSession',\n resources: 'Resources',\n title: str,\n token: str | None = None,\n template: str | Template = BASE_TEMPLATE,\n template_variables: Optional[Dict[str, Any]] = None,\n) -> str:\n\n # ALERT: Replace with better approach before Bokeh 3.x compatible release\n if resources.mode == 'server':\n dist_url = f'{state.rel_path}/{LOCAL_DIST}' if state.rel_path else LOCAL_DIST\n else:\n dist_url = CDN_DIST\n\n doc = session.document\n doc._template_variables['theme_name'] = config.theme\n doc._template_variables['dist_url'] = dist_url\n for root in doc.roots:\n patch_model_css(root, dist_url=dist_url)\n\n render_item = RenderItem(\n token = token or session.token,\n roots = doc.roots,\n use_for_title = False,\n )\n\n if template_variables is None:\n template_variables = {}\n\n if template is FILE:\n template = BASE_TEMPLATE\n\n with set_curdoc(doc):\n bundle = bundle_resources(doc.roots, resources)\n html = html_page_for_render_items(\n bundle, {}, [render_item], title, template=template,\n template_variables=template_variables\n )\n if config.global_loading_spinner:\n html = html.replace(\n '<body>', f'<body class=\"{LOADING_INDICATOR_CSS_CLASS} pn-{config.loading_spinner}\">'\n )\n return html\n\n\ndef autoload_js_script(doc, resources, token, element_id, app_path, absolute_url, absolute=False):\n resources = Resources.from_bokeh(resources, absolute=absolute)\n bundle = bundle_resources(doc.roots, resources)\n\n render_items = [RenderItem(token=token, elementid=element_id, use_for_title=False)]\n bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))\n\n return AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)\n\ndef destroy_document(self, session):\n \"\"\"\n Override for Document.destroy() without calling gc.collect directly.\n The gc.collect() call is scheduled as a task, ensuring that when\n multiple documents are destroyed in quick succession we do not\n schedule excessive garbage collection.\n \"\"\"\n if session is not None:\n self.remove_on_change(session)\n\n del self._roots\n del self._theme\n del self._template\n self._session_context = None\n\n self.callbacks.destroy()\n self.models.destroy()\n self.modules.destroy()\n\n # Clear periodic callbacks\n for cb in state._periodic.get(self, []):\n cb.stop()\n\n # Clean up pn.state to avoid tasks getting executed on dead session\n for attr in dir(state):\n # _param_watchers is deprecated in Param 2.0 and will raise a warning\n if not 
attr.startswith('_') or attr == \"_param_watchers\":\n continue\n state_obj = getattr(state, attr)\n if isinstance(state_obj, weakref.WeakKeyDictionary) and self in state_obj:\n del state_obj[self]\n\n # Schedule GC\n at = dt.datetime.now() + dt.timedelta(seconds=5)\n state.schedule_task('gc.collect', gc.collect, at=at)\n\n del self.destroy\n\n# Patch Server to attach task factory to asyncio loop and handle Admin server context\nclass Server(BokehServer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if state._admin_context:\n state._admin_context._loop = self._loop\n\n def start(self) -> None:\n super().start()\n if state._admin_context:\n self._loop.add_callback(state._admin_context.run_load_hook)\n\n def stop(self, wait: bool = True) -> None:\n super().stop(wait=wait)\n if state._admin_context:\n state._admin_context.run_unload_hook()\n\nbokeh.server.server.Server = Server\n\n\n# Patch Application to handle session callbacks\nclass Application(BkApplication):\n\n def __init__(self, *args, **kwargs):\n self._admin = kwargs.pop('admin', None)\n super().__init__(*args, **kwargs)\n\n async def on_session_created(self, session_context):\n with set_curdoc(session_context._document):\n if self._admin is not None:\n config._admin = self._admin\n for cb in state._on_session_created_internal+state._on_session_created:\n cb(session_context)\n await super().on_session_created(session_context)\n\n def initialize_document(self, doc):\n super().initialize_document(doc)\n if doc in state._templates and doc not in state._templates[doc]._documents:\n template = state._templates[doc]\n with set_curdoc(doc):\n template.server_doc(title=template.title, location=True, doc=doc)\n\nbokeh.command.util.Application = Application # type: ignore\n\nclass SessionPrefixHandler:\n\n @contextmanager\n def _session_prefix(self):\n prefix = self.request.uri.replace(self.application_context._url, '')\n if not prefix.endswith('/'):\n prefix += '/'\n base_url = urljoin('/', prefix)\n rel_path = '/'.join(['..'] * self.application_context._url.strip('/').count('/'))\n old_url, old_rel = state.base_url, state.rel_path\n\n # Handle autoload.js absolute paths\n abs_url = self.get_argument('bokeh-absolute-url', default=None)\n if abs_url is not None:\n rel_path = abs_url.replace(self.application_context._url, '')\n\n with edit_readonly(state):\n state.base_url = base_url\n state.rel_path = rel_path\n try:\n yield\n finally:\n with edit_readonly(state):\n state.base_url = old_url\n state.rel_path = old_rel\n\nclass LoginUrlMixin:\n \"\"\"\n Overrides the AuthRequestHandler.get_login_url implementation to\n correctly handle prefixes.\n \"\"\"\n\n def get_login_url(self):\n ''' Delegates to``get_login_url`` method of the auth provider, or the\n ``login_url`` attribute.\n\n '''\n if self.application.auth_provider.get_login_url is not None:\n return '.' + self.application.auth_provider.get_login_url(self)\n if self.application.auth_provider.login_url is not None:\n return '.' 
+ self.application.auth_provider.login_url\n raise RuntimeError('login_url or get_login_url() must be supplied when authentication hooks are enabled')\n\n\n# Patch Bokeh DocHandler URL\nclass DocHandler(LoginUrlMixin, BkDocHandler, SessionPrefixHandler):\n\n @authenticated\n async def get_session(self):\n from ..config import config\n path = self.request.path\n session = None\n if config.reuse_sessions and path in state._session_key_funcs:\n key = state._session_key_funcs[path](self.request)\n session = state._sessions.get(key)\n if session is None:\n session = await super().get_session()\n with set_curdoc(session.document):\n if config.reuse_sessions:\n key_func = config.session_key_func or (lambda r: (r.path, r.arguments.get('theme', [b'default'])[0].decode('utf-8')))\n state._session_key_funcs[path] = key_func\n key = key_func(self.request)\n state._sessions[key] = session\n session.block_expiration()\n return session\n\n @authenticated\n async def get(self, *args, **kwargs):\n app = self.application\n with self._session_prefix():\n key_func = state._session_key_funcs.get(self.request.path, lambda r: r.path)\n old_request = key_func(self.request) in state._sessions\n session = await self.get_session()\n if old_request and state._sessions.get(key_func(self.request)) is session:\n session_id = generate_session_id(\n secret_key=self.application.secret_key,\n signed=self.application.sign_sessions\n )\n payload = get_token_payload(session.token)\n del payload['session_expiry']\n token = generate_jwt_token(\n session_id,\n secret_key=app.secret_key,\n signed=app.sign_sessions,\n expiration=app.session_token_expiration,\n extra_payload=payload\n )\n else:\n token = session.token\n logger.info(LOG_SESSION_CREATED, id(session.document))\n with set_curdoc(session.document):\n resources = Resources.from_bokeh(self.application.resources())\n auth_cb = config.authorize_callback\n authorized = False\n if auth_cb:\n auth_cb = config.authorize_callback\n auth_params = inspect.signature(auth_cb).parameters\n if len(auth_params) == 1:\n auth_args = (state.user_info,)\n elif len(auth_params) == 2:\n auth_args = (state.user_info, self.request.path,)\n else:\n raise RuntimeError(\n 'Authorization callback must accept either 1) a single argument '\n 'which is the user name or 2) two arguments which includes the '\n 'user name and the url path the user is trying to access.'\n )\n auth_error = f'{state.user} is not authorized to access this application.'\n try:\n authorized = auth_cb(*auth_args)\n if isinstance(authorized, str):\n self.redirect(authorized)\n return\n elif not authorized:\n auth_error = (\n f'Authorization callback errored. Could not validate user name \"{state.user}\" '\n f'for the given app \"{self.request.path}\".'\n )\n if authorized:\n auth_error = None\n except Exception:\n auth_error = f'Authorization callback errored. 
Could not validate user {state.user}.'\n else:\n authorized = True\n\n if authorized:\n page = server_html_page_for_session(\n session, resources=resources, title=session.document.title,\n token=token, template=session.document.template,\n template_variables=session.document.template_variables,\n )\n else:\n if config.auth_template:\n with open(config.auth_template) as f:\n template = _env.from_string(f.read())\n else:\n template = ERROR_TEMPLATE\n page = template.render(\n npm_cdn=config.npm_cdn,\n title='Panel: Authorization Error',\n error_type='Authorization Error',\n error='User is not authorized.',\n error_msg=auth_error\n )\n self.set_header(\"Content-Type\", 'text/html')\n self.write(page)\n\nper_app_patterns[0] = (r'/?', DocHandler)\n\n# Patch Bokeh Autoload handler\nclass AutoloadJsHandler(BkAutoloadJsHandler, SessionPrefixHandler):\n ''' Implements a custom Tornado handler for the autoload JS chunk\n\n '''\n\n async def get(self, *args, **kwargs) -> None:\n element_id = self.get_argument(\"bokeh-autoload-element\", default=None)\n if not element_id:\n self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')\n return\n\n app_path = self.get_argument(\"bokeh-app-path\", default=\"/\")\n absolute_url = self.get_argument(\"bokeh-absolute-url\", default=None)\n\n if absolute_url:\n server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))\n else:\n server_url = None\n\n with self._session_prefix():\n session = await self.get_session()\n with set_curdoc(session.document):\n resources = Resources.from_bokeh(\n self.application.resources(server_url), absolute=True\n )\n js = autoload_js_script(\n session.document, resources, session.token, element_id,\n app_path, absolute_url, absolute=True\n )\n\n self.set_header(\"Content-Type\", 'application/javascript')\n self.write(js)\n\nper_app_patterns[3] = (r'/autoload.js', AutoloadJsHandler)\n\nclass RootHandler(LoginUrlMixin, BkRootHandler):\n \"\"\"\n Custom RootHandler that provides the CDN_DIST directory as a\n template variable.\n \"\"\"\n\n def render(self, *args, **kwargs):\n kwargs['PANEL_CDN'] = CDN_DIST\n return super().render(*args, **kwargs)\n\ntoplevel_patterns[0] = (r'/?', RootHandler)\nbokeh.server.tornado.RootHandler = RootHandler\n\n\nclass ComponentResourceHandler(StaticFileHandler):\n \"\"\"\n A handler that serves local resources relative to a Python module.\n The handler resolves a specific Panel component by module reference\n and name, then resolves an attribute on that component to check\n if it contains the requested resource path.\n\n /<endpoint>/<module>/<class>/<attribute>/<path>\n \"\"\"\n\n _resource_attrs = [\n '__css__', '__javascript__', '__js_module__', '__javascript_modules__', '_resources',\n '_css', '_js', 'base_css', 'css', '_stylesheets', 'modifiers'\n ]\n\n def initialize(self, path: Optional[str] = None, default_filename: Optional[str] = None):\n self.root = path\n self.default_filename = default_filename\n\n def parse_url_path(self, path: str) -> str:\n \"\"\"\n Resolves the resource the URL pattern refers to.\n \"\"\"\n parts = path.split('/')\n if len(parts) < 4:\n raise HTTPError(400, 'Malformed URL')\n mod, cls, rtype, *subpath = parts\n try:\n module = importlib.import_module(mod)\n except ModuleNotFoundError:\n raise HTTPError(404, 'Module not found')\n try:\n component = getattr(module, cls)\n except AttributeError:\n raise HTTPError(404, 'Component not found')\n\n # May only access resources listed in specific attributes\n if rtype not in 
self._resource_attrs:\n raise HTTPError(403, 'Requested resource type not valid.')\n\n try:\n resources = getattr(component, rtype)\n except AttributeError:\n raise HTTPError(404, 'Resource type not found')\n\n # Handle template resources\n if rtype == '_resources':\n rtype = subpath[0]\n subpath = subpath[1:]\n if rtype not in resources:\n raise HTTPError(404, 'Resource type not found')\n resources = resources[rtype]\n rtype = f'_resources/{rtype}'\n elif rtype == 'modifiers':\n resources = [\n st for rs in resources.values() for st in rs.get('stylesheets', [])\n if isinstance(st, str)\n ]\n\n if isinstance(resources, dict):\n resources = list(resources.values())\n elif isinstance(resources, (str, pathlib.PurePath)):\n resources = [resources]\n resources = [\n str(resolve_custom_path(component, resource, relative=True)).replace(os.path.sep, '/')\n for resource in resources\n ]\n\n rel_path = '/'.join(subpath)\n\n # Important: May only access resources explicitly listed on the component\n # Otherwise this potentially exposes all files to the web\n if rel_path not in resources:\n raise HTTPError(403, 'Requested resource was not listed.')\n\n if not module.__file__:\n raise HTTPError(404, 'Requested module does not reference a file.')\n\n return str(pathlib.Path(module.__file__).parent / rel_path)\n\n @classmethod\n def get_absolute_path(cls, root: str, path: str) -> str:\n return path\n\n def validate_absolute_path(self, root: str, absolute_path: str) -> str:\n if not os.path.exists(absolute_path):\n raise HTTPError(404)\n if not os.path.isfile(absolute_path):\n raise HTTPError(403, \"%s is not a file\", self.path)\n return absolute_path\n\n\ndef modify_document(self, doc: 'Document'):\n from bokeh.io.doc import set_curdoc as bk_set_curdoc\n\n from ..config import config\n\n logger.info(LOG_SESSION_LAUNCHING, id(doc))\n\n if config.autoreload:\n path = self._runner.path\n argv = self._runner._argv\n handler = type(self)(filename=path, argv=argv)\n self._runner = handler._runner\n\n module = self._runner.new_module()\n\n # If no module was returned it means the code runner has some permanent\n # unfixable problem, e.g. the configured source code has a syntax error\n if module is None:\n return\n\n # One reason modules are stored is to prevent the module\n # from being gc'd before the document is. A symptom of a\n # gc'd module is that its globals become None. 
Additionally\n # stored modules are used to provide correct paths to\n # custom models resolver.\n sys.modules[module.__name__] = module\n doc.modules._modules.append(module)\n\n try:\n old_doc = curdoc()\n except RuntimeError:\n old_doc = None\n bk_set_curdoc(doc)\n\n if config.autoreload:\n set_curdoc(doc)\n state.onload(autoreload_watcher)\n\n sessions = []\n\n try:\n def post_check():\n newdoc = curdoc()\n # Do not let curdoc track modules when autoreload is enabled\n # otherwise it will erroneously complain that there is\n # a memory leak\n if config.autoreload:\n newdoc.modules._modules = []\n\n # script is supposed to edit the doc not replace it\n if newdoc is not doc:\n raise RuntimeError(\"%s at '%s' replaced the output document\" % (self._origin, self._runner.path))\n\n def handle_exception(handler, e):\n from bokeh.application.handlers.handler import handle_exception\n\n from ..pane import Alert\n\n # Clean up\n del sys.modules[module.__name__]\n\n if hasattr(doc, 'modules'):\n doc.modules._modules.remove(module)\n else:\n doc._modules.remove(module)\n bokeh.application.handlers.code_runner.handle_exception = handle_exception\n tb = html.escape(traceback.format_exc()).replace('\\033[1m', '<b>').replace('\\033[0m', '</b>')\n\n # Serve error\n e_msg = str(e).replace('\\033[1m', '<b>').replace('\\033[0m', '</b>')\n Alert(\n f'<b>{type(e).__name__}</b>: {e_msg}\\n<pre style=\"overflow-y: auto\">{tb}</pre>',\n alert_type='danger', margin=5, sizing_mode='stretch_width'\n ).servable()\n\n if config.autoreload:\n bokeh.application.handlers.code_runner.handle_exception = handle_exception\n\n state._launching.append(doc)\n with _monkeypatch_io(self._loggers):\n with patch_curdoc(doc):\n with profile_ctx(config.profiler) as sessions:\n self._runner.run(module, post_check)\n\n def _log_session_destroyed(session_context):\n logger.info(LOG_SESSION_DESTROYED, id(doc))\n\n doc.on_session_destroyed(_log_session_destroyed)\n doc.destroy = partial(destroy_document, doc) # type: ignore\n finally:\n state._launching.remove(doc)\n if config.profiler:\n try:\n path = doc.session_context.request.path\n state._profiles[(path, config.profiler)] += sessions\n state.param.trigger('_profiles')\n except Exception:\n pass\n if old_doc is not None:\n bk_set_curdoc(old_doc)\n\nCodeHandler.modify_document = modify_document # type: ignore\n\n# Copied from bokeh 2.4.0, to fix directly in bokeh at some point.\ndef create_static_handler(prefix, key, app):\n # patch\n key = '/__patchedroot' if key == '/' else key\n\n route = prefix\n route += \"/static/(.*)\" if key == \"/\" else key + \"/static/(.*)\"\n if app.static_path is not None:\n return (route, StaticFileHandler, {\"path\" : app.static_path})\n return (route, StaticHandler, {})\n\nbokeh.server.tornado.create_static_handler = create_static_handler\n\n#---------------------------------------------------------------------\n# Async patches\n#---------------------------------------------------------------------\n\n# Bokeh 2.4.x patches the asyncio event loop policy but Tornado 6.1\n# support the WindowsProactorEventLoopPolicy so we restore it,\n# unless we detect we are running on jupyter_server.\nif (\n sys.platform == 'win32' and\n sys.version_info[:3] >= (3, 8, 0) and\n tornado.version_info >= (6, 1) and\n type(asyncio.get_event_loop_policy()) is asyncio.WindowsSelectorEventLoopPolicy and\n (('jupyter_server' not in sys.modules and\n 'jupyter_client' not in sys.modules) or\n 'pytest' in sys.modules)\n):\n 
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef serve(\n panels: TViewableFuncOrPath | Mapping[str, TViewableFuncOrPath],\n port: int = 0,\n address: Optional[str] = None,\n websocket_origin: Optional[str | list[str]] = None,\n loop: Optional[IOLoop] = None,\n show: bool = True,\n start: bool = True,\n title: Optional[str] = None,\n verbose: bool = True,\n location: bool = True,\n threaded: bool = False,\n admin: bool = False,\n **kwargs\n) -> StoppableThread | Server:\n \"\"\"\n Allows serving one or more panel objects on a single server.\n The panels argument should be either a Panel object or a function\n returning a Panel object or a dictionary of these two. If a\n dictionary is supplied the keys represent the slugs at which\n each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`\n will serve apps at /app and /app2 on the server.\n\n Reference: https://panel.holoviz.org/user_guide/Server_Configuration.html#serving-multiple-apps\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable or function}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n address : str\n The address the server should listen on for HTTP requests.\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=True)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=True)\n Whether to start the Server\n title: str or {str: str} (optional, default=None)\n An HTML title for the application or a dictionary mapping\n from the URL slug to a customized title\n verbose: boolean (optional, default=True)\n Whether to print the address and port\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n threaded: boolean (default=False)\n Whether to start the server on a new Thread\n admin: boolean (default=False)\n Whether to enable the admin panel\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n \"\"\"\n # Empty layout are valid and the Bokeh warning is silenced as usually\n # not relevant to Panel users.\n silence(EMPTY_LAYOUT, True)\n kwargs = dict(kwargs, **dict(\n port=port, address=address, websocket_origin=websocket_origin,\n loop=loop, show=show, start=start, title=title, verbose=verbose,\n location=location, admin=admin\n ))\n if threaded:\n kwargs['loop'] = loop = IOLoop(make_current=False) if loop is None else loop\n server = StoppableThread(\n target=get_server, io_loop=loop, args=(panels,), kwargs=kwargs\n )\n server_id = kwargs.get('server_id', uuid.uuid4().hex)\n state._threads[server_id] = server\n server.start()\n else:\n return get_server(panels, **kwargs)\n return server\n\n\nclass ProxyFallbackHandler(RequestHandler):\n \"\"\"A `RequestHandler` that wraps another HTTP server callback and\n proxies the subpath.\n \"\"\"\n\n def initialize(self, fallback, proxy=None):\n self.fallback = fallback\n self.proxy = proxy\n\n def 
prepare(self):\n if self.proxy:\n self.request.path = self.request.path.replace(self.proxy, '')\n self.fallback(self.request)\n self._finished = True\n self.on_finish()\n\n\ndef get_static_routes(static_dirs):\n \"\"\"\n Returns a list of tornado routes of StaticFileHandlers given a\n dictionary of slugs and file paths to serve.\n \"\"\"\n patterns = []\n for slug, path in static_dirs.items():\n if not slug.startswith('/'):\n slug = '/' + slug\n if slug == '/static':\n raise ValueError(\"Static file route may not use /static \"\n \"this is reserved for internal use.\")\n path = fullpath(path)\n if not os.path.isdir(path):\n raise ValueError(\"Cannot serve non-existent path %s\" % path)\n patterns.append(\n (r\"%s/(.*)\" % slug, StaticFileHandler, {\"path\": path})\n )\n patterns.append((\n f'/{COMPONENT_PATH}(.*)', ComponentResourceHandler, {}\n ))\n return patterns\n\ndef get_server(\n panel: TViewableFuncOrPath | Mapping[str, TViewableFuncOrPath],\n port: int = 0,\n address: Optional[str] = None,\n websocket_origin: Optional[str | list[str]] = None,\n loop: Optional[IOLoop] = None,\n show: bool = False,\n start: bool = False,\n title: bool = None,\n verbose: bool = False,\n location: bool | Location = True,\n admin: bool = False,\n static_dirs: Mapping[str, str] = {},\n basic_auth: str = None,\n oauth_provider: Optional[str] = None,\n oauth_key: Optional[str] = None,\n oauth_secret: Optional[str] = None,\n oauth_redirect_uri: Optional[str] = None,\n oauth_extra_params: Mapping[str, str] = {},\n oauth_error_template: Optional[str] = None,\n cookie_secret: Optional[str] = None,\n oauth_encryption_key: Optional[str] = None,\n logout_template: Optional[str] = None,\n session_history: Optional[int] = None,\n liveness: bool | str = False,\n **kwargs\n) -> Server:\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n address : str\n The address the server should listen on for HTTP requests.\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on.\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start.\n start : boolean(optional, default=False)\n Whether to start the Server.\n title : str or {str: str} (optional, default=None)\n An HTML title for the application or a dictionary mapping\n from the URL slug to a customized title.\n verbose: boolean (optional, default=False)\n Whether to report the address and port.\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n admin: boolean (default=False)\n Whether to enable the admin panel\n static_dirs: dict (optional, default={})\n A dictionary of routes and local paths to serve as static file\n directories on those routes.\n basic_auth: str (optional, default=None)\n Password or filepath to use with basic auth provider.\n oauth_provider: str\n One of the available OAuth providers\n oauth_key: str (optional, default=None)\n The public OAuth identifier\n oauth_secret: str 
(optional, default=None)\n The client secret for the OAuth provider\n oauth_redirect_uri: Optional[str] = None,\n Overrides the default OAuth redirect URI\n oauth_extra_params: dict (optional, default={})\n Additional information for the OAuth provider\n oauth_error_template: str (optional, default=None)\n Jinja2 template used when displaying authentication errors.\n cookie_secret: str (optional, default=None)\n A random secret string to sign cookies (required for OAuth)\n oauth_encryption_key: str (optional, default=False)\n A random encryption key used for encrypting OAuth user\n information and access tokens.\n logout_template: str (optional, default=None)\n Jinja2 template served when viewing the logout endpoint when\n authentication is enabled.\n session_history: int (optional, default=None)\n The amount of session history to accumulate. If set to non-zero\n and non-None value will launch a REST endpoint at\n /rest/session_info, which returns information about the session\n history.\n liveness: bool | str (optional, default=False)\n Whether to add a liveness endpoint. If a string is provided\n then this will be used as the endpoint, otherwise the endpoint\n will be hosted at /liveness.\n kwargs: dict\n Additional keyword arguments to pass to Server instance.\n\n Returns\n -------\n server : panel.io.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from ..config import config\n from .rest import REST_PROVIDERS\n\n server_id = kwargs.pop('server_id', uuid.uuid4().hex)\n kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])\n if isinstance(panel, dict):\n apps = {}\n for slug, app in panel.items():\n if slug.endswith('/') and not slug == '/':\n raise ValueError(f\"Invalid URL: trailing slash '/' used for {slug!r} not supported.\")\n if isinstance(title, dict):\n try:\n title_ = title[slug]\n except KeyError:\n raise KeyError(\n \"Keys of the title dictionary and of the apps \"\n f\"dictionary must match. 
No {slug} key found in the \"\n \"title dictionary.\")\n else:\n title_ = title\n slug = slug if slug.startswith('/') else '/'+slug\n if 'flask' in sys.modules:\n from flask import Flask\n if isinstance(app, Flask):\n wsgi = WSGIContainer(app)\n if slug == '/':\n raise ValueError('Flask apps must be served on a subpath.')\n if not slug.endswith('/'):\n slug += '/'\n extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,\n dict(fallback=wsgi, proxy=slug)))\n continue\n if isinstance(app, pathlib.Path):\n app = str(app) # enables serving apps from Paths\n if (isinstance(app, str) and (app.endswith(\".py\") or app.endswith(\".ipynb\") or app.endswith('.md'))\n and os.path.isfile(app)):\n apps[slug] = app = build_single_handler_application(app)\n app._admin = admin\n elif isinstance(app, BkApplication):\n apps[slug] = app\n else:\n handler = FunctionHandler(partial(_eval_panel, app, server_id, title_, location, admin))\n apps[slug] = Application(handler, admin=admin)\n else:\n if isinstance(panel, pathlib.Path):\n panel = str(panel) # enables serving apps from Paths\n if (isinstance(panel, str) and (panel.endswith(\".py\") or panel.endswith(\".ipynb\") or panel.endswith('.md'))\n and os.path.isfile(panel)):\n apps = {'/': build_single_handler_application(panel)}\n else:\n handler = FunctionHandler(partial(_eval_panel, panel, server_id, title, location, admin))\n apps = {'/': Application(handler, admin=admin)}\n\n if admin:\n if '/admin' in apps:\n raise ValueError(\n 'Cannot enable admin panel because another app is being served '\n 'on the /admin endpoint'\n )\n from .admin import admin_panel\n admin_handler = FunctionHandler(admin_panel)\n apps['/admin'] = Application(admin_handler)\n\n extra_patterns += get_static_routes(static_dirs)\n\n if session_history is not None:\n config.session_history = session_history\n if config.session_history != 0:\n pattern = REST_PROVIDERS['param']([], 'rest')\n extra_patterns.extend(pattern)\n state.publish('session_info', state, ['session_info'])\n\n if liveness:\n liveness_endpoint = 'liveness' if isinstance(liveness, bool) else liveness\n extra_patterns += [(r\"/%s\" % liveness_endpoint, LivenessHandler, dict(applications=apps))]\n\n opts = dict(kwargs)\n if loop:\n asyncio.set_event_loop(loop.asyncio_loop)\n opts['io_loop'] = loop\n elif opts.get('num_procs', 1) == 1:\n opts['io_loop'] = IOLoop.current()\n\n if 'index' not in opts:\n opts['index'] = INDEX_HTML\n\n if address is not None:\n opts['address'] = address\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n # Configure OAuth\n from ..config import config\n server_config = {}\n if basic_auth:\n from ..auth import BasicProvider\n server_config['basic_auth'] = basic_auth\n basic_login_template = kwargs.pop('basic_login_template', None)\n opts['auth_provider'] = BasicProvider(\n basic_login_template,\n logout_template=logout_template\n )\n elif oauth_provider:\n from ..auth import OAuthProvider\n config.oauth_provider = oauth_provider # type: ignore\n opts['auth_provider'] = OAuthProvider(\n error_template=oauth_error_template,\n logout_template=logout_template\n )\n if oauth_key:\n config.oauth_key = oauth_key # type: ignore\n if oauth_secret:\n config.oauth_secret = oauth_secret # type: ignore\n if oauth_extra_params:\n config.oauth_extra_params = oauth_extra_params # type: ignore\n if cookie_secret:\n config.cookie_secret = cookie_secret # type: ignore\n if oauth_redirect_uri:\n 
config.oauth_redirect_uri = oauth_redirect_uri # type: ignore\n opts['cookie_secret'] = config.cookie_secret\n\n server = Server(apps, port=port, **opts)\n if verbose:\n address = server.address or 'localhost'\n url = f\"http://{address}:{server.port}{server.prefix}\"\n print(f\"Launching server at {url}\")\n\n state._servers[server_id] = (server, panel, [])\n state._server_config[server._tornado] = server_config\n\n if show:\n def show_callback():\n server.show('/login' if config.oauth_provider else '/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n except TypeError:\n warn(\n \"IOLoop couldn't be started. Ensure it is started by \"\n \"process invoking the panel.io.server.serve.\"\n )\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop: IOLoop, **kwargs):\n super().__init__(**kwargs)\n self.io_loop = io_loop\n\n def run(self) -> None:\n if hasattr(self, '_target'):\n target, args, kwargs = self._target, self._args, self._kwargs # type: ignore\n else:\n target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs # type: ignore\n if not target:\n return\n bokeh_server = None\n try:\n bokeh_server = target(*args, **kwargs)\n finally:\n if isinstance(bokeh_server, Server):\n try:\n bokeh_server.stop()\n except Exception:\n pass\n if hasattr(self, '_target'):\n del self._target, self._args, self._kwargs # type: ignore\n else:\n del self._Thread__target, self._Thread__args, self._Thread__kwargs # type: ignore\n\n def stop(self) -> None:\n self.io_loop.add_callback(self.io_loop.stop)\n", "path": "panel/io/server.py"}]} |
gh_patches_debug_1397 | rasdani/github-patches | git_diff | wagtail__wagtail-11226 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True, uploading SVG files gives an error
Uploading SVG images fails when WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True.
We get an AttributeError at /admin/images/multiple/add/:
'SvgImageFile' object has no attribute 'detect_faces'
We are working on Debian and have the following packages installed (amongst others) in our virtual env:
wagtail==5.1.3
Willow==1.6.2
Pillow==10.1.0
pillow-heif==0.13.1
Wand==0.6.11
opencv-python==4.8.1.78
In our settings:
WAGTAILIMAGES_EXTENSIONS = ["gif", "jpg", "jpeg", "png", "webp", "svg"]
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True
Commenting out the last line allows SVG uploads.
Python 3.9.2
Django==4.2.7
Observed in the Firefox browser. Thanks for looking into this!
--- END ISSUE ---
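(Editor's note, not part of the issue above: the traceback shows face/feature detection being run on an SVG, which Willow's `SvgImageFile` cannot rasterize. As a purely illustrative, hedged sketch — not necessarily the project's actual fix — a custom image model could skip the suggestion step for SVGs, relying on the `is_svg()` helper that appears in `wagtail/images/models.py` below.)

```python
# Hypothetical guard on a custom image model; the class name MyImage is
# illustrative, while AbstractImage, is_svg() and get_suggested_focal_point()
# are taken from the file shown below.
from wagtail.images.models import AbstractImage


class MyImage(AbstractImage):
    def get_suggested_focal_point(self):
        if self.is_svg():
            # SVGs have no raster pixels for face/feature detection,
            # so return no suggested focal point instead of erroring.
            return None
        return super().get_suggested_focal_point()
```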
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/images/models.py`
Content:
```
1 import hashlib
2 import itertools
3 import logging
4 import os.path
5 import re
6 import time
7 from collections import OrderedDict, defaultdict
8 from concurrent.futures import ThreadPoolExecutor
9 from contextlib import contextmanager
10 from io import BytesIO
11 from tempfile import SpooledTemporaryFile
12 from typing import Any, Dict, Iterable, List, Optional, Union
13
14 import willow
15 from django.apps import apps
16 from django.conf import settings
17 from django.core import checks
18 from django.core.cache import DEFAULT_CACHE_ALIAS, InvalidCacheBackendError, caches
19 from django.core.cache.backends.base import BaseCache
20 from django.core.files import File
21 from django.core.files.base import ContentFile
22 from django.core.files.storage import default_storage
23 from django.db import models
24 from django.db.models import Q
25 from django.forms.utils import flatatt
26 from django.urls import reverse
27 from django.utils.functional import cached_property, classproperty
28 from django.utils.module_loading import import_string
29 from django.utils.safestring import mark_safe
30 from django.utils.translation import gettext_lazy as _
31 from taggit.managers import TaggableManager
32
33 from wagtail import hooks
34 from wagtail.coreutils import string_to_ascii
35 from wagtail.images.exceptions import (
36 InvalidFilterSpecError,
37 UnknownOutputImageFormatError,
38 )
39 from wagtail.images.fields import image_format_name_to_content_type
40 from wagtail.images.image_operations import (
41 FilterOperation,
42 FormatOperation,
43 ImageTransform,
44 TransformOperation,
45 )
46 from wagtail.images.rect import Rect
47 from wagtail.models import CollectionMember, ReferenceIndex
48 from wagtail.search import index
49 from wagtail.search.queryset import SearchableQuerySetMixin
50 from wagtail.utils.file import hash_filelike
51
52 logger = logging.getLogger("wagtail.images")
53
54
55 IMAGE_FORMAT_EXTENSIONS = {
56 "avif": ".avif",
57 "jpeg": ".jpg",
58 "png": ".png",
59 "gif": ".gif",
60 "webp": ".webp",
61 "svg": ".svg",
62 }
63
64
65 class SourceImageIOError(IOError):
66 """
67 Custom exception to distinguish IOErrors that were thrown while opening the source image
68 """
69
70 pass
71
72
73 class ImageQuerySet(SearchableQuerySetMixin, models.QuerySet):
74 def prefetch_renditions(self, *filters):
75 """
76 Prefetches generated renditions for the given filters.
77 Returns all renditions when no filters are provided.
78 """
79 rendition_model = self.model.get_rendition_model()
80 queryset = rendition_model.objects.all()
81
82 if filters:
83 # Get a list of filter spec strings. The given value could contain Filter objects
84 filter_specs = [
85 filter.spec if isinstance(filter, Filter) else filter
86 for filter in filters
87 ]
88 queryset = queryset.filter(filter_spec__in=filter_specs)
89
90 return self.prefetch_related(
91 models.Prefetch(
92 "renditions",
93 queryset=queryset,
94 to_attr="prefetched_renditions",
95 )
96 )
97
98
99 def get_upload_to(instance, filename):
100 """
101 Obtain a valid upload path for an image file.
102
103 This needs to be a module-level function so that it can be referenced within migrations,
104 but simply delegates to the `get_upload_to` method of the instance, so that AbstractImage
105 subclasses can override it.
106 """
107 return instance.get_upload_to(filename)
108
109
110 def get_rendition_upload_to(instance, filename):
111 """
112 Obtain a valid upload path for an image rendition file.
113
114 This needs to be a module-level function so that it can be referenced within migrations,
115 but simply delegates to the `get_upload_to` method of the instance, so that AbstractRendition
116 subclasses can override it.
117 """
118 return instance.get_upload_to(filename)
119
120
121 def get_rendition_storage():
122 """
123 Obtain the storage object for an image rendition file.
124 Returns custom storage (if defined), or the default storage.
125
126 This needs to be a module-level function, because we do not yet
127 have an instance when Django loads the models.
128 """
129 storage = getattr(settings, "WAGTAILIMAGES_RENDITION_STORAGE", default_storage)
130 if isinstance(storage, str):
131 module = import_string(storage)
132 storage = module()
133 return storage
134
135
136 class ImageFileMixin:
137 def is_stored_locally(self):
138 """
139 Returns True if the image is hosted on the local filesystem
140 """
141 try:
142 self.file.path
143
144 return True
145 except NotImplementedError:
146 return False
147
148 def get_file_size(self):
149 if self.file_size is None:
150 try:
151 self.file_size = self.file.size
152 except Exception as e: # noqa: BLE001
153 # File not found
154 #
155 # Have to catch everything, because the exception
156 # depends on the file subclass, and therefore the
157 # storage being used.
158 raise SourceImageIOError(str(e))
159
160 self.save(update_fields=["file_size"])
161
162 return self.file_size
163
164 @contextmanager
165 def open_file(self):
166 # Open file if it is closed
167 close_file = False
168 try:
169 image_file = self.file
170
171 if self.file.closed:
172 # Reopen the file
173 if self.is_stored_locally():
174 self.file.open("rb")
175 else:
176 # Some external storage backends don't allow reopening
177 # the file. Get a fresh file instance. #1397
178 storage = self._meta.get_field("file").storage
179 image_file = storage.open(self.file.name, "rb")
180
181 close_file = True
182 except OSError as e:
183 # re-throw this as a SourceImageIOError so that calling code can distinguish
184 # these from IOErrors elsewhere in the process
185 raise SourceImageIOError(str(e))
186
187 # Seek to beginning
188 image_file.seek(0)
189
190 try:
191 yield image_file
192 finally:
193 if close_file:
194 image_file.close()
195
196 @contextmanager
197 def get_willow_image(self):
198 with self.open_file() as image_file:
199 yield willow.Image.open(image_file)
200
201
202 class WagtailImageFieldFile(models.fields.files.ImageFieldFile):
203 """
204 Override the ImageFieldFile in order to use Willow instead
205 of Pillow.
206 """
207
208 def _get_image_dimensions(self):
209 """
210 override _get_image_dimensions to call our own get_image_dimensions.
211 """
212 if not hasattr(self, "_dimensions_cache"):
213 self._dimensions_cache = self.get_image_dimensions()
214 return self._dimensions_cache
215
216 def get_image_dimensions(self):
217 """
218 The upstream ImageFieldFile calls a local function get_image_dimensions. In this implementation we've made get_image_dimensions
219 a method to make it easier to override for Wagtail developers in the future.
220 """
221 close = self.closed
222 try:
223 self.open()
224 image = willow.Image.open(self)
225 return image.get_size()
226 finally:
227 if close:
228 self.close()
229 else:
230 self.seek(0)
231
232
233 class WagtailImageField(models.ImageField):
234 """
235 Override the attr_class on the Django ImageField Model to inject our ImageFieldFile
236 with Willow support.
237 """
238
239 attr_class = WagtailImageFieldFile
240
241
242 class AbstractImage(ImageFileMixin, CollectionMember, index.Indexed, models.Model):
243 title = models.CharField(max_length=255, verbose_name=_("title"))
244 """ Use local ImageField with Willow support. """
245 file = WagtailImageField(
246 verbose_name=_("file"),
247 upload_to=get_upload_to,
248 width_field="width",
249 height_field="height",
250 )
251 width = models.IntegerField(verbose_name=_("width"), editable=False)
252 height = models.IntegerField(verbose_name=_("height"), editable=False)
253 created_at = models.DateTimeField(
254 verbose_name=_("created at"), auto_now_add=True, db_index=True
255 )
256 uploaded_by_user = models.ForeignKey(
257 settings.AUTH_USER_MODEL,
258 verbose_name=_("uploaded by user"),
259 null=True,
260 blank=True,
261 editable=False,
262 on_delete=models.SET_NULL,
263 )
264 uploaded_by_user.wagtail_reference_index_ignore = True
265
266 tags = TaggableManager(help_text=None, blank=True, verbose_name=_("tags"))
267
268 focal_point_x = models.PositiveIntegerField(null=True, blank=True)
269 focal_point_y = models.PositiveIntegerField(null=True, blank=True)
270 focal_point_width = models.PositiveIntegerField(null=True, blank=True)
271 focal_point_height = models.PositiveIntegerField(null=True, blank=True)
272
273 file_size = models.PositiveIntegerField(null=True, editable=False)
274 # A SHA-1 hash of the file contents
275 file_hash = models.CharField(
276 max_length=40, blank=True, editable=False, db_index=True
277 )
278
279 objects = ImageQuerySet.as_manager()
280
281 def _set_file_hash(self):
282 with self.open_file() as f:
283 self.file_hash = hash_filelike(f)
284
285 def get_file_hash(self):
286 if self.file_hash == "":
287 self._set_file_hash()
288 self.save(update_fields=["file_hash"])
289
290 return self.file_hash
291
292 def _set_image_file_metadata(self):
293 self.file.open()
294
295 # Set new image file size
296 self.file_size = self.file.size
297
298 # Set new image file hash
299 self._set_file_hash()
300 self.file.seek(0)
301
302 def get_upload_to(self, filename):
303 folder_name = "original_images"
304 filename = self.file.field.storage.get_valid_name(filename)
305
306 # convert the filename to simple ascii characters and then
307 # replace non-ascii characters in filename with _ , to sidestep issues with filesystem encoding
308 filename = "".join(
309 (i if ord(i) < 128 else "_") for i in string_to_ascii(filename)
310 )
311
312 # Truncate filename so it fits in the 100 character limit
313 # https://code.djangoproject.com/ticket/9893
314 full_path = os.path.join(folder_name, filename)
315 if len(full_path) >= 95:
316 chars_to_trim = len(full_path) - 94
317 prefix, extension = os.path.splitext(filename)
318 filename = prefix[:-chars_to_trim] + extension
319 full_path = os.path.join(folder_name, filename)
320
321 return full_path
322
323 def get_usage(self):
324 return ReferenceIndex.get_grouped_references_to(self)
325
326 @property
327 def usage_url(self):
328 return reverse("wagtailimages:image_usage", args=(self.id,))
329
330 search_fields = CollectionMember.search_fields + [
331 index.SearchField("title", boost=10),
332 index.AutocompleteField("title"),
333 index.FilterField("title"),
334 index.RelatedFields(
335 "tags",
336 [
337 index.SearchField("name", boost=10),
338 index.AutocompleteField("name"),
339 ],
340 ),
341 index.FilterField("uploaded_by_user"),
342 ]
343
344 def __str__(self):
345 return self.title
346
347 def get_rect(self):
348 return Rect(0, 0, self.width, self.height)
349
350 def get_focal_point(self):
351 if (
352 self.focal_point_x is not None
353 and self.focal_point_y is not None
354 and self.focal_point_width is not None
355 and self.focal_point_height is not None
356 ):
357 return Rect.from_point(
358 self.focal_point_x,
359 self.focal_point_y,
360 self.focal_point_width,
361 self.focal_point_height,
362 )
363
364 def has_focal_point(self):
365 return self.get_focal_point() is not None
366
367 def set_focal_point(self, rect):
368 if rect is not None:
369 self.focal_point_x = rect.centroid_x
370 self.focal_point_y = rect.centroid_y
371 self.focal_point_width = rect.width
372 self.focal_point_height = rect.height
373 else:
374 self.focal_point_x = None
375 self.focal_point_y = None
376 self.focal_point_width = None
377 self.focal_point_height = None
378
379 def get_suggested_focal_point(self):
380 with self.get_willow_image() as willow:
381 faces = willow.detect_faces()
382
383 if faces:
384 # Create a bounding box around all faces
385 left = min(face[0] for face in faces)
386 top = min(face[1] for face in faces)
387 right = max(face[2] for face in faces)
388 bottom = max(face[3] for face in faces)
389 focal_point = Rect(left, top, right, bottom)
390 else:
391 features = willow.detect_features()
392 if features:
393 # Create a bounding box around all features
394 left = min(feature[0] for feature in features)
395 top = min(feature[1] for feature in features)
396 right = max(feature[0] for feature in features)
397 bottom = max(feature[1] for feature in features)
398 focal_point = Rect(left, top, right, bottom)
399 else:
400 return None
401
402 # Add 20% to width and height and give it a minimum size
403 x, y = focal_point.centroid
404 width, height = focal_point.size
405
406 width *= 1.20
407 height *= 1.20
408
409 width = max(width, 100)
410 height = max(height, 100)
411
412 return Rect.from_point(x, y, width, height)
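#   [Editor's illustration, not part of wagtail/images/models.py: a hedged
#    usage sketch for the method above, assuming an image instance `image`
#    whose underlying file supports Willow face/feature detection:
#        rect = image.get_suggested_focal_point()
#        if rect is not None:
#            image.set_focal_point(rect)
#            image.save()
#   ]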
413
414 @classmethod
415 def get_rendition_model(cls):
416 """Get the Rendition model for this Image model"""
417 return cls.renditions.rel.related_model
418
419 def _get_prefetched_renditions(self) -> Union[Iterable["AbstractRendition"], None]:
420 if "renditions" in getattr(self, "_prefetched_objects_cache", {}):
421 return self.renditions.all()
422 return getattr(self, "prefetched_renditions", None)
423
424 def _add_to_prefetched_renditions(self, rendition: "AbstractRendition") -> None:
425 # Reuse this rendition if requested again from this object
426 try:
427 self._prefetched_objects_cache["renditions"]._result_cache.append(rendition)
428 except (AttributeError, KeyError):
429 pass
430 try:
431 self.prefetched_renditions.append(rendition)
432 except AttributeError:
433 pass
434
435 def get_rendition(self, filter: Union["Filter", str]) -> "AbstractRendition":
436 """
437 Returns a ``Rendition`` instance with a ``file`` field value (an
438 image) reflecting the supplied ``filter`` value and focal point values
439 from this object.
440
441 Note: If using custom image models, an instance of the custom rendition
442 model will be returned.
443 """
444 Rendition = self.get_rendition_model()
445
446 if isinstance(filter, str):
447 filter = Filter(spec=filter)
448
449 try:
450 rendition = self.find_existing_rendition(filter)
451 except Rendition.DoesNotExist:
452 rendition = self.create_rendition(filter)
453 # Reuse this rendition if requested again from this object
454 self._add_to_prefetched_renditions(rendition)
455
456 cache_key = Rendition.construct_cache_key(
457 self, filter.get_cache_key(self), filter.spec
458 )
459 Rendition.cache_backend.set(cache_key, rendition)
460
461 return rendition
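#   [Editor's illustration, not part of wagtail/images/models.py: a hedged
#    usage sketch for get_rendition(), assuming `img` is a saved image
#    instance. The spec string follows the "operation1-var1|operation2-var1"
#    pattern handled by the Filter class further down, e.g.:
#        rendition = img.get_rendition("width-400")
#        rendition_url = rendition.file.url
#   ]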
462
463 def find_existing_rendition(self, filter: "Filter") -> "AbstractRendition":
464 """
465 Returns an existing ``Rendition`` instance with a ``file`` field value
466 (an image) reflecting the supplied ``filter`` value and focal point
467 values from this object.
468
469 If no such rendition exists, a ``DoesNotExist`` error is raised for the
470 relevant model.
471
472 Note: If using custom image models, an instance of the custom rendition
473 model will be returned.
474 """
475 Rendition = self.get_rendition_model()
476
477 try:
478 return self.find_existing_renditions(filter)[filter]
479 except KeyError:
480 raise Rendition.DoesNotExist
481
482 def create_rendition(self, filter: "Filter") -> "AbstractRendition":
483 """
484 Creates and returns a ``Rendition`` instance with a ``file`` field
485 value (an image) reflecting the supplied ``filter`` value and focal
486 point values from this object.
487
488 This method is usually called by ``Image.get_rendition()``, after first
489 checking that a suitable rendition does not already exist.
490
491 Note: If using custom image models, an instance of the custom rendition
492 model will be returned.
493 """
494 # Because of unique constraints applied to the model, we use
495 # get_or_create() to guard against race conditions
496 rendition, created = self.renditions.get_or_create(
497 filter_spec=filter.spec,
498 focal_point_key=filter.get_cache_key(self),
499 defaults={"file": self.generate_rendition_file(filter)},
500 )
501 return rendition
502
503 def get_renditions(
504 self, *filters: Union["Filter", str]
505 ) -> Dict[str, "AbstractRendition"]:
506 """
507 Returns a ``dict`` of ``Rendition`` instances with image files reflecting
508 the supplied ``filters``, keyed by filter spec patterns.
509
510 Note: If using custom image models, instances of the custom rendition
511 model will be returned.
512 """
513 Rendition = self.get_rendition_model()
514 # We don’t support providing mixed Filter and string arguments in the same call.
515 if isinstance(filters[0], str):
516 filters = [Filter(spec) for spec in dict.fromkeys(filters).keys()]
517
518 # Find existing renditions where possible
519 renditions = self.find_existing_renditions(*filters)
520
521 # Create any renditions not found in prefetched values, cache or database
522 not_found = [f for f in filters if f not in renditions]
523 for filter, rendition in self.create_renditions(*not_found).items():
524 self._add_to_prefetched_renditions(rendition)
525 renditions[filter] = rendition
526
527 # Update the cache
528 cache_additions = {
529 Rendition.construct_cache_key(
530 self, filter.get_cache_key(self), filter.spec
531 ): rendition
532 for filter, rendition in renditions.items()
533 # prevent writing of cached data back to the cache
534 if not getattr(rendition, "_from_cache", False)
535 }
536 if cache_additions:
537 Rendition.cache_backend.set_many(cache_additions)
538
539 # Make sure key insertion order matches the input order.
540 return {filter.spec: renditions[filter] for filter in filters}
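#   [Editor's illustration, not part of wagtail/images/models.py: a hedged
#    sketch of the dict returned by get_renditions(), keyed by filter spec:
#        renditions = img.get_renditions("width-400", "width-800")
#        # -> {"width-400": <Rendition>, "width-800": <Rendition>}
#   ]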
541
542 def find_existing_renditions(
543 self, *filters: "Filter"
544 ) -> Dict["Filter", "AbstractRendition"]:
545 """
546 Returns a dictionary of existing ``Rendition`` instances with ``file``
547 values (images) reflecting the supplied ``filters`` and the focal point
548 values from this object.
549
550         Filters for which an existing rendition cannot be found are omitted
551 from the return value. If none of the requested renditions have been
552 created before, the return value will be an empty dict.
553 """
554 Rendition = self.get_rendition_model()
555 filters_by_spec: Dict[str, Filter] = {f.spec: f for f in filters}
556 found: Dict[Filter, AbstractRendition] = {}
557
558 # Interrogate prefetched values first (where available)
559 prefetched_renditions = self._get_prefetched_renditions()
560 if prefetched_renditions is not None:
561 # NOTE: When renditions are prefetched, it's assumed that if the
562 # requested renditions exist, they will be present in the
563 # prefetched value, and further cache/database lookups are avoided.
564
565 # group renditions by the filters of interest
566 potential_matches: Dict[Filter, List[AbstractRendition]] = defaultdict(list)
567 for rendition in prefetched_renditions:
568 try:
569 filter = filters_by_spec[rendition.filter_spec]
570 except KeyError:
571 continue # this rendition can be ignored
572 else:
573 potential_matches[filter].append(rendition)
574
575 # For each filter we have renditions for, look for one with a
576 # 'focal_point_key' value matching filter.get_cache_key()
577 for filter, renditions in potential_matches.items():
578 focal_point_key = filter.get_cache_key(self)
579 for rendition in renditions:
580 if rendition.focal_point_key == focal_point_key:
581 # to prevent writing of cached data back to the cache
582 rendition._from_cache = True
583 # use this rendition
584 found[filter] = rendition
585 # skip to the next filter
586 break
587 else:
588 # Renditions are not prefetched, so attempt to find suitable
589 # items in the cache or database
590
591 # Query the cache first
592 cache_keys = [
593 Rendition.construct_cache_key(self, filter.get_cache_key(self), spec)
594 for spec, filter in filters_by_spec.items()
595 ]
596 for rendition in Rendition.cache_backend.get_many(cache_keys).values():
597 filter = filters_by_spec[rendition.filter_spec]
598 found[filter] = rendition
599
600 # For items not found in the cache, look in the database
601 not_found = [f for f in filters if f not in found]
602 if not_found:
603 lookup_q = Q()
604 for filter in not_found:
605 lookup_q |= Q(
606 filter_spec=filter.spec,
607 focal_point_key=filter.get_cache_key(self),
608 )
609 for rendition in self.renditions.filter(lookup_q):
610 filter = filters_by_spec[rendition.filter_spec]
611 found[filter] = rendition
612 return found
613
614 def create_renditions(
615 self, *filters: "Filter"
616 ) -> Dict["Filter", "AbstractRendition"]:
617 """
618 Creates multiple ``Rendition`` instances with image files reflecting the supplied
619 ``filters``, and returns them as a ``dict`` keyed by the relevant ``Filter`` instance.
620 Where suitable renditions already exist in the database, they will be returned instead,
621 so as not to create duplicates.
622
623 This method is usually called by ``Image.get_renditions()``, after first
624 checking that a suitable rendition does not already exist.
625
626 Note: If using custom image models, an instance of the custom rendition
627 model will be returned.
628 """
629 Rendition = self.get_rendition_model()
630
631 if not filters:
632 return {}
633
634 if len(filters) == 1:
635 # create_rendition() is better for single renditions, as it can
636 # utilize QuerySet.get_or_create(), which has better handling of
637 # race conditions
638 filter = filters[0]
639 return {filter: self.create_rendition(filter)}
640
641 return_value: Dict[Filter, AbstractRendition] = {}
642 filter_map: Dict[str, Filter] = {f.spec: f for f in filters}
643
644 with self.open_file() as file:
645 original_image_bytes = file.read()
646
647 to_create = []
648
649 def _generate_single_rendition(filter):
650 # Using ContentFile here ensures we generate all renditions. Simply
651 # passing self.file required several page reloads to generate all
652 image_file = self.generate_rendition_file(
653 filter, source=ContentFile(original_image_bytes, name=self.file.name)
654 )
655 to_create.append(
656 Rendition(
657 image=self,
658 filter_spec=filter.spec,
659 focal_point_key=filter.get_cache_key(self),
660 file=image_file,
661 )
662 )
663
664 with ThreadPoolExecutor() as executor:
665 executor.map(_generate_single_rendition, filters)
666
667 # Rendition generation can take a while. So, if other processes have created
668 # identical renditions in the meantime, we should find them to avoid clashes.
669 # NB: Clashes can still occur, because there is no get_or_create() equivalent
670 # for multiple objects. However, this will reduce that risk considerably.
671 files_for_deletion: List[File] = []
672
673 # Assemble Q() to identify potential clashes
674 lookup_q = Q()
675 for rendition in to_create:
676 lookup_q |= Q(
677 filter_spec=rendition.filter_spec,
678 focal_point_key=rendition.focal_point_key,
679 )
680
681 for existing in self.renditions.filter(lookup_q):
682 # Include the existing rendition in the return value
683 filter = filter_map[existing.filter_spec]
684 return_value[filter] = existing
685
686 for new in to_create:
687 if (
688 new.filter_spec == existing.filter_spec
689 and new.focal_point_key == existing.focal_point_key
690 ):
691 # Avoid creating the new version
692 to_create.remove(new)
693 # Mark for deletion later, so as not to hold up creation
694 files_for_deletion.append(new.file)
695
696 for new in Rendition.objects.bulk_create(to_create, ignore_conflicts=True):
697 filter = filter_map[new.filter_spec]
698 return_value[filter] = new
699
700 # Delete redundant rendition image files
701 for file in files_for_deletion:
702 file.delete(save=False)
703
704 return return_value
705
706 def generate_rendition_file(self, filter: "Filter", *, source: File = None) -> File:
707 """
708 Generates an in-memory image matching the supplied ``filter`` value
709 and focal point value from this object, wraps it in a ``File`` object
710 with a suitable filename, and returns it. The return value is used
711 as the ``file`` field value for rendition objects saved by
712 ``AbstractImage.create_rendition()``.
713
714 If the contents of ``self.file`` has already been read into memory, the
715 ``source`` keyword can be used to provide a reference to the in-memory
716 ``File``, bypassing the need to reload the image contents from storage.
717
718 NOTE: The responsibility of generating the new image from the original
719 falls to the supplied ``filter`` object. If you want to do anything
720 custom with rendition images (for example, to preserve metadata from
721 the original image), you might want to consider swapping out ``filter``
722 for an instance of a custom ``Filter`` subclass of your design.
723 """
724
725 cache_key = filter.get_cache_key(self)
726
727 logger.debug(
728 "Generating '%s' rendition for image %d",
729 filter.spec,
730 self.pk,
731 )
732
733 start_time = time.time()
734
735 try:
736 generated_image = filter.run(
737 self,
738 SpooledTemporaryFile(max_size=settings.FILE_UPLOAD_MAX_MEMORY_SIZE),
739 source=source,
740 )
741
742 logger.debug(
743 "Generated '%s' rendition for image %d in %.1fms",
744 filter.spec,
745 self.pk,
746 (time.time() - start_time) * 1000,
747 )
748 except: # noqa:B901,E722
749 logger.debug(
750 "Failed to generate '%s' rendition for image %d",
751 filter.spec,
752 self.pk,
753 )
754 raise
755
756 # Generate filename
757 input_filename = os.path.basename(self.file.name)
758 input_filename_without_extension, input_extension = os.path.splitext(
759 input_filename
760 )
761 output_extension = (
762 filter.spec.replace("|", ".")
763 + IMAGE_FORMAT_EXTENSIONS[generated_image.format_name]
764 )
765 if cache_key:
766 output_extension = cache_key + "." + output_extension
767
768 # Truncate filename to prevent it going over 60 chars
769 output_filename_without_extension = input_filename_without_extension[
770 : (59 - len(output_extension))
771 ]
772 output_filename = output_filename_without_extension + "." + output_extension
773
774 return File(generated_image.f, name=output_filename)
775
776 def is_portrait(self):
777 return self.width < self.height
778
779 def is_landscape(self):
780 return self.height < self.width
781
782 def is_svg(self):
783 _, ext = os.path.splitext(self.file.name)
784 return ext.lower() == ".svg"
785
786 @property
787 def filename(self):
788 return os.path.basename(self.file.name)
789
790 @property
791 def default_alt_text(self):
792 # by default the alt text field (used in rich text insertion) is populated
793 # from the title. Subclasses might provide a separate alt field, and
794 # override this
795 return self.title
796
797 def is_editable_by_user(self, user):
798 from wagtail.images.permissions import permission_policy
799
800 return permission_policy.user_has_permission_for_instance(user, "change", self)
801
802 class Meta:
803 abstract = True
804
805
806 class Image(AbstractImage):
807 admin_form_fields = (
808 "title",
809 "file",
810 "collection",
811 "tags",
812 "focal_point_x",
813 "focal_point_y",
814 "focal_point_width",
815 "focal_point_height",
816 )
817
818 class Meta(AbstractImage.Meta):
819 verbose_name = _("image")
820 verbose_name_plural = _("images")
821 permissions = [
822 ("choose_image", "Can choose image"),
823 ]
824
825
826 class Filter:
827 """
828 Represents one or more operations that can be applied to an Image to produce a rendition
829 appropriate for final display on the website. Usually this would be a resize operation,
830 but could potentially involve colour processing, etc.
831 """
832
833 spec_pattern = re.compile(r"^[A-Za-z0-9_\-\.]+$")
834 pipe_spec_pattern = re.compile(r"^[A-Za-z0-9_\-\.\|]+$")
835 expanding_spec_pattern = re.compile(r"^[A-Za-z0-9_\-\.{},]+$")
836 pipe_expanding_spec_pattern = re.compile(r"^[A-Za-z0-9_\-\.{},\|]+$")
837
838 def __init__(self, spec=None):
839 # The spec pattern is operation1-var1-var2|operation2-var1
840 self.spec = spec
841
842 @classmethod
843 def expand_spec(self, spec: Union["str", Iterable["str"]]) -> List["str"]:
844 """
845 Converts a spec pattern with brace-expansions, into a list of spec patterns.
846 For example, "width-{100,200}" becomes ["width-100", "width-200"].
847
848 Supports providing filter specs already split, or pipe or space-separated.
849 """
850 if isinstance(spec, str):
851 separator = "|" if "|" in spec else " "
852 spec = spec.split(separator)
853
854 expanded_segments = []
855 for segment in spec:
856 # Check if segment has braces to expand
857 if "{" in segment and "}" in segment:
858 prefix, options_suffixed = segment.split("{")
859 options_pattern, suffix = options_suffixed.split("}")
860 options = options_pattern.split(",")
861 expanded_segments.append(
862 [prefix + option + suffix for option in options]
863 )
864 else:
865 expanded_segments.append([segment])
866
867 # Cartesian product of all expanded segments (equivalent to nested for loops).
868 combinations = itertools.product(*expanded_segments)
869
870 return ["|".join(combination) for combination in combinations]
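    # Illustrative aside (not from the original models.py): combining the brace
    # expansion with the cartesian product above, a call such as
    #
    #     Filter.expand_spec("width-{400,800}|format-{webp,jpeg}")
    #
    # returns the four piped combinations
    #     ["width-400|format-webp", "width-400|format-jpeg",
    #      "width-800|format-webp", "width-800|format-jpeg"]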
871
872 @cached_property
873 def operations(self):
874 # Search for operations
875 registered_operations = {}
876 for fn in hooks.get_hooks("register_image_operations"):
877 registered_operations.update(dict(fn()))
878
879 # Build list of operation objects
880 operations = []
881 for op_spec in self.spec.split("|"):
882 op_spec_parts = op_spec.split("-")
883
884 if op_spec_parts[0] not in registered_operations:
885 raise InvalidFilterSpecError(
886 "Unrecognised operation: %s" % op_spec_parts[0]
887 )
888
889 op_class = registered_operations[op_spec_parts[0]]
890 operations.append(op_class(*op_spec_parts))
891 return operations
892
893 @property
894 def transform_operations(self):
895 return [
896 operation
897 for operation in self.operations
898 if isinstance(operation, TransformOperation)
899 ]
900
901 @property
902 def filter_operations(self):
903 return [
904 operation
905 for operation in self.operations
906 if isinstance(operation, FilterOperation)
907 ]
908
909 def get_transform(self, image, size=None):
910 """
911 Returns an ImageTransform with all the transforms in this filter applied.
912
913 The ImageTransform is an object with two attributes:
914 - .size - The size of the final image
915 - .matrix - An affine transformation matrix that combines any
916 transform/scale/rotation operations that need to be applied to the image
917 """
918
919 if not size:
920 size = (image.width, image.height)
921
922 transform = ImageTransform(size, image_is_svg=image.is_svg())
923 for operation in self.transform_operations:
924 transform = operation.run(transform, image)
925 return transform
926
927 @contextmanager
928 def get_willow_image(self, image: AbstractImage, source: File = None):
929 if source is not None:
930 yield willow.Image.open(source)
931 else:
932 with image.get_willow_image() as willow_image:
933 yield willow_image
934
935 def run(self, image: AbstractImage, output: BytesIO, source: File = None):
936 with self.get_willow_image(image, source) as willow:
937
938 original_format = willow.format_name
939
940 # Fix orientation of image
941 willow = willow.auto_orient()
942
943 # Transform the image
944 transform = self.get_transform(
945 image, (willow.image.width, willow.image.height)
946 )
947 willow = willow.crop(transform.get_rect().round())
948 willow = willow.resize(transform.size)
949
950 # Apply filters
951 env = {
952 "original-format": original_format,
953 }
954 for operation in self.filter_operations:
955 willow = operation.run(willow, image, env) or willow
956
957 # Find the output format to use
958 if "output-format" in env:
959 # Developer specified an output format
960 output_format = env["output-format"]
961 else:
962 # Convert bmp and webp to png by default
963 default_conversions = {
964 "avif": "png",
965 "bmp": "png",
966 "webp": "png",
967 }
968
969 # Convert unanimated GIFs to PNG as well
970 if not willow.has_animation():
971 default_conversions["gif"] = "png"
972
973 # Allow the user to override the conversions
974 conversion = getattr(settings, "WAGTAILIMAGES_FORMAT_CONVERSIONS", {})
975 default_conversions.update(conversion)
976
977 # Get the converted output format falling back to the original
978 output_format = default_conversions.get(
979 original_format, original_format
980 )
981
982 if output_format == "jpeg":
983 # Allow changing of JPEG compression quality
984 if "jpeg-quality" in env:
985 quality = env["jpeg-quality"]
986 else:
987 quality = getattr(settings, "WAGTAILIMAGES_JPEG_QUALITY", 85)
988
989 # If the image has an alpha channel, give it a white background
990 if willow.has_alpha():
991 willow = willow.set_background_color_rgb((255, 255, 255))
992
993 return willow.save_as_jpeg(
994 output, quality=quality, progressive=True, optimize=True
995 )
996 elif output_format == "png":
997 return willow.save_as_png(output, optimize=True)
998 elif output_format == "gif":
999 return willow.save_as_gif(output)
1000 elif output_format == "webp":
1001 # Allow changing of WebP compression quality
1002 if (
1003 "output-format-options" in env
1004 and "lossless" in env["output-format-options"]
1005 ):
1006 return willow.save_as_webp(output, lossless=True)
1007 elif "webp-quality" in env:
1008 quality = env["webp-quality"]
1009 else:
1010 quality = getattr(settings, "WAGTAILIMAGES_WEBP_QUALITY", 80)
1011
1012 return willow.save_as_webp(output, quality=quality)
1013 elif output_format == "avif":
1014 # Allow changing of AVIF compression quality
1015 if (
1016 "output-format-options" in env
1017 and "lossless" in env["output-format-options"]
1018 ):
1019 return willow.save_as_avif(output, lossless=True)
1020 elif "avif-quality" in env:
1021 quality = env["avif-quality"]
1022 else:
1023 quality = getattr(settings, "WAGTAILIMAGES_AVIF_QUALITY", 80)
1024 return willow.save_as_avif(output, quality=quality)
1025 elif output_format == "svg":
1026 return willow.save_as_svg(output)
1027 raise UnknownOutputImageFormatError(
1028 f"Unknown output image format '{output_format}'"
1029 )
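    # Illustrative aside (not from the original models.py): with the defaults above,
    # avif, bmp, webp and unanimated gif sources are re-encoded as PNG unless the
    # project overrides the mapping, e.g. (hypothetical setting values):
    #
    #     WAGTAILIMAGES_FORMAT_CONVERSIONS = {"bmp": "jpeg", "webp": "webp"}
    #
    # which would save BMP uploads as JPEG and keep WebP output as WebP.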
1030
1031 def get_cache_key(self, image):
1032 vary_parts = []
1033
1034 for operation in self.operations:
1035 for field in getattr(operation, "vary_fields", []):
1036 value = getattr(image, field, "")
1037 vary_parts.append(str(value))
1038
1039 vary_string = "-".join(vary_parts)
1040
1041 # Return blank string if there are no vary fields
1042 if not vary_string:
1043 return ""
1044
1045 return hashlib.sha1(vary_string.encode("utf-8")).hexdigest()[:8]
1046
1047
1048 class ResponsiveImage:
1049 """
1050 A custom object used to represent a collection of renditions.
1051 Provides a 'renditions' property to access the renditions,
1052 and renders to the front-end HTML.
1053 """
1054
1055 def __init__(
1056 self,
1057 renditions: Dict[str, "AbstractRendition"],
1058 attrs: Optional[Dict[str, Any]] = None,
1059 ):
1060 self.renditions = list(renditions.values())
1061 self.attrs = attrs
1062
1063 @classmethod
1064 def get_width_srcset(cls, renditions_list: List["AbstractRendition"]):
1065 if len(renditions_list) == 1:
1066 # No point in using width descriptors if there is a single image.
1067 return renditions_list[0].url
1068
1069 return ", ".join([f"{r.url} {r.width}w" for r in renditions_list])
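    # Illustrative aside (not from the original models.py; URLs are hypothetical):
    # for two renditions 400px and 800px wide, the join above yields a string like
    #     "/media/images/pic.width-400.png 400w, /media/images/pic.width-800.png 800w"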
1070
1071 def __html__(self):
1072 attrs = self.attrs or {}
1073
1074 # No point in adding a srcset if there is a single image.
1075 if len(self.renditions) > 1:
1076 attrs["srcset"] = self.get_width_srcset(self.renditions)
1077
1078 # The first rendition is the "base" / "fallback" image.
1079 return self.renditions[0].img_tag(attrs)
1080
1081 def __str__(self):
1082 return mark_safe(self.__html__())
1083
1084 def __bool__(self):
1085 return bool(self.renditions)
1086
1087 def __eq__(self, other: "ResponsiveImage"):
1088 if isinstance(other, ResponsiveImage):
1089 return self.renditions == other.renditions and self.attrs == other.attrs
1090 return False
1091
1092
1093 class Picture(ResponsiveImage):
1094 # Keep this separate from FormatOperation.supported_formats,
1095 # as the order our formats are defined in is essential for the picture tag.
1096 # Defines the order of <source> elements in the tag when format operations
1097 # are in use, and the priority order to identify the "fallback" format.
1098 # The browser will pick the first supported format in this list.
1099 source_format_order = ["avif", "webp", "jpeg", "png", "gif"]
1100
1101 def __init__(
1102 self,
1103 renditions: Dict[str, "AbstractRendition"],
1104 attrs: Optional[Dict[str, Any]] = None,
1105 ):
1106 super().__init__(renditions, attrs)
1107 # Store renditions grouped by format separately for access from templates.
1108 self.formats = self.get_formats(renditions)
1109
1110 def get_formats(
1111 self, renditions: Dict[str, "AbstractRendition"]
1112 ) -> Dict[str, List["AbstractRendition"]]:
1113 """
1114 Group renditions by the format they are for, if any.
1115 If there is only one format, no grouping is required.
1116 """
1117 formats = defaultdict(list)
1118 for spec, rendition in renditions.items():
1119 for fmt in FormatOperation.supported_formats:
1120 # Identify the spec’s format (if any).
1121 if f"format-{fmt}" in spec:
1122 formats[fmt].append(rendition)
1123 break
1124 # Avoid the split by format if there is only one.
1125 if len(formats.keys()) < 2:
1126 return {}
1127
1128 return formats
1129
1130 def get_fallback_format(self):
1131 for fmt in reversed(self.source_format_order):
1132 if fmt in self.formats:
1133 return fmt
1134
1135 def __html__(self):
1136 # If there aren’t multiple formats, render a vanilla img tag with srcset.
1137 if not self.formats:
1138 return mark_safe(f"<picture>{super().__html__()}</picture>")
1139
1140 attrs = self.attrs or {}
1141
1142 sizes = f'sizes="{attrs["sizes"]}" ' if "sizes" in attrs else ""
1143 fallback_format = self.get_fallback_format()
1144 fallback_renditions = self.formats[fallback_format]
1145
1146 sources = []
1147
1148 for fmt in self.source_format_order:
1149 if fmt != fallback_format and fmt in self.formats:
1150 srcset = self.get_width_srcset(self.formats[fmt])
1151 mime = image_format_name_to_content_type(fmt)
1152 sources.append(f'<source srcset="{srcset}" {sizes}type="{mime}">')
1153
1154 if len(fallback_renditions) > 1:
1155 attrs["srcset"] = self.get_width_srcset(fallback_renditions)
1156
1157 # The first rendition is the "base" / "fallback" image.
1158 fallback = fallback_renditions[0].img_tag(attrs)
1159
1160 return mark_safe(f"<picture>{''.join(sources)}{fallback}</picture>")
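    # Illustrative aside (not from the original models.py; URLs are hypothetical):
    # with avif, webp and jpeg renditions, the method above emits markup shaped like
    #
    #     <picture>
    #         <source srcset="pic.avif 400w, ..." type="image/avif">
    #         <source srcset="pic.webp 400w, ..." type="image/webp">
    #         <img src="pic.jpg" width="400" height="300" alt="...">
    #     </picture>
    #
    # where jpeg becomes the fallback because it is the first available format when
    # ``source_format_order`` is walked in reverse.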
1161
1162
1163 class AbstractRendition(ImageFileMixin, models.Model):
1164 filter_spec = models.CharField(max_length=255, db_index=True)
1165 """ Use local ImageField with Willow support. """
1166 file = WagtailImageField(
1167 upload_to=get_rendition_upload_to,
1168 storage=get_rendition_storage,
1169 width_field="width",
1170 height_field="height",
1171 )
1172 width = models.IntegerField(editable=False)
1173 height = models.IntegerField(editable=False)
1174 focal_point_key = models.CharField(
1175 max_length=16, blank=True, default="", editable=False
1176 )
1177
1178 wagtail_reference_index_ignore = True
1179
1180 @property
1181 def url(self):
1182 return self.file.url
1183
1184 @property
1185 def alt(self):
1186 return self.image.default_alt_text
1187
1188 @property
1189 def attrs(self):
1190 """
1191 The src, width, height, and alt attributes for an <img> tag, as a HTML
1192 string
1193 """
1194 return flatatt(self.attrs_dict)
1195
1196 @property
1197 def attrs_dict(self):
1198 """
1199 A dict of the src, width, height, and alt attributes for an <img> tag.
1200 """
1201 return OrderedDict(
1202 [
1203 ("src", self.url),
1204 ("width", self.width),
1205 ("height", self.height),
1206 ("alt", self.alt),
1207 ]
1208 )
1209
1210 @property
1211 def full_url(self):
1212 url = self.url
1213 if hasattr(settings, "WAGTAILADMIN_BASE_URL") and url.startswith("/"):
1214 url = settings.WAGTAILADMIN_BASE_URL + url
1215 return url
1216
1217 @property
1218 def filter(self):
1219 return Filter(self.filter_spec)
1220
1221 @cached_property
1222 def focal_point(self):
1223 image_focal_point = self.image.get_focal_point()
1224 if image_focal_point:
1225 transform = self.filter.get_transform(self.image)
1226 return image_focal_point.transform(transform)
1227
1228 @property
1229 def background_position_style(self):
1230 """
1231 Returns a `background-position` rule to be put in the inline style of an element which uses the rendition for its background.
1232
1233 This positions the rendition according to the value of the focal point. This is helpful for when the element does not have
1234 the same aspect ratio as the rendition.
1235
1236 For example:
1237
1238 {% image page.image fill-1920x600 as image %}
1239 <div style="background-image: url('{{ image.url }}'); {{ image.background_position_style }}">
1240 </div>
1241 """
1242 focal_point = self.focal_point
1243 if focal_point:
1244 horz = int((focal_point.x * 100) // self.width)
1245 vert = int((focal_point.y * 100) // self.height)
1246 return f"background-position: {horz}% {vert}%;"
1247 else:
1248 return "background-position: 50% 50%;"
1249
1250 def img_tag(self, extra_attributes={}):
1251 attrs = self.attrs_dict.copy()
1252
1253 attrs.update(apps.get_app_config("wagtailimages").default_attrs)
1254
1255 attrs.update(extra_attributes)
1256
1257 return mark_safe(f"<img{flatatt(attrs)}>")
1258
1259 def __html__(self):
1260 return self.img_tag()
1261
1262 def get_upload_to(self, filename):
1263 folder_name = "images"
1264 filename = self.file.field.storage.get_valid_name(filename)
1265 return os.path.join(folder_name, filename)
1266
1267 @classmethod
1268 def check(cls, **kwargs):
1269 errors = super().check(**kwargs)
1270 if not cls._meta.abstract:
1271 if not any(
1272 set(constraint) == {"image", "filter_spec", "focal_point_key"}
1273 for constraint in cls._meta.unique_together
1274 ):
1275 errors.append(
1276 checks.Error(
1277 "Custom rendition model %r has an invalid unique_together setting"
1278 % cls,
1279 hint="Custom rendition models must include the constraint "
1280 "('image', 'filter_spec', 'focal_point_key') in their unique_together definition.",
1281 obj=cls,
1282 id="wagtailimages.E001",
1283 )
1284 )
1285
1286 return errors
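    # Illustrative aside (not from the original models.py): a custom rendition model
    # that satisfies the wagtailimages.E001 check above, assuming a corresponding
    # custom image model named ``CustomImage``:
    #
    #     class CustomRendition(AbstractRendition):
    #         image = models.ForeignKey(
    #             "CustomImage", related_name="renditions", on_delete=models.CASCADE
    #         )
    #
    #         class Meta:
    #             unique_together = (("image", "filter_spec", "focal_point_key"),)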
1287
1288 @staticmethod
1289 def construct_cache_key(image, filter_cache_key, filter_spec):
1290 return "wagtail-rendition-" + "-".join(
1291 [str(image.id), image.file_hash, filter_cache_key, filter_spec]
1292 )
1293
1294 @classproperty
1295 def cache_backend(cls) -> BaseCache:
1296 try:
1297 return caches["renditions"]
1298 except InvalidCacheBackendError:
1299 return caches[DEFAULT_CACHE_ALIAS]
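    # Illustrative aside (not from the original models.py; settings values are
    # hypothetical): the lookup above prefers a cache alias named "renditions"
    # when the project defines one, e.g.:
    #
    #     CACHES = {
    #         "default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
    #         "renditions": {
    #             "BACKEND": "django.core.cache.backends.redis.RedisCache",
    #             "LOCATION": "redis://127.0.0.1:6379/1",
    #         },
    #     }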
1300
1301 def get_cache_key(self):
1302 return self.construct_cache_key(
1303 self.image, self.focal_point_key, self.filter_spec
1304 )
1305
1306 def purge_from_cache(self):
1307 self.cache_backend.delete(self.get_cache_key())
1308
1309 class Meta:
1310 abstract = True
1311
1312
1313 class Rendition(AbstractRendition):
1314 image = models.ForeignKey(
1315 Image, related_name="renditions", on_delete=models.CASCADE
1316 )
1317
1318 class Meta:
1319 unique_together = (("image", "filter_spec", "focal_point_key"),)
1320
1321
1322 class UploadedImage(models.Model):
1323 """
1324 Temporary storage for images uploaded through the multiple image uploader, when validation rules (e.g.
1325 required metadata fields) prevent creating an Image object from the image file alone. In this case,
1326 the image file is stored against this model, to be turned into an Image object once the full form
1327 has been filled in.
1328 """
1329
1330 file = models.ImageField(upload_to="uploaded_images", max_length=200)
1331 uploaded_by_user = models.ForeignKey(
1332 settings.AUTH_USER_MODEL,
1333 verbose_name=_("uploaded by user"),
1334 null=True,
1335 blank=True,
1336 editable=False,
1337 on_delete=models.SET_NULL,
1338 )
1339 uploaded_by_user.wagtail_reference_index_ignore = True
1340
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/images/models.py b/wagtail/images/models.py
--- a/wagtail/images/models.py
+++ b/wagtail/images/models.py
@@ -377,6 +377,11 @@
             self.focal_point_height = None
 
     def get_suggested_focal_point(self):
+        if self.is_svg():
+            # We can't run feature detection on SVGs, and don't provide a
+            # pathway from SVG -> raster formats, so don't try it.
+            return None
+
         with self.get_willow_image() as willow:
             faces = willow.detect_faces()
 
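
For context, the verification info below quotes the failure as `AttributeError: 'SvgImageFile' object has no attribute 'detect_faces'`: Willow's SVG wrapper does not offer the face/feature-detection API that `get_suggested_focal_point()` calls, so the patch short-circuits the method for SVG originals using the existing `is_svg()` helper. A minimal sketch of the patched method (assuming the surrounding `AbstractImage` class from the file listing; not a verbatim copy):
```python
# Sketch only: shows how the guard added by the golden diff sits in front of the
# existing feature-detection code; the trailing body is elided.
def get_suggested_focal_point(self):
    if self.is_svg():
        # Feature detection is not available for SVG originals, so skip it.
        return None

    with self.get_willow_image() as willow:
        faces = willow.detect_faces()
        ...  # bounding-box / focal point logic continues as in the listing above
```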
| {"golden_diff": "diff --git a/wagtail/images/models.py b/wagtail/images/models.py\n--- a/wagtail/images/models.py\n+++ b/wagtail/images/models.py\n@@ -377,6 +377,11 @@\n self.focal_point_height = None\n \n def get_suggested_focal_point(self):\n+ if self.is_svg():\n+ # We can't run feature detection on SVGs, and don't provide a\n+ # pathway from SVG -> raster formats, so don't try it.\n+ return None\n+\n with self.get_willow_image() as willow:\n faces = willow.detect_faces()\n", "issue": "When WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True, upload of svg files gives an error\nUploading svg-images fails when WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True. \r\n\r\nWe get an AttributeError at /admin/images/multiple/add/ : \r\n'SvgImageFile' object has no attribute 'detect_faces'\r\n\r\nWe are working on Debian and have the following packages installed (amongst others) in our virtual env:\r\nwagtail==5.1.3\r\nWillow==1.6.2\r\nPillow==10.1.0\r\npillow-heif==0.13.1\r\nWand==0.6.11\r\nopencv-python==4.8.1.78\r\n\r\nIn our settings:\r\nWAGTAILIMAGES_EXTENSIONS = [\"gif\", \"jpg\", \"jpeg\", \"png\", \"webp\", \"svg\"] \r\nWAGTAILIMAGES_FEATURE_DETECTION_ENABLED = True\r\n\r\nCommenting the last line allows for svg uploads.\r\n\r\nPython 3.9.2\r\nDjango==4.2.7\r\n\r\nIn the Firefox browser. Thanks for looking into this! \n", "before_files": [{"content": "import hashlib\nimport itertools\nimport logging\nimport os.path\nimport re\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import contextmanager\nfrom io import BytesIO\nfrom tempfile import SpooledTemporaryFile\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nimport willow\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core import checks\nfrom django.core.cache import DEFAULT_CACHE_ALIAS, InvalidCacheBackendError, caches\nfrom django.core.cache.backends.base import BaseCache\nfrom django.core.files import File\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.forms.utils import flatatt\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property, classproperty\nfrom django.utils.module_loading import import_string\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.managers import TaggableManager\n\nfrom wagtail import hooks\nfrom wagtail.coreutils import string_to_ascii\nfrom wagtail.images.exceptions import (\n InvalidFilterSpecError,\n UnknownOutputImageFormatError,\n)\nfrom wagtail.images.fields import image_format_name_to_content_type\nfrom wagtail.images.image_operations import (\n FilterOperation,\n FormatOperation,\n ImageTransform,\n TransformOperation,\n)\nfrom wagtail.images.rect import Rect\nfrom wagtail.models import CollectionMember, ReferenceIndex\nfrom wagtail.search import index\nfrom wagtail.search.queryset import SearchableQuerySetMixin\nfrom wagtail.utils.file import hash_filelike\n\nlogger = logging.getLogger(\"wagtail.images\")\n\n\nIMAGE_FORMAT_EXTENSIONS = {\n \"avif\": \".avif\",\n \"jpeg\": \".jpg\",\n \"png\": \".png\",\n \"gif\": \".gif\",\n \"webp\": \".webp\",\n \"svg\": \".svg\",\n}\n\n\nclass SourceImageIOError(IOError):\n \"\"\"\n Custom exception to distinguish IOErrors that were thrown while opening the source image\n \"\"\"\n\n pass\n\n\nclass 
ImageQuerySet(SearchableQuerySetMixin, models.QuerySet):\n def prefetch_renditions(self, *filters):\n \"\"\"\n Prefetches generated renditions for the given filters.\n Returns all renditions when no filters are provided.\n \"\"\"\n rendition_model = self.model.get_rendition_model()\n queryset = rendition_model.objects.all()\n\n if filters:\n # Get a list of filter spec strings. The given value could contain Filter objects\n filter_specs = [\n filter.spec if isinstance(filter, Filter) else filter\n for filter in filters\n ]\n queryset = queryset.filter(filter_spec__in=filter_specs)\n\n return self.prefetch_related(\n models.Prefetch(\n \"renditions\",\n queryset=queryset,\n to_attr=\"prefetched_renditions\",\n )\n )\n\n\ndef get_upload_to(instance, filename):\n \"\"\"\n Obtain a valid upload path for an image file.\n\n This needs to be a module-level function so that it can be referenced within migrations,\n but simply delegates to the `get_upload_to` method of the instance, so that AbstractImage\n subclasses can override it.\n \"\"\"\n return instance.get_upload_to(filename)\n\n\ndef get_rendition_upload_to(instance, filename):\n \"\"\"\n Obtain a valid upload path for an image rendition file.\n\n This needs to be a module-level function so that it can be referenced within migrations,\n but simply delegates to the `get_upload_to` method of the instance, so that AbstractRendition\n subclasses can override it.\n \"\"\"\n return instance.get_upload_to(filename)\n\n\ndef get_rendition_storage():\n \"\"\"\n Obtain the storage object for an image rendition file.\n Returns custom storage (if defined), or the default storage.\n\n This needs to be a module-level function, because we do not yet\n have an instance when Django loads the models.\n \"\"\"\n storage = getattr(settings, \"WAGTAILIMAGES_RENDITION_STORAGE\", default_storage)\n if isinstance(storage, str):\n module = import_string(storage)\n storage = module()\n return storage\n\n\nclass ImageFileMixin:\n def is_stored_locally(self):\n \"\"\"\n Returns True if the image is hosted on the local filesystem\n \"\"\"\n try:\n self.file.path\n\n return True\n except NotImplementedError:\n return False\n\n def get_file_size(self):\n if self.file_size is None:\n try:\n self.file_size = self.file.size\n except Exception as e: # noqa: BLE001\n # File not found\n #\n # Have to catch everything, because the exception\n # depends on the file subclass, and therefore the\n # storage being used.\n raise SourceImageIOError(str(e))\n\n self.save(update_fields=[\"file_size\"])\n\n return self.file_size\n\n @contextmanager\n def open_file(self):\n # Open file if it is closed\n close_file = False\n try:\n image_file = self.file\n\n if self.file.closed:\n # Reopen the file\n if self.is_stored_locally():\n self.file.open(\"rb\")\n else:\n # Some external storage backends don't allow reopening\n # the file. Get a fresh file instance. 
#1397\n storage = self._meta.get_field(\"file\").storage\n image_file = storage.open(self.file.name, \"rb\")\n\n close_file = True\n except OSError as e:\n # re-throw this as a SourceImageIOError so that calling code can distinguish\n # these from IOErrors elsewhere in the process\n raise SourceImageIOError(str(e))\n\n # Seek to beginning\n image_file.seek(0)\n\n try:\n yield image_file\n finally:\n if close_file:\n image_file.close()\n\n @contextmanager\n def get_willow_image(self):\n with self.open_file() as image_file:\n yield willow.Image.open(image_file)\n\n\nclass WagtailImageFieldFile(models.fields.files.ImageFieldFile):\n \"\"\"\n Override the ImageFieldFile in order to use Willow instead\n of Pillow.\n \"\"\"\n\n def _get_image_dimensions(self):\n \"\"\"\n override _get_image_dimensions to call our own get_image_dimensions.\n \"\"\"\n if not hasattr(self, \"_dimensions_cache\"):\n self._dimensions_cache = self.get_image_dimensions()\n return self._dimensions_cache\n\n def get_image_dimensions(self):\n \"\"\"\n The upstream ImageFieldFile calls a local function get_image_dimensions. In this implementation we've made get_image_dimensions\n a method to make it easier to override for Wagtail developers in the future.\n \"\"\"\n close = self.closed\n try:\n self.open()\n image = willow.Image.open(self)\n return image.get_size()\n finally:\n if close:\n self.close()\n else:\n self.seek(0)\n\n\nclass WagtailImageField(models.ImageField):\n \"\"\"\n Override the attr_class on the Django ImageField Model to inject our ImageFieldFile\n with Willow support.\n \"\"\"\n\n attr_class = WagtailImageFieldFile\n\n\nclass AbstractImage(ImageFileMixin, CollectionMember, index.Indexed, models.Model):\n title = models.CharField(max_length=255, verbose_name=_(\"title\"))\n \"\"\" Use local ImageField with Willow support. 
\"\"\"\n file = WagtailImageField(\n verbose_name=_(\"file\"),\n upload_to=get_upload_to,\n width_field=\"width\",\n height_field=\"height\",\n )\n width = models.IntegerField(verbose_name=_(\"width\"), editable=False)\n height = models.IntegerField(verbose_name=_(\"height\"), editable=False)\n created_at = models.DateTimeField(\n verbose_name=_(\"created at\"), auto_now_add=True, db_index=True\n )\n uploaded_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"uploaded by user\"),\n null=True,\n blank=True,\n editable=False,\n on_delete=models.SET_NULL,\n )\n uploaded_by_user.wagtail_reference_index_ignore = True\n\n tags = TaggableManager(help_text=None, blank=True, verbose_name=_(\"tags\"))\n\n focal_point_x = models.PositiveIntegerField(null=True, blank=True)\n focal_point_y = models.PositiveIntegerField(null=True, blank=True)\n focal_point_width = models.PositiveIntegerField(null=True, blank=True)\n focal_point_height = models.PositiveIntegerField(null=True, blank=True)\n\n file_size = models.PositiveIntegerField(null=True, editable=False)\n # A SHA-1 hash of the file contents\n file_hash = models.CharField(\n max_length=40, blank=True, editable=False, db_index=True\n )\n\n objects = ImageQuerySet.as_manager()\n\n def _set_file_hash(self):\n with self.open_file() as f:\n self.file_hash = hash_filelike(f)\n\n def get_file_hash(self):\n if self.file_hash == \"\":\n self._set_file_hash()\n self.save(update_fields=[\"file_hash\"])\n\n return self.file_hash\n\n def _set_image_file_metadata(self):\n self.file.open()\n\n # Set new image file size\n self.file_size = self.file.size\n\n # Set new image file hash\n self._set_file_hash()\n self.file.seek(0)\n\n def get_upload_to(self, filename):\n folder_name = \"original_images\"\n filename = self.file.field.storage.get_valid_name(filename)\n\n # convert the filename to simple ascii characters and then\n # replace non-ascii characters in filename with _ , to sidestep issues with filesystem encoding\n filename = \"\".join(\n (i if ord(i) < 128 else \"_\") for i in string_to_ascii(filename)\n )\n\n # Truncate filename so it fits in the 100 character limit\n # https://code.djangoproject.com/ticket/9893\n full_path = os.path.join(folder_name, filename)\n if len(full_path) >= 95:\n chars_to_trim = len(full_path) - 94\n prefix, extension = os.path.splitext(filename)\n filename = prefix[:-chars_to_trim] + extension\n full_path = os.path.join(folder_name, filename)\n\n return full_path\n\n def get_usage(self):\n return ReferenceIndex.get_grouped_references_to(self)\n\n @property\n def usage_url(self):\n return reverse(\"wagtailimages:image_usage\", args=(self.id,))\n\n search_fields = CollectionMember.search_fields + [\n index.SearchField(\"title\", boost=10),\n index.AutocompleteField(\"title\"),\n index.FilterField(\"title\"),\n index.RelatedFields(\n \"tags\",\n [\n index.SearchField(\"name\", boost=10),\n index.AutocompleteField(\"name\"),\n ],\n ),\n index.FilterField(\"uploaded_by_user\"),\n ]\n\n def __str__(self):\n return self.title\n\n def get_rect(self):\n return Rect(0, 0, self.width, self.height)\n\n def get_focal_point(self):\n if (\n self.focal_point_x is not None\n and self.focal_point_y is not None\n and self.focal_point_width is not None\n and self.focal_point_height is not None\n ):\n return Rect.from_point(\n self.focal_point_x,\n self.focal_point_y,\n self.focal_point_width,\n self.focal_point_height,\n )\n\n def has_focal_point(self):\n return self.get_focal_point() is not None\n\n def set_focal_point(self, 
rect):\n if rect is not None:\n self.focal_point_x = rect.centroid_x\n self.focal_point_y = rect.centroid_y\n self.focal_point_width = rect.width\n self.focal_point_height = rect.height\n else:\n self.focal_point_x = None\n self.focal_point_y = None\n self.focal_point_width = None\n self.focal_point_height = None\n\n def get_suggested_focal_point(self):\n with self.get_willow_image() as willow:\n faces = willow.detect_faces()\n\n if faces:\n # Create a bounding box around all faces\n left = min(face[0] for face in faces)\n top = min(face[1] for face in faces)\n right = max(face[2] for face in faces)\n bottom = max(face[3] for face in faces)\n focal_point = Rect(left, top, right, bottom)\n else:\n features = willow.detect_features()\n if features:\n # Create a bounding box around all features\n left = min(feature[0] for feature in features)\n top = min(feature[1] for feature in features)\n right = max(feature[0] for feature in features)\n bottom = max(feature[1] for feature in features)\n focal_point = Rect(left, top, right, bottom)\n else:\n return None\n\n # Add 20% to width and height and give it a minimum size\n x, y = focal_point.centroid\n width, height = focal_point.size\n\n width *= 1.20\n height *= 1.20\n\n width = max(width, 100)\n height = max(height, 100)\n\n return Rect.from_point(x, y, width, height)\n\n @classmethod\n def get_rendition_model(cls):\n \"\"\"Get the Rendition model for this Image model\"\"\"\n return cls.renditions.rel.related_model\n\n def _get_prefetched_renditions(self) -> Union[Iterable[\"AbstractRendition\"], None]:\n if \"renditions\" in getattr(self, \"_prefetched_objects_cache\", {}):\n return self.renditions.all()\n return getattr(self, \"prefetched_renditions\", None)\n\n def _add_to_prefetched_renditions(self, rendition: \"AbstractRendition\") -> None:\n # Reuse this rendition if requested again from this object\n try:\n self._prefetched_objects_cache[\"renditions\"]._result_cache.append(rendition)\n except (AttributeError, KeyError):\n pass\n try:\n self.prefetched_renditions.append(rendition)\n except AttributeError:\n pass\n\n def get_rendition(self, filter: Union[\"Filter\", str]) -> \"AbstractRendition\":\n \"\"\"\n Returns a ``Rendition`` instance with a ``file`` field value (an\n image) reflecting the supplied ``filter`` value and focal point values\n from this object.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n\n if isinstance(filter, str):\n filter = Filter(spec=filter)\n\n try:\n rendition = self.find_existing_rendition(filter)\n except Rendition.DoesNotExist:\n rendition = self.create_rendition(filter)\n # Reuse this rendition if requested again from this object\n self._add_to_prefetched_renditions(rendition)\n\n cache_key = Rendition.construct_cache_key(\n self, filter.get_cache_key(self), filter.spec\n )\n Rendition.cache_backend.set(cache_key, rendition)\n\n return rendition\n\n def find_existing_rendition(self, filter: \"Filter\") -> \"AbstractRendition\":\n \"\"\"\n Returns an existing ``Rendition`` instance with a ``file`` field value\n (an image) reflecting the supplied ``filter`` value and focal point\n values from this object.\n\n If no such rendition exists, a ``DoesNotExist`` error is raised for the\n relevant model.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n\n try:\n return 
self.find_existing_renditions(filter)[filter]\n except KeyError:\n raise Rendition.DoesNotExist\n\n def create_rendition(self, filter: \"Filter\") -> \"AbstractRendition\":\n \"\"\"\n Creates and returns a ``Rendition`` instance with a ``file`` field\n value (an image) reflecting the supplied ``filter`` value and focal\n point values from this object.\n\n This method is usually called by ``Image.get_rendition()``, after first\n checking that a suitable rendition does not already exist.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n # Because of unique constraints applied to the model, we use\n # get_or_create() to guard against race conditions\n rendition, created = self.renditions.get_or_create(\n filter_spec=filter.spec,\n focal_point_key=filter.get_cache_key(self),\n defaults={\"file\": self.generate_rendition_file(filter)},\n )\n return rendition\n\n def get_renditions(\n self, *filters: Union[\"Filter\", str]\n ) -> Dict[str, \"AbstractRendition\"]:\n \"\"\"\n Returns a ``dict`` of ``Rendition`` instances with image files reflecting\n the supplied ``filters``, keyed by filter spec patterns.\n\n Note: If using custom image models, instances of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n # We don\u2019t support providing mixed Filter and string arguments in the same call.\n if isinstance(filters[0], str):\n filters = [Filter(spec) for spec in dict.fromkeys(filters).keys()]\n\n # Find existing renditions where possible\n renditions = self.find_existing_renditions(*filters)\n\n # Create any renditions not found in prefetched values, cache or database\n not_found = [f for f in filters if f not in renditions]\n for filter, rendition in self.create_renditions(*not_found).items():\n self._add_to_prefetched_renditions(rendition)\n renditions[filter] = rendition\n\n # Update the cache\n cache_additions = {\n Rendition.construct_cache_key(\n self, filter.get_cache_key(self), filter.spec\n ): rendition\n for filter, rendition in renditions.items()\n # prevent writing of cached data back to the cache\n if not getattr(rendition, \"_from_cache\", False)\n }\n if cache_additions:\n Rendition.cache_backend.set_many(cache_additions)\n\n # Make sure key insertion order matches the input order.\n return {filter.spec: renditions[filter] for filter in filters}\n\n def find_existing_renditions(\n self, *filters: \"Filter\"\n ) -> Dict[\"Filter\", \"AbstractRendition\"]:\n \"\"\"\n Returns a dictionary of existing ``Rendition`` instances with ``file``\n values (images) reflecting the supplied ``filters`` and the focal point\n values from this object.\n\n Filters for which an existing rendition cannot be found are ommitted\n from the return value. 
If none of the requested renditions have been\n created before, the return value will be an empty dict.\n \"\"\"\n Rendition = self.get_rendition_model()\n filters_by_spec: Dict[str, Filter] = {f.spec: f for f in filters}\n found: Dict[Filter, AbstractRendition] = {}\n\n # Interrogate prefetched values first (where available)\n prefetched_renditions = self._get_prefetched_renditions()\n if prefetched_renditions is not None:\n # NOTE: When renditions are prefetched, it's assumed that if the\n # requested renditions exist, they will be present in the\n # prefetched value, and further cache/database lookups are avoided.\n\n # group renditions by the filters of interest\n potential_matches: Dict[Filter, List[AbstractRendition]] = defaultdict(list)\n for rendition in prefetched_renditions:\n try:\n filter = filters_by_spec[rendition.filter_spec]\n except KeyError:\n continue # this rendition can be ignored\n else:\n potential_matches[filter].append(rendition)\n\n # For each filter we have renditions for, look for one with a\n # 'focal_point_key' value matching filter.get_cache_key()\n for filter, renditions in potential_matches.items():\n focal_point_key = filter.get_cache_key(self)\n for rendition in renditions:\n if rendition.focal_point_key == focal_point_key:\n # to prevent writing of cached data back to the cache\n rendition._from_cache = True\n # use this rendition\n found[filter] = rendition\n # skip to the next filter\n break\n else:\n # Renditions are not prefetched, so attempt to find suitable\n # items in the cache or database\n\n # Query the cache first\n cache_keys = [\n Rendition.construct_cache_key(self, filter.get_cache_key(self), spec)\n for spec, filter in filters_by_spec.items()\n ]\n for rendition in Rendition.cache_backend.get_many(cache_keys).values():\n filter = filters_by_spec[rendition.filter_spec]\n found[filter] = rendition\n\n # For items not found in the cache, look in the database\n not_found = [f for f in filters if f not in found]\n if not_found:\n lookup_q = Q()\n for filter in not_found:\n lookup_q |= Q(\n filter_spec=filter.spec,\n focal_point_key=filter.get_cache_key(self),\n )\n for rendition in self.renditions.filter(lookup_q):\n filter = filters_by_spec[rendition.filter_spec]\n found[filter] = rendition\n return found\n\n def create_renditions(\n self, *filters: \"Filter\"\n ) -> Dict[\"Filter\", \"AbstractRendition\"]:\n \"\"\"\n Creates multiple ``Rendition`` instances with image files reflecting the supplied\n ``filters``, and returns them as a ``dict`` keyed by the relevant ``Filter`` instance.\n Where suitable renditions already exist in the database, they will be returned instead,\n so as not to create duplicates.\n\n This method is usually called by ``Image.get_renditions()``, after first\n checking that a suitable rendition does not already exist.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n\n if not filters:\n return {}\n\n if len(filters) == 1:\n # create_rendition() is better for single renditions, as it can\n # utilize QuerySet.get_or_create(), which has better handling of\n # race conditions\n filter = filters[0]\n return {filter: self.create_rendition(filter)}\n\n return_value: Dict[Filter, AbstractRendition] = {}\n filter_map: Dict[str, Filter] = {f.spec: f for f in filters}\n\n with self.open_file() as file:\n original_image_bytes = file.read()\n\n to_create = []\n\n def _generate_single_rendition(filter):\n # Using ContentFile here 
ensures we generate all renditions. Simply\n # passing self.file required several page reloads to generate all\n image_file = self.generate_rendition_file(\n filter, source=ContentFile(original_image_bytes, name=self.file.name)\n )\n to_create.append(\n Rendition(\n image=self,\n filter_spec=filter.spec,\n focal_point_key=filter.get_cache_key(self),\n file=image_file,\n )\n )\n\n with ThreadPoolExecutor() as executor:\n executor.map(_generate_single_rendition, filters)\n\n # Rendition generation can take a while. So, if other processes have created\n # identical renditions in the meantime, we should find them to avoid clashes.\n # NB: Clashes can still occur, because there is no get_or_create() equivalent\n # for multiple objects. However, this will reduce that risk considerably.\n files_for_deletion: List[File] = []\n\n # Assemble Q() to identify potential clashes\n lookup_q = Q()\n for rendition in to_create:\n lookup_q |= Q(\n filter_spec=rendition.filter_spec,\n focal_point_key=rendition.focal_point_key,\n )\n\n for existing in self.renditions.filter(lookup_q):\n # Include the existing rendition in the return value\n filter = filter_map[existing.filter_spec]\n return_value[filter] = existing\n\n for new in to_create:\n if (\n new.filter_spec == existing.filter_spec\n and new.focal_point_key == existing.focal_point_key\n ):\n # Avoid creating the new version\n to_create.remove(new)\n # Mark for deletion later, so as not to hold up creation\n files_for_deletion.append(new.file)\n\n for new in Rendition.objects.bulk_create(to_create, ignore_conflicts=True):\n filter = filter_map[new.filter_spec]\n return_value[filter] = new\n\n # Delete redundant rendition image files\n for file in files_for_deletion:\n file.delete(save=False)\n\n return return_value\n\n def generate_rendition_file(self, filter: \"Filter\", *, source: File = None) -> File:\n \"\"\"\n Generates an in-memory image matching the supplied ``filter`` value\n and focal point value from this object, wraps it in a ``File`` object\n with a suitable filename, and returns it. The return value is used\n as the ``file`` field value for rendition objects saved by\n ``AbstractImage.create_rendition()``.\n\n If the contents of ``self.file`` has already been read into memory, the\n ``source`` keyword can be used to provide a reference to the in-memory\n ``File``, bypassing the need to reload the image contents from storage.\n\n NOTE: The responsibility of generating the new image from the original\n falls to the supplied ``filter`` object. 
If you want to do anything\n custom with rendition images (for example, to preserve metadata from\n the original image), you might want to consider swapping out ``filter``\n for an instance of a custom ``Filter`` subclass of your design.\n \"\"\"\n\n cache_key = filter.get_cache_key(self)\n\n logger.debug(\n \"Generating '%s' rendition for image %d\",\n filter.spec,\n self.pk,\n )\n\n start_time = time.time()\n\n try:\n generated_image = filter.run(\n self,\n SpooledTemporaryFile(max_size=settings.FILE_UPLOAD_MAX_MEMORY_SIZE),\n source=source,\n )\n\n logger.debug(\n \"Generated '%s' rendition for image %d in %.1fms\",\n filter.spec,\n self.pk,\n (time.time() - start_time) * 1000,\n )\n except: # noqa:B901,E722\n logger.debug(\n \"Failed to generate '%s' rendition for image %d\",\n filter.spec,\n self.pk,\n )\n raise\n\n # Generate filename\n input_filename = os.path.basename(self.file.name)\n input_filename_without_extension, input_extension = os.path.splitext(\n input_filename\n )\n output_extension = (\n filter.spec.replace(\"|\", \".\")\n + IMAGE_FORMAT_EXTENSIONS[generated_image.format_name]\n )\n if cache_key:\n output_extension = cache_key + \".\" + output_extension\n\n # Truncate filename to prevent it going over 60 chars\n output_filename_without_extension = input_filename_without_extension[\n : (59 - len(output_extension))\n ]\n output_filename = output_filename_without_extension + \".\" + output_extension\n\n return File(generated_image.f, name=output_filename)\n\n def is_portrait(self):\n return self.width < self.height\n\n def is_landscape(self):\n return self.height < self.width\n\n def is_svg(self):\n _, ext = os.path.splitext(self.file.name)\n return ext.lower() == \".svg\"\n\n @property\n def filename(self):\n return os.path.basename(self.file.name)\n\n @property\n def default_alt_text(self):\n # by default the alt text field (used in rich text insertion) is populated\n # from the title. Subclasses might provide a separate alt field, and\n # override this\n return self.title\n\n def is_editable_by_user(self, user):\n from wagtail.images.permissions import permission_policy\n\n return permission_policy.user_has_permission_for_instance(user, \"change\", self)\n\n class Meta:\n abstract = True\n\n\nclass Image(AbstractImage):\n admin_form_fields = (\n \"title\",\n \"file\",\n \"collection\",\n \"tags\",\n \"focal_point_x\",\n \"focal_point_y\",\n \"focal_point_width\",\n \"focal_point_height\",\n )\n\n class Meta(AbstractImage.Meta):\n verbose_name = _(\"image\")\n verbose_name_plural = _(\"images\")\n permissions = [\n (\"choose_image\", \"Can choose image\"),\n ]\n\n\nclass Filter:\n \"\"\"\n Represents one or more operations that can be applied to an Image to produce a rendition\n appropriate for final display on the website. 
Usually this would be a resize operation,\n but could potentially involve colour processing, etc.\n \"\"\"\n\n spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.]+$\")\n pipe_spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.\\|]+$\")\n expanding_spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.{},]+$\")\n pipe_expanding_spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.{},\\|]+$\")\n\n def __init__(self, spec=None):\n # The spec pattern is operation1-var1-var2|operation2-var1\n self.spec = spec\n\n @classmethod\n def expand_spec(self, spec: Union[\"str\", Iterable[\"str\"]]) -> List[\"str\"]:\n \"\"\"\n Converts a spec pattern with brace-expansions, into a list of spec patterns.\n For example, \"width-{100,200}\" becomes [\"width-100\", \"width-200\"].\n\n Supports providing filter specs already split, or pipe or space-separated.\n \"\"\"\n if isinstance(spec, str):\n separator = \"|\" if \"|\" in spec else \" \"\n spec = spec.split(separator)\n\n expanded_segments = []\n for segment in spec:\n # Check if segment has braces to expand\n if \"{\" in segment and \"}\" in segment:\n prefix, options_suffixed = segment.split(\"{\")\n options_pattern, suffix = options_suffixed.split(\"}\")\n options = options_pattern.split(\",\")\n expanded_segments.append(\n [prefix + option + suffix for option in options]\n )\n else:\n expanded_segments.append([segment])\n\n # Cartesian product of all expanded segments (equivalent to nested for loops).\n combinations = itertools.product(*expanded_segments)\n\n return [\"|\".join(combination) for combination in combinations]\n\n @cached_property\n def operations(self):\n # Search for operations\n registered_operations = {}\n for fn in hooks.get_hooks(\"register_image_operations\"):\n registered_operations.update(dict(fn()))\n\n # Build list of operation objects\n operations = []\n for op_spec in self.spec.split(\"|\"):\n op_spec_parts = op_spec.split(\"-\")\n\n if op_spec_parts[0] not in registered_operations:\n raise InvalidFilterSpecError(\n \"Unrecognised operation: %s\" % op_spec_parts[0]\n )\n\n op_class = registered_operations[op_spec_parts[0]]\n operations.append(op_class(*op_spec_parts))\n return operations\n\n @property\n def transform_operations(self):\n return [\n operation\n for operation in self.operations\n if isinstance(operation, TransformOperation)\n ]\n\n @property\n def filter_operations(self):\n return [\n operation\n for operation in self.operations\n if isinstance(operation, FilterOperation)\n ]\n\n def get_transform(self, image, size=None):\n \"\"\"\n Returns an ImageTransform with all the transforms in this filter applied.\n\n The ImageTransform is an object with two attributes:\n - .size - The size of the final image\n - .matrix - An affine transformation matrix that combines any\n transform/scale/rotation operations that need to be applied to the image\n \"\"\"\n\n if not size:\n size = (image.width, image.height)\n\n transform = ImageTransform(size, image_is_svg=image.is_svg())\n for operation in self.transform_operations:\n transform = operation.run(transform, image)\n return transform\n\n @contextmanager\n def get_willow_image(self, image: AbstractImage, source: File = None):\n if source is not None:\n yield willow.Image.open(source)\n else:\n with image.get_willow_image() as willow_image:\n yield willow_image\n\n def run(self, image: AbstractImage, output: BytesIO, source: File = None):\n with self.get_willow_image(image, source) as willow:\n\n original_format = willow.format_name\n\n # Fix orientation of image\n willow = 
willow.auto_orient()\n\n # Transform the image\n transform = self.get_transform(\n image, (willow.image.width, willow.image.height)\n )\n willow = willow.crop(transform.get_rect().round())\n willow = willow.resize(transform.size)\n\n # Apply filters\n env = {\n \"original-format\": original_format,\n }\n for operation in self.filter_operations:\n willow = operation.run(willow, image, env) or willow\n\n # Find the output format to use\n if \"output-format\" in env:\n # Developer specified an output format\n output_format = env[\"output-format\"]\n else:\n # Convert bmp and webp to png by default\n default_conversions = {\n \"avif\": \"png\",\n \"bmp\": \"png\",\n \"webp\": \"png\",\n }\n\n # Convert unanimated GIFs to PNG as well\n if not willow.has_animation():\n default_conversions[\"gif\"] = \"png\"\n\n # Allow the user to override the conversions\n conversion = getattr(settings, \"WAGTAILIMAGES_FORMAT_CONVERSIONS\", {})\n default_conversions.update(conversion)\n\n # Get the converted output format falling back to the original\n output_format = default_conversions.get(\n original_format, original_format\n )\n\n if output_format == \"jpeg\":\n # Allow changing of JPEG compression quality\n if \"jpeg-quality\" in env:\n quality = env[\"jpeg-quality\"]\n else:\n quality = getattr(settings, \"WAGTAILIMAGES_JPEG_QUALITY\", 85)\n\n # If the image has an alpha channel, give it a white background\n if willow.has_alpha():\n willow = willow.set_background_color_rgb((255, 255, 255))\n\n return willow.save_as_jpeg(\n output, quality=quality, progressive=True, optimize=True\n )\n elif output_format == \"png\":\n return willow.save_as_png(output, optimize=True)\n elif output_format == \"gif\":\n return willow.save_as_gif(output)\n elif output_format == \"webp\":\n # Allow changing of WebP compression quality\n if (\n \"output-format-options\" in env\n and \"lossless\" in env[\"output-format-options\"]\n ):\n return willow.save_as_webp(output, lossless=True)\n elif \"webp-quality\" in env:\n quality = env[\"webp-quality\"]\n else:\n quality = getattr(settings, \"WAGTAILIMAGES_WEBP_QUALITY\", 80)\n\n return willow.save_as_webp(output, quality=quality)\n elif output_format == \"avif\":\n # Allow changing of AVIF compression quality\n if (\n \"output-format-options\" in env\n and \"lossless\" in env[\"output-format-options\"]\n ):\n return willow.save_as_avif(output, lossless=True)\n elif \"avif-quality\" in env:\n quality = env[\"avif-quality\"]\n else:\n quality = getattr(settings, \"WAGTAILIMAGES_AVIF_QUALITY\", 80)\n return willow.save_as_avif(output, quality=quality)\n elif output_format == \"svg\":\n return willow.save_as_svg(output)\n raise UnknownOutputImageFormatError(\n f\"Unknown output image format '{output_format}'\"\n )\n\n def get_cache_key(self, image):\n vary_parts = []\n\n for operation in self.operations:\n for field in getattr(operation, \"vary_fields\", []):\n value = getattr(image, field, \"\")\n vary_parts.append(str(value))\n\n vary_string = \"-\".join(vary_parts)\n\n # Return blank string if there are no vary fields\n if not vary_string:\n return \"\"\n\n return hashlib.sha1(vary_string.encode(\"utf-8\")).hexdigest()[:8]\n\n\nclass ResponsiveImage:\n \"\"\"\n A custom object used to represent a collection of renditions.\n Provides a 'renditions' property to access the renditions,\n and renders to the front-end HTML.\n \"\"\"\n\n def __init__(\n self,\n renditions: Dict[str, \"AbstractRendition\"],\n attrs: Optional[Dict[str, Any]] = None,\n ):\n self.renditions = 
list(renditions.values())\n self.attrs = attrs\n\n @classmethod\n def get_width_srcset(cls, renditions_list: List[\"AbstractRendition\"]):\n if len(renditions_list) == 1:\n # No point in using width descriptors if there is a single image.\n return renditions_list[0].url\n\n return \", \".join([f\"{r.url} {r.width}w\" for r in renditions_list])\n\n def __html__(self):\n attrs = self.attrs or {}\n\n # No point in adding a srcset if there is a single image.\n if len(self.renditions) > 1:\n attrs[\"srcset\"] = self.get_width_srcset(self.renditions)\n\n # The first rendition is the \"base\" / \"fallback\" image.\n return self.renditions[0].img_tag(attrs)\n\n def __str__(self):\n return mark_safe(self.__html__())\n\n def __bool__(self):\n return bool(self.renditions)\n\n def __eq__(self, other: \"ResponsiveImage\"):\n if isinstance(other, ResponsiveImage):\n return self.renditions == other.renditions and self.attrs == other.attrs\n return False\n\n\nclass Picture(ResponsiveImage):\n # Keep this separate from FormatOperation.supported_formats,\n # as the order our formats are defined in is essential for the picture tag.\n # Defines the order of <source> elements in the tag when format operations\n # are in use, and the priority order to identify the \"fallback\" format.\n # The browser will pick the first supported format in this list.\n source_format_order = [\"avif\", \"webp\", \"jpeg\", \"png\", \"gif\"]\n\n def __init__(\n self,\n renditions: Dict[str, \"AbstractRendition\"],\n attrs: Optional[Dict[str, Any]] = None,\n ):\n super().__init__(renditions, attrs)\n # Store renditions grouped by format separately for access from templates.\n self.formats = self.get_formats(renditions)\n\n def get_formats(\n self, renditions: Dict[str, \"AbstractRendition\"]\n ) -> Dict[str, List[\"AbstractRendition\"]]:\n \"\"\"\n Group renditions by the format they are for, if any.\n If there is only one format, no grouping is required.\n \"\"\"\n formats = defaultdict(list)\n for spec, rendition in renditions.items():\n for fmt in FormatOperation.supported_formats:\n # Identify the spec\u2019s format (if any).\n if f\"format-{fmt}\" in spec:\n formats[fmt].append(rendition)\n break\n # Avoid the split by format if there is only one.\n if len(formats.keys()) < 2:\n return {}\n\n return formats\n\n def get_fallback_format(self):\n for fmt in reversed(self.source_format_order):\n if fmt in self.formats:\n return fmt\n\n def __html__(self):\n # If there aren\u2019t multiple formats, render a vanilla img tag with srcset.\n if not self.formats:\n return mark_safe(f\"<picture>{super().__html__()}</picture>\")\n\n attrs = self.attrs or {}\n\n sizes = f'sizes=\"{attrs[\"sizes\"]}\" ' if \"sizes\" in attrs else \"\"\n fallback_format = self.get_fallback_format()\n fallback_renditions = self.formats[fallback_format]\n\n sources = []\n\n for fmt in self.source_format_order:\n if fmt != fallback_format and fmt in self.formats:\n srcset = self.get_width_srcset(self.formats[fmt])\n mime = image_format_name_to_content_type(fmt)\n sources.append(f'<source srcset=\"{srcset}\" {sizes}type=\"{mime}\">')\n\n if len(fallback_renditions) > 1:\n attrs[\"srcset\"] = self.get_width_srcset(fallback_renditions)\n\n # The first rendition is the \"base\" / \"fallback\" image.\n fallback = fallback_renditions[0].img_tag(attrs)\n\n return mark_safe(f\"<picture>{''.join(sources)}{fallback}</picture>\")\n\n\nclass AbstractRendition(ImageFileMixin, models.Model):\n filter_spec = models.CharField(max_length=255, db_index=True)\n \"\"\" Use local 
ImageField with Willow support. \"\"\"\n file = WagtailImageField(\n upload_to=get_rendition_upload_to,\n storage=get_rendition_storage,\n width_field=\"width\",\n height_field=\"height\",\n )\n width = models.IntegerField(editable=False)\n height = models.IntegerField(editable=False)\n focal_point_key = models.CharField(\n max_length=16, blank=True, default=\"\", editable=False\n )\n\n wagtail_reference_index_ignore = True\n\n @property\n def url(self):\n return self.file.url\n\n @property\n def alt(self):\n return self.image.default_alt_text\n\n @property\n def attrs(self):\n \"\"\"\n The src, width, height, and alt attributes for an <img> tag, as a HTML\n string\n \"\"\"\n return flatatt(self.attrs_dict)\n\n @property\n def attrs_dict(self):\n \"\"\"\n A dict of the src, width, height, and alt attributes for an <img> tag.\n \"\"\"\n return OrderedDict(\n [\n (\"src\", self.url),\n (\"width\", self.width),\n (\"height\", self.height),\n (\"alt\", self.alt),\n ]\n )\n\n @property\n def full_url(self):\n url = self.url\n if hasattr(settings, \"WAGTAILADMIN_BASE_URL\") and url.startswith(\"/\"):\n url = settings.WAGTAILADMIN_BASE_URL + url\n return url\n\n @property\n def filter(self):\n return Filter(self.filter_spec)\n\n @cached_property\n def focal_point(self):\n image_focal_point = self.image.get_focal_point()\n if image_focal_point:\n transform = self.filter.get_transform(self.image)\n return image_focal_point.transform(transform)\n\n @property\n def background_position_style(self):\n \"\"\"\n Returns a `background-position` rule to be put in the inline style of an element which uses the rendition for its background.\n\n This positions the rendition according to the value of the focal point. This is helpful for when the element does not have\n the same aspect ratio as the rendition.\n\n For example:\n\n {% image page.image fill-1920x600 as image %}\n <div style=\"background-image: url('{{ image.url }}'); {{ image.background_position_style }}\">\n </div>\n \"\"\"\n focal_point = self.focal_point\n if focal_point:\n horz = int((focal_point.x * 100) // self.width)\n vert = int((focal_point.y * 100) // self.height)\n return f\"background-position: {horz}% {vert}%;\"\n else:\n return \"background-position: 50% 50%;\"\n\n def img_tag(self, extra_attributes={}):\n attrs = self.attrs_dict.copy()\n\n attrs.update(apps.get_app_config(\"wagtailimages\").default_attrs)\n\n attrs.update(extra_attributes)\n\n return mark_safe(f\"<img{flatatt(attrs)}>\")\n\n def __html__(self):\n return self.img_tag()\n\n def get_upload_to(self, filename):\n folder_name = \"images\"\n filename = self.file.field.storage.get_valid_name(filename)\n return os.path.join(folder_name, filename)\n\n @classmethod\n def check(cls, **kwargs):\n errors = super().check(**kwargs)\n if not cls._meta.abstract:\n if not any(\n set(constraint) == {\"image\", \"filter_spec\", \"focal_point_key\"}\n for constraint in cls._meta.unique_together\n ):\n errors.append(\n checks.Error(\n \"Custom rendition model %r has an invalid unique_together setting\"\n % cls,\n hint=\"Custom rendition models must include the constraint \"\n \"('image', 'filter_spec', 'focal_point_key') in their unique_together definition.\",\n obj=cls,\n id=\"wagtailimages.E001\",\n )\n )\n\n return errors\n\n @staticmethod\n def construct_cache_key(image, filter_cache_key, filter_spec):\n return \"wagtail-rendition-\" + \"-\".join(\n [str(image.id), image.file_hash, filter_cache_key, filter_spec]\n )\n\n @classproperty\n def cache_backend(cls) -> BaseCache:\n try:\n 
return caches[\"renditions\"]\n except InvalidCacheBackendError:\n return caches[DEFAULT_CACHE_ALIAS]\n\n def get_cache_key(self):\n return self.construct_cache_key(\n self.image, self.focal_point_key, self.filter_spec\n )\n\n def purge_from_cache(self):\n self.cache_backend.delete(self.get_cache_key())\n\n class Meta:\n abstract = True\n\n\nclass Rendition(AbstractRendition):\n image = models.ForeignKey(\n Image, related_name=\"renditions\", on_delete=models.CASCADE\n )\n\n class Meta:\n unique_together = ((\"image\", \"filter_spec\", \"focal_point_key\"),)\n\n\nclass UploadedImage(models.Model):\n \"\"\"\n Temporary storage for images uploaded through the multiple image uploader, when validation rules (e.g.\n required metadata fields) prevent creating an Image object from the image file alone. In this case,\n the image file is stored against this model, to be turned into an Image object once the full form\n has been filled in.\n \"\"\"\n\n file = models.ImageField(upload_to=\"uploaded_images\", max_length=200)\n uploaded_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"uploaded by user\"),\n null=True,\n blank=True,\n editable=False,\n on_delete=models.SET_NULL,\n )\n uploaded_by_user.wagtail_reference_index_ignore = True\n", "path": "wagtail/images/models.py"}], "after_files": [{"content": "import hashlib\nimport itertools\nimport logging\nimport os.path\nimport re\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import contextmanager\nfrom io import BytesIO\nfrom tempfile import SpooledTemporaryFile\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nimport willow\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core import checks\nfrom django.core.cache import DEFAULT_CACHE_ALIAS, InvalidCacheBackendError, caches\nfrom django.core.cache.backends.base import BaseCache\nfrom django.core.files import File\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.forms.utils import flatatt\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property, classproperty\nfrom django.utils.module_loading import import_string\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.managers import TaggableManager\n\nfrom wagtail import hooks\nfrom wagtail.coreutils import string_to_ascii\nfrom wagtail.images.exceptions import (\n InvalidFilterSpecError,\n UnknownOutputImageFormatError,\n)\nfrom wagtail.images.fields import image_format_name_to_content_type\nfrom wagtail.images.image_operations import (\n FilterOperation,\n FormatOperation,\n ImageTransform,\n TransformOperation,\n)\nfrom wagtail.images.rect import Rect\nfrom wagtail.models import CollectionMember, ReferenceIndex\nfrom wagtail.search import index\nfrom wagtail.search.queryset import SearchableQuerySetMixin\nfrom wagtail.utils.file import hash_filelike\n\nlogger = logging.getLogger(\"wagtail.images\")\n\n\nIMAGE_FORMAT_EXTENSIONS = {\n \"avif\": \".avif\",\n \"jpeg\": \".jpg\",\n \"png\": \".png\",\n \"gif\": \".gif\",\n \"webp\": \".webp\",\n \"svg\": \".svg\",\n}\n\n\nclass SourceImageIOError(IOError):\n \"\"\"\n Custom exception to distinguish IOErrors that were thrown while opening the source image\n \"\"\"\n\n pass\n\n\nclass ImageQuerySet(SearchableQuerySetMixin, 
models.QuerySet):\n def prefetch_renditions(self, *filters):\n \"\"\"\n Prefetches generated renditions for the given filters.\n Returns all renditions when no filters are provided.\n \"\"\"\n rendition_model = self.model.get_rendition_model()\n queryset = rendition_model.objects.all()\n\n if filters:\n # Get a list of filter spec strings. The given value could contain Filter objects\n filter_specs = [\n filter.spec if isinstance(filter, Filter) else filter\n for filter in filters\n ]\n queryset = queryset.filter(filter_spec__in=filter_specs)\n\n return self.prefetch_related(\n models.Prefetch(\n \"renditions\",\n queryset=queryset,\n to_attr=\"prefetched_renditions\",\n )\n )\n\n\ndef get_upload_to(instance, filename):\n \"\"\"\n Obtain a valid upload path for an image file.\n\n This needs to be a module-level function so that it can be referenced within migrations,\n but simply delegates to the `get_upload_to` method of the instance, so that AbstractImage\n subclasses can override it.\n \"\"\"\n return instance.get_upload_to(filename)\n\n\ndef get_rendition_upload_to(instance, filename):\n \"\"\"\n Obtain a valid upload path for an image rendition file.\n\n This needs to be a module-level function so that it can be referenced within migrations,\n but simply delegates to the `get_upload_to` method of the instance, so that AbstractRendition\n subclasses can override it.\n \"\"\"\n return instance.get_upload_to(filename)\n\n\ndef get_rendition_storage():\n \"\"\"\n Obtain the storage object for an image rendition file.\n Returns custom storage (if defined), or the default storage.\n\n This needs to be a module-level function, because we do not yet\n have an instance when Django loads the models.\n \"\"\"\n storage = getattr(settings, \"WAGTAILIMAGES_RENDITION_STORAGE\", default_storage)\n if isinstance(storage, str):\n module = import_string(storage)\n storage = module()\n return storage\n\n\nclass ImageFileMixin:\n def is_stored_locally(self):\n \"\"\"\n Returns True if the image is hosted on the local filesystem\n \"\"\"\n try:\n self.file.path\n\n return True\n except NotImplementedError:\n return False\n\n def get_file_size(self):\n if self.file_size is None:\n try:\n self.file_size = self.file.size\n except Exception as e: # noqa: BLE001\n # File not found\n #\n # Have to catch everything, because the exception\n # depends on the file subclass, and therefore the\n # storage being used.\n raise SourceImageIOError(str(e))\n\n self.save(update_fields=[\"file_size\"])\n\n return self.file_size\n\n @contextmanager\n def open_file(self):\n # Open file if it is closed\n close_file = False\n try:\n image_file = self.file\n\n if self.file.closed:\n # Reopen the file\n if self.is_stored_locally():\n self.file.open(\"rb\")\n else:\n # Some external storage backends don't allow reopening\n # the file. Get a fresh file instance. 
#1397\n storage = self._meta.get_field(\"file\").storage\n image_file = storage.open(self.file.name, \"rb\")\n\n close_file = True\n except OSError as e:\n # re-throw this as a SourceImageIOError so that calling code can distinguish\n # these from IOErrors elsewhere in the process\n raise SourceImageIOError(str(e))\n\n # Seek to beginning\n image_file.seek(0)\n\n try:\n yield image_file\n finally:\n if close_file:\n image_file.close()\n\n @contextmanager\n def get_willow_image(self):\n with self.open_file() as image_file:\n yield willow.Image.open(image_file)\n\n\nclass WagtailImageFieldFile(models.fields.files.ImageFieldFile):\n \"\"\"\n Override the ImageFieldFile in order to use Willow instead\n of Pillow.\n \"\"\"\n\n def _get_image_dimensions(self):\n \"\"\"\n override _get_image_dimensions to call our own get_image_dimensions.\n \"\"\"\n if not hasattr(self, \"_dimensions_cache\"):\n self._dimensions_cache = self.get_image_dimensions()\n return self._dimensions_cache\n\n def get_image_dimensions(self):\n \"\"\"\n The upstream ImageFieldFile calls a local function get_image_dimensions. In this implementation we've made get_image_dimensions\n a method to make it easier to override for Wagtail developers in the future.\n \"\"\"\n close = self.closed\n try:\n self.open()\n image = willow.Image.open(self)\n return image.get_size()\n finally:\n if close:\n self.close()\n else:\n self.seek(0)\n\n\nclass WagtailImageField(models.ImageField):\n \"\"\"\n Override the attr_class on the Django ImageField Model to inject our ImageFieldFile\n with Willow support.\n \"\"\"\n\n attr_class = WagtailImageFieldFile\n\n\nclass AbstractImage(ImageFileMixin, CollectionMember, index.Indexed, models.Model):\n title = models.CharField(max_length=255, verbose_name=_(\"title\"))\n \"\"\" Use local ImageField with Willow support. 
\"\"\"\n file = WagtailImageField(\n verbose_name=_(\"file\"),\n upload_to=get_upload_to,\n width_field=\"width\",\n height_field=\"height\",\n )\n width = models.IntegerField(verbose_name=_(\"width\"), editable=False)\n height = models.IntegerField(verbose_name=_(\"height\"), editable=False)\n created_at = models.DateTimeField(\n verbose_name=_(\"created at\"), auto_now_add=True, db_index=True\n )\n uploaded_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"uploaded by user\"),\n null=True,\n blank=True,\n editable=False,\n on_delete=models.SET_NULL,\n )\n uploaded_by_user.wagtail_reference_index_ignore = True\n\n tags = TaggableManager(help_text=None, blank=True, verbose_name=_(\"tags\"))\n\n focal_point_x = models.PositiveIntegerField(null=True, blank=True)\n focal_point_y = models.PositiveIntegerField(null=True, blank=True)\n focal_point_width = models.PositiveIntegerField(null=True, blank=True)\n focal_point_height = models.PositiveIntegerField(null=True, blank=True)\n\n file_size = models.PositiveIntegerField(null=True, editable=False)\n # A SHA-1 hash of the file contents\n file_hash = models.CharField(\n max_length=40, blank=True, editable=False, db_index=True\n )\n\n objects = ImageQuerySet.as_manager()\n\n def _set_file_hash(self):\n with self.open_file() as f:\n self.file_hash = hash_filelike(f)\n\n def get_file_hash(self):\n if self.file_hash == \"\":\n self._set_file_hash()\n self.save(update_fields=[\"file_hash\"])\n\n return self.file_hash\n\n def _set_image_file_metadata(self):\n self.file.open()\n\n # Set new image file size\n self.file_size = self.file.size\n\n # Set new image file hash\n self._set_file_hash()\n self.file.seek(0)\n\n def get_upload_to(self, filename):\n folder_name = \"original_images\"\n filename = self.file.field.storage.get_valid_name(filename)\n\n # convert the filename to simple ascii characters and then\n # replace non-ascii characters in filename with _ , to sidestep issues with filesystem encoding\n filename = \"\".join(\n (i if ord(i) < 128 else \"_\") for i in string_to_ascii(filename)\n )\n\n # Truncate filename so it fits in the 100 character limit\n # https://code.djangoproject.com/ticket/9893\n full_path = os.path.join(folder_name, filename)\n if len(full_path) >= 95:\n chars_to_trim = len(full_path) - 94\n prefix, extension = os.path.splitext(filename)\n filename = prefix[:-chars_to_trim] + extension\n full_path = os.path.join(folder_name, filename)\n\n return full_path\n\n def get_usage(self):\n return ReferenceIndex.get_grouped_references_to(self)\n\n @property\n def usage_url(self):\n return reverse(\"wagtailimages:image_usage\", args=(self.id,))\n\n search_fields = CollectionMember.search_fields + [\n index.SearchField(\"title\", boost=10),\n index.AutocompleteField(\"title\"),\n index.FilterField(\"title\"),\n index.RelatedFields(\n \"tags\",\n [\n index.SearchField(\"name\", boost=10),\n index.AutocompleteField(\"name\"),\n ],\n ),\n index.FilterField(\"uploaded_by_user\"),\n ]\n\n def __str__(self):\n return self.title\n\n def get_rect(self):\n return Rect(0, 0, self.width, self.height)\n\n def get_focal_point(self):\n if (\n self.focal_point_x is not None\n and self.focal_point_y is not None\n and self.focal_point_width is not None\n and self.focal_point_height is not None\n ):\n return Rect.from_point(\n self.focal_point_x,\n self.focal_point_y,\n self.focal_point_width,\n self.focal_point_height,\n )\n\n def has_focal_point(self):\n return self.get_focal_point() is not None\n\n def set_focal_point(self, 
rect):\n if rect is not None:\n self.focal_point_x = rect.centroid_x\n self.focal_point_y = rect.centroid_y\n self.focal_point_width = rect.width\n self.focal_point_height = rect.height\n else:\n self.focal_point_x = None\n self.focal_point_y = None\n self.focal_point_width = None\n self.focal_point_height = None\n\n def get_suggested_focal_point(self):\n if self.is_svg():\n # We can't run feature detection on SVGs, and don't provide a\n # pathway from SVG -> raster formats, so don't try it.\n return None\n\n with self.get_willow_image() as willow:\n faces = willow.detect_faces()\n\n if faces:\n # Create a bounding box around all faces\n left = min(face[0] for face in faces)\n top = min(face[1] for face in faces)\n right = max(face[2] for face in faces)\n bottom = max(face[3] for face in faces)\n focal_point = Rect(left, top, right, bottom)\n else:\n features = willow.detect_features()\n if features:\n # Create a bounding box around all features\n left = min(feature[0] for feature in features)\n top = min(feature[1] for feature in features)\n right = max(feature[0] for feature in features)\n bottom = max(feature[1] for feature in features)\n focal_point = Rect(left, top, right, bottom)\n else:\n return None\n\n # Add 20% to width and height and give it a minimum size\n x, y = focal_point.centroid\n width, height = focal_point.size\n\n width *= 1.20\n height *= 1.20\n\n width = max(width, 100)\n height = max(height, 100)\n\n return Rect.from_point(x, y, width, height)\n\n @classmethod\n def get_rendition_model(cls):\n \"\"\"Get the Rendition model for this Image model\"\"\"\n return cls.renditions.rel.related_model\n\n def _get_prefetched_renditions(self) -> Union[Iterable[\"AbstractRendition\"], None]:\n if \"renditions\" in getattr(self, \"_prefetched_objects_cache\", {}):\n return self.renditions.all()\n return getattr(self, \"prefetched_renditions\", None)\n\n def _add_to_prefetched_renditions(self, rendition: \"AbstractRendition\") -> None:\n # Reuse this rendition if requested again from this object\n try:\n self._prefetched_objects_cache[\"renditions\"]._result_cache.append(rendition)\n except (AttributeError, KeyError):\n pass\n try:\n self.prefetched_renditions.append(rendition)\n except AttributeError:\n pass\n\n def get_rendition(self, filter: Union[\"Filter\", str]) -> \"AbstractRendition\":\n \"\"\"\n Returns a ``Rendition`` instance with a ``file`` field value (an\n image) reflecting the supplied ``filter`` value and focal point values\n from this object.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n\n if isinstance(filter, str):\n filter = Filter(spec=filter)\n\n try:\n rendition = self.find_existing_rendition(filter)\n except Rendition.DoesNotExist:\n rendition = self.create_rendition(filter)\n # Reuse this rendition if requested again from this object\n self._add_to_prefetched_renditions(rendition)\n\n cache_key = Rendition.construct_cache_key(\n self, filter.get_cache_key(self), filter.spec\n )\n Rendition.cache_backend.set(cache_key, rendition)\n\n return rendition\n\n def find_existing_rendition(self, filter: \"Filter\") -> \"AbstractRendition\":\n \"\"\"\n Returns an existing ``Rendition`` instance with a ``file`` field value\n (an image) reflecting the supplied ``filter`` value and focal point\n values from this object.\n\n If no such rendition exists, a ``DoesNotExist`` error is raised for the\n relevant model.\n\n Note: If using custom image models, an instance 
of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n\n try:\n return self.find_existing_renditions(filter)[filter]\n except KeyError:\n raise Rendition.DoesNotExist\n\n def create_rendition(self, filter: \"Filter\") -> \"AbstractRendition\":\n \"\"\"\n Creates and returns a ``Rendition`` instance with a ``file`` field\n value (an image) reflecting the supplied ``filter`` value and focal\n point values from this object.\n\n This method is usually called by ``Image.get_rendition()``, after first\n checking that a suitable rendition does not already exist.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n # Because of unique constraints applied to the model, we use\n # get_or_create() to guard against race conditions\n rendition, created = self.renditions.get_or_create(\n filter_spec=filter.spec,\n focal_point_key=filter.get_cache_key(self),\n defaults={\"file\": self.generate_rendition_file(filter)},\n )\n return rendition\n\n def get_renditions(\n self, *filters: Union[\"Filter\", str]\n ) -> Dict[str, \"AbstractRendition\"]:\n \"\"\"\n Returns a ``dict`` of ``Rendition`` instances with image files reflecting\n the supplied ``filters``, keyed by filter spec patterns.\n\n Note: If using custom image models, instances of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n # We don\u2019t support providing mixed Filter and string arguments in the same call.\n if isinstance(filters[0], str):\n filters = [Filter(spec) for spec in dict.fromkeys(filters).keys()]\n\n # Find existing renditions where possible\n renditions = self.find_existing_renditions(*filters)\n\n # Create any renditions not found in prefetched values, cache or database\n not_found = [f for f in filters if f not in renditions]\n for filter, rendition in self.create_renditions(*not_found).items():\n self._add_to_prefetched_renditions(rendition)\n renditions[filter] = rendition\n\n # Update the cache\n cache_additions = {\n Rendition.construct_cache_key(\n self, filter.get_cache_key(self), filter.spec\n ): rendition\n for filter, rendition in renditions.items()\n # prevent writing of cached data back to the cache\n if not getattr(rendition, \"_from_cache\", False)\n }\n if cache_additions:\n Rendition.cache_backend.set_many(cache_additions)\n\n # Make sure key insertion order matches the input order.\n return {filter.spec: renditions[filter] for filter in filters}\n\n def find_existing_renditions(\n self, *filters: \"Filter\"\n ) -> Dict[\"Filter\", \"AbstractRendition\"]:\n \"\"\"\n Returns a dictionary of existing ``Rendition`` instances with ``file``\n values (images) reflecting the supplied ``filters`` and the focal point\n values from this object.\n\n Filters for which an existing rendition cannot be found are ommitted\n from the return value. 
If none of the requested renditions have been\n created before, the return value will be an empty dict.\n \"\"\"\n Rendition = self.get_rendition_model()\n filters_by_spec: Dict[str, Filter] = {f.spec: f for f in filters}\n found: Dict[Filter, AbstractRendition] = {}\n\n # Interrogate prefetched values first (where available)\n prefetched_renditions = self._get_prefetched_renditions()\n if prefetched_renditions is not None:\n # NOTE: When renditions are prefetched, it's assumed that if the\n # requested renditions exist, they will be present in the\n # prefetched value, and further cache/database lookups are avoided.\n\n # group renditions by the filters of interest\n potential_matches: Dict[Filter, List[AbstractRendition]] = defaultdict(list)\n for rendition in prefetched_renditions:\n try:\n filter = filters_by_spec[rendition.filter_spec]\n except KeyError:\n continue # this rendition can be ignored\n else:\n potential_matches[filter].append(rendition)\n\n # For each filter we have renditions for, look for one with a\n # 'focal_point_key' value matching filter.get_cache_key()\n for filter, renditions in potential_matches.items():\n focal_point_key = filter.get_cache_key(self)\n for rendition in renditions:\n if rendition.focal_point_key == focal_point_key:\n # to prevent writing of cached data back to the cache\n rendition._from_cache = True\n # use this rendition\n found[filter] = rendition\n # skip to the next filter\n break\n else:\n # Renditions are not prefetched, so attempt to find suitable\n # items in the cache or database\n\n # Query the cache first\n cache_keys = [\n Rendition.construct_cache_key(self, filter.get_cache_key(self), spec)\n for spec, filter in filters_by_spec.items()\n ]\n for rendition in Rendition.cache_backend.get_many(cache_keys).values():\n filter = filters_by_spec[rendition.filter_spec]\n found[filter] = rendition\n\n # For items not found in the cache, look in the database\n not_found = [f for f in filters if f not in found]\n if not_found:\n lookup_q = Q()\n for filter in not_found:\n lookup_q |= Q(\n filter_spec=filter.spec,\n focal_point_key=filter.get_cache_key(self),\n )\n for rendition in self.renditions.filter(lookup_q):\n filter = filters_by_spec[rendition.filter_spec]\n found[filter] = rendition\n return found\n\n def create_renditions(\n self, *filters: \"Filter\"\n ) -> Dict[\"Filter\", \"AbstractRendition\"]:\n \"\"\"\n Creates multiple ``Rendition`` instances with image files reflecting the supplied\n ``filters``, and returns them as a ``dict`` keyed by the relevant ``Filter`` instance.\n Where suitable renditions already exist in the database, they will be returned instead,\n so as not to create duplicates.\n\n This method is usually called by ``Image.get_renditions()``, after first\n checking that a suitable rendition does not already exist.\n\n Note: If using custom image models, an instance of the custom rendition\n model will be returned.\n \"\"\"\n Rendition = self.get_rendition_model()\n\n if not filters:\n return {}\n\n if len(filters) == 1:\n # create_rendition() is better for single renditions, as it can\n # utilize QuerySet.get_or_create(), which has better handling of\n # race conditions\n filter = filters[0]\n return {filter: self.create_rendition(filter)}\n\n return_value: Dict[Filter, AbstractRendition] = {}\n filter_map: Dict[str, Filter] = {f.spec: f for f in filters}\n\n with self.open_file() as file:\n original_image_bytes = file.read()\n\n to_create = []\n\n def _generate_single_rendition(filter):\n # Using ContentFile here 
ensures we generate all renditions. Simply\n # passing self.file required several page reloads to generate all\n image_file = self.generate_rendition_file(\n filter, source=ContentFile(original_image_bytes, name=self.file.name)\n )\n to_create.append(\n Rendition(\n image=self,\n filter_spec=filter.spec,\n focal_point_key=filter.get_cache_key(self),\n file=image_file,\n )\n )\n\n with ThreadPoolExecutor() as executor:\n executor.map(_generate_single_rendition, filters)\n\n # Rendition generation can take a while. So, if other processes have created\n # identical renditions in the meantime, we should find them to avoid clashes.\n # NB: Clashes can still occur, because there is no get_or_create() equivalent\n # for multiple objects. However, this will reduce that risk considerably.\n files_for_deletion: List[File] = []\n\n # Assemble Q() to identify potential clashes\n lookup_q = Q()\n for rendition in to_create:\n lookup_q |= Q(\n filter_spec=rendition.filter_spec,\n focal_point_key=rendition.focal_point_key,\n )\n\n for existing in self.renditions.filter(lookup_q):\n # Include the existing rendition in the return value\n filter = filter_map[existing.filter_spec]\n return_value[filter] = existing\n\n for new in to_create:\n if (\n new.filter_spec == existing.filter_spec\n and new.focal_point_key == existing.focal_point_key\n ):\n # Avoid creating the new version\n to_create.remove(new)\n # Mark for deletion later, so as not to hold up creation\n files_for_deletion.append(new.file)\n\n for new in Rendition.objects.bulk_create(to_create, ignore_conflicts=True):\n filter = filter_map[new.filter_spec]\n return_value[filter] = new\n\n # Delete redundant rendition image files\n for file in files_for_deletion:\n file.delete(save=False)\n\n return return_value\n\n def generate_rendition_file(self, filter: \"Filter\", *, source: File = None) -> File:\n \"\"\"\n Generates an in-memory image matching the supplied ``filter`` value\n and focal point value from this object, wraps it in a ``File`` object\n with a suitable filename, and returns it. The return value is used\n as the ``file`` field value for rendition objects saved by\n ``AbstractImage.create_rendition()``.\n\n If the contents of ``self.file`` has already been read into memory, the\n ``source`` keyword can be used to provide a reference to the in-memory\n ``File``, bypassing the need to reload the image contents from storage.\n\n NOTE: The responsibility of generating the new image from the original\n falls to the supplied ``filter`` object. 
If you want to do anything\n custom with rendition images (for example, to preserve metadata from\n the original image), you might want to consider swapping out ``filter``\n for an instance of a custom ``Filter`` subclass of your design.\n \"\"\"\n\n cache_key = filter.get_cache_key(self)\n\n logger.debug(\n \"Generating '%s' rendition for image %d\",\n filter.spec,\n self.pk,\n )\n\n start_time = time.time()\n\n try:\n generated_image = filter.run(\n self,\n SpooledTemporaryFile(max_size=settings.FILE_UPLOAD_MAX_MEMORY_SIZE),\n source=source,\n )\n\n logger.debug(\n \"Generated '%s' rendition for image %d in %.1fms\",\n filter.spec,\n self.pk,\n (time.time() - start_time) * 1000,\n )\n except: # noqa:B901,E722\n logger.debug(\n \"Failed to generate '%s' rendition for image %d\",\n filter.spec,\n self.pk,\n )\n raise\n\n # Generate filename\n input_filename = os.path.basename(self.file.name)\n input_filename_without_extension, input_extension = os.path.splitext(\n input_filename\n )\n output_extension = (\n filter.spec.replace(\"|\", \".\")\n + IMAGE_FORMAT_EXTENSIONS[generated_image.format_name]\n )\n if cache_key:\n output_extension = cache_key + \".\" + output_extension\n\n # Truncate filename to prevent it going over 60 chars\n output_filename_without_extension = input_filename_without_extension[\n : (59 - len(output_extension))\n ]\n output_filename = output_filename_without_extension + \".\" + output_extension\n\n return File(generated_image.f, name=output_filename)\n\n def is_portrait(self):\n return self.width < self.height\n\n def is_landscape(self):\n return self.height < self.width\n\n def is_svg(self):\n _, ext = os.path.splitext(self.file.name)\n return ext.lower() == \".svg\"\n\n @property\n def filename(self):\n return os.path.basename(self.file.name)\n\n @property\n def default_alt_text(self):\n # by default the alt text field (used in rich text insertion) is populated\n # from the title. Subclasses might provide a separate alt field, and\n # override this\n return self.title\n\n def is_editable_by_user(self, user):\n from wagtail.images.permissions import permission_policy\n\n return permission_policy.user_has_permission_for_instance(user, \"change\", self)\n\n class Meta:\n abstract = True\n\n\nclass Image(AbstractImage):\n admin_form_fields = (\n \"title\",\n \"file\",\n \"collection\",\n \"tags\",\n \"focal_point_x\",\n \"focal_point_y\",\n \"focal_point_width\",\n \"focal_point_height\",\n )\n\n class Meta(AbstractImage.Meta):\n verbose_name = _(\"image\")\n verbose_name_plural = _(\"images\")\n permissions = [\n (\"choose_image\", \"Can choose image\"),\n ]\n\n\nclass Filter:\n \"\"\"\n Represents one or more operations that can be applied to an Image to produce a rendition\n appropriate for final display on the website. 
Usually this would be a resize operation,\n but could potentially involve colour processing, etc.\n \"\"\"\n\n spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.]+$\")\n pipe_spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.\\|]+$\")\n expanding_spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.{},]+$\")\n pipe_expanding_spec_pattern = re.compile(r\"^[A-Za-z0-9_\\-\\.{},\\|]+$\")\n\n def __init__(self, spec=None):\n # The spec pattern is operation1-var1-var2|operation2-var1\n self.spec = spec\n\n @classmethod\n def expand_spec(self, spec: Union[\"str\", Iterable[\"str\"]]) -> List[\"str\"]:\n \"\"\"\n Converts a spec pattern with brace-expansions, into a list of spec patterns.\n For example, \"width-{100,200}\" becomes [\"width-100\", \"width-200\"].\n\n Supports providing filter specs already split, or pipe or space-separated.\n \"\"\"\n if isinstance(spec, str):\n separator = \"|\" if \"|\" in spec else \" \"\n spec = spec.split(separator)\n\n expanded_segments = []\n for segment in spec:\n # Check if segment has braces to expand\n if \"{\" in segment and \"}\" in segment:\n prefix, options_suffixed = segment.split(\"{\")\n options_pattern, suffix = options_suffixed.split(\"}\")\n options = options_pattern.split(\",\")\n expanded_segments.append(\n [prefix + option + suffix for option in options]\n )\n else:\n expanded_segments.append([segment])\n\n # Cartesian product of all expanded segments (equivalent to nested for loops).\n combinations = itertools.product(*expanded_segments)\n\n return [\"|\".join(combination) for combination in combinations]\n\n @cached_property\n def operations(self):\n # Search for operations\n registered_operations = {}\n for fn in hooks.get_hooks(\"register_image_operations\"):\n registered_operations.update(dict(fn()))\n\n # Build list of operation objects\n operations = []\n for op_spec in self.spec.split(\"|\"):\n op_spec_parts = op_spec.split(\"-\")\n\n if op_spec_parts[0] not in registered_operations:\n raise InvalidFilterSpecError(\n \"Unrecognised operation: %s\" % op_spec_parts[0]\n )\n\n op_class = registered_operations[op_spec_parts[0]]\n operations.append(op_class(*op_spec_parts))\n return operations\n\n @property\n def transform_operations(self):\n return [\n operation\n for operation in self.operations\n if isinstance(operation, TransformOperation)\n ]\n\n @property\n def filter_operations(self):\n return [\n operation\n for operation in self.operations\n if isinstance(operation, FilterOperation)\n ]\n\n def get_transform(self, image, size=None):\n \"\"\"\n Returns an ImageTransform with all the transforms in this filter applied.\n\n The ImageTransform is an object with two attributes:\n - .size - The size of the final image\n - .matrix - An affine transformation matrix that combines any\n transform/scale/rotation operations that need to be applied to the image\n \"\"\"\n\n if not size:\n size = (image.width, image.height)\n\n transform = ImageTransform(size, image_is_svg=image.is_svg())\n for operation in self.transform_operations:\n transform = operation.run(transform, image)\n return transform\n\n @contextmanager\n def get_willow_image(self, image: AbstractImage, source: File = None):\n if source is not None:\n yield willow.Image.open(source)\n else:\n with image.get_willow_image() as willow_image:\n yield willow_image\n\n def run(self, image: AbstractImage, output: BytesIO, source: File = None):\n with self.get_willow_image(image, source) as willow:\n\n original_format = willow.format_name\n\n # Fix orientation of image\n willow = 
willow.auto_orient()\n\n # Transform the image\n transform = self.get_transform(\n image, (willow.image.width, willow.image.height)\n )\n willow = willow.crop(transform.get_rect().round())\n willow = willow.resize(transform.size)\n\n # Apply filters\n env = {\n \"original-format\": original_format,\n }\n for operation in self.filter_operations:\n willow = operation.run(willow, image, env) or willow\n\n # Find the output format to use\n if \"output-format\" in env:\n # Developer specified an output format\n output_format = env[\"output-format\"]\n else:\n # Convert bmp and webp to png by default\n default_conversions = {\n \"avif\": \"png\",\n \"bmp\": \"png\",\n \"webp\": \"png\",\n }\n\n # Convert unanimated GIFs to PNG as well\n if not willow.has_animation():\n default_conversions[\"gif\"] = \"png\"\n\n # Allow the user to override the conversions\n conversion = getattr(settings, \"WAGTAILIMAGES_FORMAT_CONVERSIONS\", {})\n default_conversions.update(conversion)\n\n # Get the converted output format falling back to the original\n output_format = default_conversions.get(\n original_format, original_format\n )\n\n if output_format == \"jpeg\":\n # Allow changing of JPEG compression quality\n if \"jpeg-quality\" in env:\n quality = env[\"jpeg-quality\"]\n else:\n quality = getattr(settings, \"WAGTAILIMAGES_JPEG_QUALITY\", 85)\n\n # If the image has an alpha channel, give it a white background\n if willow.has_alpha():\n willow = willow.set_background_color_rgb((255, 255, 255))\n\n return willow.save_as_jpeg(\n output, quality=quality, progressive=True, optimize=True\n )\n elif output_format == \"png\":\n return willow.save_as_png(output, optimize=True)\n elif output_format == \"gif\":\n return willow.save_as_gif(output)\n elif output_format == \"webp\":\n # Allow changing of WebP compression quality\n if (\n \"output-format-options\" in env\n and \"lossless\" in env[\"output-format-options\"]\n ):\n return willow.save_as_webp(output, lossless=True)\n elif \"webp-quality\" in env:\n quality = env[\"webp-quality\"]\n else:\n quality = getattr(settings, \"WAGTAILIMAGES_WEBP_QUALITY\", 80)\n\n return willow.save_as_webp(output, quality=quality)\n elif output_format == \"avif\":\n # Allow changing of AVIF compression quality\n if (\n \"output-format-options\" in env\n and \"lossless\" in env[\"output-format-options\"]\n ):\n return willow.save_as_avif(output, lossless=True)\n elif \"avif-quality\" in env:\n quality = env[\"avif-quality\"]\n else:\n quality = getattr(settings, \"WAGTAILIMAGES_AVIF_QUALITY\", 80)\n return willow.save_as_avif(output, quality=quality)\n elif output_format == \"svg\":\n return willow.save_as_svg(output)\n raise UnknownOutputImageFormatError(\n f\"Unknown output image format '{output_format}'\"\n )\n\n def get_cache_key(self, image):\n vary_parts = []\n\n for operation in self.operations:\n for field in getattr(operation, \"vary_fields\", []):\n value = getattr(image, field, \"\")\n vary_parts.append(str(value))\n\n vary_string = \"-\".join(vary_parts)\n\n # Return blank string if there are no vary fields\n if not vary_string:\n return \"\"\n\n return hashlib.sha1(vary_string.encode(\"utf-8\")).hexdigest()[:8]\n\n\nclass ResponsiveImage:\n \"\"\"\n A custom object used to represent a collection of renditions.\n Provides a 'renditions' property to access the renditions,\n and renders to the front-end HTML.\n \"\"\"\n\n def __init__(\n self,\n renditions: Dict[str, \"AbstractRendition\"],\n attrs: Optional[Dict[str, Any]] = None,\n ):\n self.renditions = 
list(renditions.values())\n self.attrs = attrs\n\n @classmethod\n def get_width_srcset(cls, renditions_list: List[\"AbstractRendition\"]):\n if len(renditions_list) == 1:\n # No point in using width descriptors if there is a single image.\n return renditions_list[0].url\n\n return \", \".join([f\"{r.url} {r.width}w\" for r in renditions_list])\n\n def __html__(self):\n attrs = self.attrs or {}\n\n # No point in adding a srcset if there is a single image.\n if len(self.renditions) > 1:\n attrs[\"srcset\"] = self.get_width_srcset(self.renditions)\n\n # The first rendition is the \"base\" / \"fallback\" image.\n return self.renditions[0].img_tag(attrs)\n\n def __str__(self):\n return mark_safe(self.__html__())\n\n def __bool__(self):\n return bool(self.renditions)\n\n def __eq__(self, other: \"ResponsiveImage\"):\n if isinstance(other, ResponsiveImage):\n return self.renditions == other.renditions and self.attrs == other.attrs\n return False\n\n\nclass Picture(ResponsiveImage):\n # Keep this separate from FormatOperation.supported_formats,\n # as the order our formats are defined in is essential for the picture tag.\n # Defines the order of <source> elements in the tag when format operations\n # are in use, and the priority order to identify the \"fallback\" format.\n # The browser will pick the first supported format in this list.\n source_format_order = [\"avif\", \"webp\", \"jpeg\", \"png\", \"gif\"]\n\n def __init__(\n self,\n renditions: Dict[str, \"AbstractRendition\"],\n attrs: Optional[Dict[str, Any]] = None,\n ):\n super().__init__(renditions, attrs)\n # Store renditions grouped by format separately for access from templates.\n self.formats = self.get_formats(renditions)\n\n def get_formats(\n self, renditions: Dict[str, \"AbstractRendition\"]\n ) -> Dict[str, List[\"AbstractRendition\"]]:\n \"\"\"\n Group renditions by the format they are for, if any.\n If there is only one format, no grouping is required.\n \"\"\"\n formats = defaultdict(list)\n for spec, rendition in renditions.items():\n for fmt in FormatOperation.supported_formats:\n # Identify the spec\u2019s format (if any).\n if f\"format-{fmt}\" in spec:\n formats[fmt].append(rendition)\n break\n # Avoid the split by format if there is only one.\n if len(formats.keys()) < 2:\n return {}\n\n return formats\n\n def get_fallback_format(self):\n for fmt in reversed(self.source_format_order):\n if fmt in self.formats:\n return fmt\n\n def __html__(self):\n # If there aren\u2019t multiple formats, render a vanilla img tag with srcset.\n if not self.formats:\n return mark_safe(f\"<picture>{super().__html__()}</picture>\")\n\n attrs = self.attrs or {}\n\n sizes = f'sizes=\"{attrs[\"sizes\"]}\" ' if \"sizes\" in attrs else \"\"\n fallback_format = self.get_fallback_format()\n fallback_renditions = self.formats[fallback_format]\n\n sources = []\n\n for fmt in self.source_format_order:\n if fmt != fallback_format and fmt in self.formats:\n srcset = self.get_width_srcset(self.formats[fmt])\n mime = image_format_name_to_content_type(fmt)\n sources.append(f'<source srcset=\"{srcset}\" {sizes}type=\"{mime}\">')\n\n if len(fallback_renditions) > 1:\n attrs[\"srcset\"] = self.get_width_srcset(fallback_renditions)\n\n # The first rendition is the \"base\" / \"fallback\" image.\n fallback = fallback_renditions[0].img_tag(attrs)\n\n return mark_safe(f\"<picture>{''.join(sources)}{fallback}</picture>\")\n\n\nclass AbstractRendition(ImageFileMixin, models.Model):\n filter_spec = models.CharField(max_length=255, db_index=True)\n \"\"\" Use local 
ImageField with Willow support. \"\"\"\n file = WagtailImageField(\n upload_to=get_rendition_upload_to,\n storage=get_rendition_storage,\n width_field=\"width\",\n height_field=\"height\",\n )\n width = models.IntegerField(editable=False)\n height = models.IntegerField(editable=False)\n focal_point_key = models.CharField(\n max_length=16, blank=True, default=\"\", editable=False\n )\n\n wagtail_reference_index_ignore = True\n\n @property\n def url(self):\n return self.file.url\n\n @property\n def alt(self):\n return self.image.default_alt_text\n\n @property\n def attrs(self):\n \"\"\"\n The src, width, height, and alt attributes for an <img> tag, as a HTML\n string\n \"\"\"\n return flatatt(self.attrs_dict)\n\n @property\n def attrs_dict(self):\n \"\"\"\n A dict of the src, width, height, and alt attributes for an <img> tag.\n \"\"\"\n return OrderedDict(\n [\n (\"src\", self.url),\n (\"width\", self.width),\n (\"height\", self.height),\n (\"alt\", self.alt),\n ]\n )\n\n @property\n def full_url(self):\n url = self.url\n if hasattr(settings, \"WAGTAILADMIN_BASE_URL\") and url.startswith(\"/\"):\n url = settings.WAGTAILADMIN_BASE_URL + url\n return url\n\n @property\n def filter(self):\n return Filter(self.filter_spec)\n\n @cached_property\n def focal_point(self):\n image_focal_point = self.image.get_focal_point()\n if image_focal_point:\n transform = self.filter.get_transform(self.image)\n return image_focal_point.transform(transform)\n\n @property\n def background_position_style(self):\n \"\"\"\n Returns a `background-position` rule to be put in the inline style of an element which uses the rendition for its background.\n\n This positions the rendition according to the value of the focal point. This is helpful for when the element does not have\n the same aspect ratio as the rendition.\n\n For example:\n\n {% image page.image fill-1920x600 as image %}\n <div style=\"background-image: url('{{ image.url }}'); {{ image.background_position_style }}\">\n </div>\n \"\"\"\n focal_point = self.focal_point\n if focal_point:\n horz = int((focal_point.x * 100) // self.width)\n vert = int((focal_point.y * 100) // self.height)\n return f\"background-position: {horz}% {vert}%;\"\n else:\n return \"background-position: 50% 50%;\"\n\n def img_tag(self, extra_attributes={}):\n attrs = self.attrs_dict.copy()\n\n attrs.update(apps.get_app_config(\"wagtailimages\").default_attrs)\n\n attrs.update(extra_attributes)\n\n return mark_safe(f\"<img{flatatt(attrs)}>\")\n\n def __html__(self):\n return self.img_tag()\n\n def get_upload_to(self, filename):\n folder_name = \"images\"\n filename = self.file.field.storage.get_valid_name(filename)\n return os.path.join(folder_name, filename)\n\n @classmethod\n def check(cls, **kwargs):\n errors = super().check(**kwargs)\n if not cls._meta.abstract:\n if not any(\n set(constraint) == {\"image\", \"filter_spec\", \"focal_point_key\"}\n for constraint in cls._meta.unique_together\n ):\n errors.append(\n checks.Error(\n \"Custom rendition model %r has an invalid unique_together setting\"\n % cls,\n hint=\"Custom rendition models must include the constraint \"\n \"('image', 'filter_spec', 'focal_point_key') in their unique_together definition.\",\n obj=cls,\n id=\"wagtailimages.E001\",\n )\n )\n\n return errors\n\n @staticmethod\n def construct_cache_key(image, filter_cache_key, filter_spec):\n return \"wagtail-rendition-\" + \"-\".join(\n [str(image.id), image.file_hash, filter_cache_key, filter_spec]\n )\n\n @classproperty\n def cache_backend(cls) -> BaseCache:\n try:\n 
return caches[\"renditions\"]\n except InvalidCacheBackendError:\n return caches[DEFAULT_CACHE_ALIAS]\n\n def get_cache_key(self):\n return self.construct_cache_key(\n self.image, self.focal_point_key, self.filter_spec\n )\n\n def purge_from_cache(self):\n self.cache_backend.delete(self.get_cache_key())\n\n class Meta:\n abstract = True\n\n\nclass Rendition(AbstractRendition):\n image = models.ForeignKey(\n Image, related_name=\"renditions\", on_delete=models.CASCADE\n )\n\n class Meta:\n unique_together = ((\"image\", \"filter_spec\", \"focal_point_key\"),)\n\n\nclass UploadedImage(models.Model):\n \"\"\"\n Temporary storage for images uploaded through the multiple image uploader, when validation rules (e.g.\n required metadata fields) prevent creating an Image object from the image file alone. In this case,\n the image file is stored against this model, to be turned into an Image object once the full form\n has been filled in.\n \"\"\"\n\n file = models.ImageField(upload_to=\"uploaded_images\", max_length=200)\n uploaded_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"uploaded by user\"),\n null=True,\n blank=True,\n editable=False,\n on_delete=models.SET_NULL,\n )\n uploaded_by_user.wagtail_reference_index_ignore = True\n", "path": "wagtail/images/models.py"}]} |
gh_patches_debug_1398 | rasdani/github-patches | git_diff | Textualize__textual-2690 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting `Select.value` in `on_mount` for a `Select` in a container raises an exception
This issue initially stems from [a question on Discord](https://discord.com/channels/1026214085173461072/1033754296224841768/1111680440709947533): it seems that it isn't possible to set `Select.value` in `on_mount` if the `Select` is inside a container. That is to say, this code works fine:
```python
from textual.app import App, ComposeResult
from textual.widgets import Select
class SelectBugApp( App[ None ] ):
def on_mount( self ) -> None:
self.query_one( Select ).value = 1
def compose( self ) -> ComposeResult:
yield Select( ( ( str( n ), n ) for n in range( 10 ) ) )
if __name__ == "__main__":
SelectBugApp().run()
```
but this code:
```python
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Select
class SelectBugApp( App[ None ] ):
def on_mount( self ) -> None:
self.query_one( Select ).value = 1
def compose( self ) -> ComposeResult:
yield Horizontal( Select( ( ( str( n ), n ) for n in range( 10 ) ) ) )
if __name__ == "__main__":
SelectBugApp().run()
```
raises this error:
```
╭───────────────────────────────────────────────────── Traceback (most recent call last) ──────────────────────────────────────────────────────╮
│ /Users/davep/develop/python/textual-sandbox/select_value.py:8 in on_mount │
│ │
│ 5 class SelectBugApp( App[ None ] ): │
│ 6 │ │
│ 7 │ def on_mount( self ) -> None: │
│ ❱ 8 │ │ self.query_one( Select ).value = 1 │
│ 9 │ │
│ 10 │ def compose( self ) -> ComposeResult: │
│ 11 │ │ yield Horizontal( Select( ( ( str( n ), n ) for n in range( 10 ) ) ) ) │
│ │
│ ╭───────────────────────────── locals ──────────────────────────────╮ │
│ │ self = SelectBugApp(title='SelectBugApp', classes={'-dark-mode'}) │ │
│ ╰───────────────────────────────────────────────────────────────────╯ │
│ │
│ /Users/davep/develop/python/textual-sandbox/.venv/lib/python3.10/site-packages/textual/widgets/_select.py:310 in _watch_value │
│ │
│ 307 │ │ │ if value is None: │
│ 308 │ │ │ │ self.query_one(SelectCurrent).update(None) │
│ 309 │ │ │ else: │
│ ❱ 310 │ │ │ │ for index, (prompt, _value) in enumerate(self._options): │
│ 311 │ │ │ │ │ if _value == value: │
│ 312 │ │ │ │ │ │ select_overlay = self.query_one(SelectOverlay) │
│ 313 │ │ │ │ │ │ select_overlay.highlighted = index │
│ │
│ ╭────────────────────────── locals ──────────────────────────╮ │
│ │ select_current = SelectCurrent(pseudo_classes={'enabled'}) │ │
│ │ self = Select(pseudo_classes={'enabled'}) │ │
│ │ value = 1 │ │
│ ╰────────────────────────────────────────────────────────────╯ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
AttributeError: 'Select' object has no attribute '_options'
```
--- END ISSUE ---
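Editor's note: the traceback shows `_watch_value()` iterating `self._options`, an attribute that is only created once the widget's `_on_mount()` has called `set_options()`. When the `Select` sits inside a container, the app-level `on_mount` can fire before that has happened, hence the `AttributeError`. Until a fix lands, one way to sidestep the crash — shown here only as a minimal, untested sketch built on the constructor's documented `value` parameter (the class name is hypothetical) — is to hand the initial value to `Select` at construction time and let the widget apply it during its own mount:

```python
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Select


class SelectWorkaroundApp(App[None]):  # hypothetical name, not from the issue
    def compose(self) -> ComposeResult:
        # Passing `value=` up front means the Select assigns it to itself in
        # its own _on_mount(), after set_options() has populated _options.
        yield Horizontal(Select(((str(n), n) for n in range(10)), value=1))


if __name__ == "__main__":
    SelectWorkaroundApp().run()
```

Deferring the assignment out of `on_mount` (for example via `call_after_refresh`) should also avoid the early watcher call, though neither approach is verified here.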
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_select.py`
Content:
```
1 from __future__ import annotations
2
3 from dataclasses import dataclass
4 from typing import TYPE_CHECKING, Generic, Iterable, Optional, TypeVar
5
6 from rich.console import RenderableType
7 from rich.text import Text
8
9 from .. import events, on
10 from ..app import ComposeResult
11 from ..containers import Horizontal, Vertical
12 from ..css.query import NoMatches
13 from ..message import Message
14 from ..reactive import var
15 from ..widgets import Static
16 from ._option_list import Option, OptionList
17
18 if TYPE_CHECKING:
19 from typing_extensions import TypeAlias
20
21
22 class SelectOverlay(OptionList):
23 """The 'pop-up' overlay for the Select control."""
24
25 BINDINGS = [("escape", "dismiss")]
26
27 DEFAULT_CSS = """
28 SelectOverlay {
29 border: tall $background;
30 background: $panel;
31 color: $text;
32 width: 100%;
33 padding: 0 1;
34 }
35 SelectOverlay > .option-list--option {
36 padding: 0 1;
37 }
38 """
39
40 @dataclass
41 class Dismiss(Message):
42 """Inform ancestor the overlay should be dismissed."""
43
44 lost_focus: bool = False
45 """True if the overlay lost focus."""
46
47 @dataclass
48 class UpdateSelection(Message):
49 """Inform ancestor the selection was changed."""
50
51 option_index: int
52 """The index of the new selection."""
53
54 def select(self, index: int | None) -> None:
55 """Move selection.
56
57 Args:
58 index: Index of new selection.
59 """
60 self.highlighted = index
61 self.scroll_to_highlight(top=True)
62
63 def action_dismiss(self) -> None:
64 """Dismiss the overlay."""
65 self.post_message(self.Dismiss())
66
67 def _on_blur(self, _event: events.Blur) -> None:
68 """On blur we want to dismiss the overlay."""
69 self.post_message(self.Dismiss(lost_focus=True))
70
71 def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
72 """Inform parent when an option is selected."""
73 event.stop()
74 self.post_message(self.UpdateSelection(event.option_index))
75
76
77 class SelectCurrent(Horizontal):
78 """Displays the currently selected option."""
79
80 DEFAULT_CSS = """
81 SelectCurrent {
82 border: tall $background;
83 background: $boost;
84 color: $text;
85 width: 100%;
86 height: auto;
87 padding: 0 2;
88 }
89 SelectCurrent Static#label {
90 width: 1fr;
91 height: auto;
92 color: $text-disabled;
93 background: transparent;
94 }
95 SelectCurrent.-has-value Static#label {
96 color: $text;
97 }
98 SelectCurrent .arrow {
99 box-sizing: content-box;
100 width: 1;
101 height: 1;
102 padding: 0 0 0 1;
103 color: $text-muted;
104 background: transparent;
105 }
106 SelectCurrent .arrow {
107 box-sizing: content-box;
108 width: 1;
109 height: 1;
110 padding: 0 0 0 1;
111 color: $text-muted;
112 background: transparent;
113 }
114 """
115
116 has_value: var[bool] = var(False)
117 """True if there is a current value, or False if it is None."""
118
119 class Toggle(Message):
120 """Request toggle overlay."""
121
122 def __init__(self, placeholder: str) -> None:
123 """Initialize the SelectCurrent.
124
125 Args:
126 placeholder: A string to display when there is nothing selected.
127 """
128 super().__init__()
129 self.placeholder = placeholder
130 self.label: RenderableType | None = None
131
132 def update(self, label: RenderableType | None) -> None:
133 """Update the content in the widget.
134
135 Args:
136 label: A renderable to display, or `None` for the placeholder.
137 """
138 self.label = label
139 self.has_value = label is not None
140 self.query_one("#label", Static).update(
141 self.placeholder if label is None else label
142 )
143
144 def compose(self) -> ComposeResult:
145 """Compose label and down arrow."""
146 yield Static(self.placeholder, id="label")
147 yield Static("▼", classes="arrow down-arrow")
148 yield Static("▲", classes="arrow up-arrow")
149
150 def _watch_has_value(self, has_value: bool) -> None:
151 """Toggle the class."""
152 self.set_class(has_value, "-has-value")
153
154 async def _on_click(self, event: events.Click) -> None:
155 """Inform ancestor we want to toggle."""
156 self.post_message(self.Toggle())
157
158
159 SelectType = TypeVar("SelectType")
160 """The type used for data in the Select."""
161 SelectOption: TypeAlias = "tuple[str, SelectType]"
162 """The type used for options in the Select."""
163
164
165 class Select(Generic[SelectType], Vertical, can_focus=True):
166 """Widget to select from a list of possible options.
167
168 A Select displays the current selection.
169 When activated with ++enter++ the widget displays an overlay with a list of all possible options.
170
171 """
172
173 BINDINGS = [("enter,down,space,up", "show_overlay")]
174 """
175 | Key(s) | Description |
176 | :- | :- |
177 | enter,down,space,up | Activate the overlay |
178 """
179
180 DEFAULT_CSS = """
181 Select {
182 height: auto;
183 }
184
185 Select:focus > SelectCurrent {
186 border: tall $accent;
187 }
188
189 Select > SelectOverlay {
190 width: 1fr;
191 display: none;
192 height: auto;
193 max-height: 10;
194 overlay: screen;
195 constrain: y;
196 }
197
198 Select .up-arrow {
199 display:none;
200 }
201
202 Select.-expanded .down-arrow {
203 display:none;
204 }
205
206 Select.-expanded .up-arrow {
207 display: block;
208 }
209
210 Select.-expanded > SelectOverlay {
211 display: block;
212 }
213
214 Select.-expanded > SelectCurrent {
215 border: tall $accent;
216 }
217 """
218
219 expanded: var[bool] = var(False, init=False)
220 """True to show the overlay, otherwise False."""
221 prompt: var[str] = var[str]("Select")
222 """The prompt to show when no value is selected."""
223 value: var[SelectType | None] = var[Optional[SelectType]](None)
224 """The value of the select."""
225
226 class Changed(Message, bubble=True):
227 """Posted when the select value was changed.
228
229 This message can be handled using a `on_select_changed` method.
230 """
231
232 def __init__(self, select: Select, value: SelectType | None) -> None:
233 """
234 Initialize the Changed message.
235 """
236 super().__init__()
237 self.select = select
238 """The select widget."""
239 self.value = value
240 """The value of the Select when it changed."""
241
242 @property
243 def control(self) -> Select:
244 """The Select that sent the message."""
245 return self.select
246
247 def __init__(
248 self,
249 options: Iterable[tuple[str, SelectType]],
250 *,
251 prompt: str = "Select",
252 allow_blank: bool = True,
253 value: SelectType | None = None,
254 name: str | None = None,
255 id: str | None = None,
256 classes: str | None = None,
257 disabled: bool = False,
258 ):
259 """Initialize the Select control
260
261 Args:
262 options: Options to select from.
263 prompt: Text to show in the control when no option is select.
264 allow_blank: Allow the selection of a blank option.
265 value: Initial value (should be one of the values in `options`).
266 name: The name of the select control.
267 id: The ID of the control the DOM.
268 classes: The CSS classes of the control.
269 disabled: Whether the control is disabled or not.
270 """
271 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
272 self._allow_blank = allow_blank
273 self.prompt = prompt
274 self._initial_options = list(options)
275 self._value: SelectType | None = value
276
277 def set_options(self, options: Iterable[tuple[RenderableType, SelectType]]) -> None:
278 """Set the options for the Select.
279
280 Args:
281 options: An iterable of tuples containing (STRING, VALUE).
282 """
283 self._options: list[tuple[RenderableType, SelectType | None]] = list(options)
284
285 if self._allow_blank:
286 self._options.insert(0, ("", None))
287
288 self._select_options: list[Option] = [
289 (
290 Option(Text(self.prompt, style="dim"))
291 if value is None
292 else Option(prompt)
293 )
294 for prompt, value in self._options
295 ]
296
297 option_list = self.query_one(SelectOverlay)
298 option_list.clear_options()
299 for option in self._select_options:
300 option_list.add_option(option)
301
302 def _watch_value(self, value: SelectType | None) -> None:
303 """Update the current value when it changes."""
304 self._value = value
305 try:
306 select_current = self.query_one(SelectCurrent)
307 except NoMatches:
308 pass
309 else:
310 if value is None:
311 self.query_one(SelectCurrent).update(None)
312 else:
313 for index, (prompt, _value) in enumerate(self._options):
314 if _value == value:
315 select_overlay = self.query_one(SelectOverlay)
316 select_overlay.highlighted = index
317 self.query_one(SelectCurrent).update(prompt)
318 break
319 else:
320 self.query_one(SelectCurrent).update(None)
321
322 def compose(self) -> ComposeResult:
323 """Compose Select with overlay and current value."""
324 yield SelectCurrent(self.prompt)
325 yield SelectOverlay()
326
327 def _on_mount(self, _event: events.Mount) -> None:
328 """Set initial values."""
329 self.set_options(self._initial_options)
330 self.value = self._value
331
332 def _watch_expanded(self, expanded: bool) -> None:
333 """Display or hide overlay."""
334 overlay = self.query_one(SelectOverlay)
335 self.set_class(expanded, "-expanded")
336 if expanded:
337 overlay.focus()
338 if self.value is None:
339 overlay.select(None)
340 self.query_one(SelectCurrent).has_value = False
341 else:
342 value = self.value
343 for index, (_prompt, prompt_value) in enumerate(self._options):
344 if value == prompt_value:
345 overlay.select(index)
346 break
347 self.query_one(SelectCurrent).has_value = True
348
349 @on(SelectCurrent.Toggle)
350 def _select_current_toggle(self, event: SelectCurrent.Toggle) -> None:
351 """Show the overlay when toggled."""
352 event.stop()
353 self.expanded = not self.expanded
354
355 @on(SelectOverlay.Dismiss)
356 def _select_overlay_dismiss(self, event: SelectOverlay.Dismiss) -> None:
357 """Dismiss the overlay."""
358 event.stop()
359 self.expanded = False
360 if not event.lost_focus:
361 # If the overlay didn't lose focus, we want to re-focus the select.
362 self.focus()
363
364 @on(SelectOverlay.UpdateSelection)
365 def _update_selection(self, event: SelectOverlay.UpdateSelection) -> None:
366 """Update the current selection."""
367 event.stop()
368 value = self._options[event.option_index][1]
369 self.value = value
370
371 async def update_focus() -> None:
372 """Update focus and reset overlay."""
373 self.focus()
374 self.expanded = False
375
376 self.call_after_refresh(update_focus) # Prevents a little flicker
377 self.post_message(self.Changed(self, value))
378
379 def action_show_overlay(self) -> None:
380 """Show the overlay."""
381 select_current = self.query_one(SelectCurrent)
382 select_current.has_value = True
383 self.expanded = True
384
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_select.py b/src/textual/widgets/_select.py
--- a/src/textual/widgets/_select.py
+++ b/src/textual/widgets/_select.py
@@ -273,6 +273,7 @@
self.prompt = prompt
self._initial_options = list(options)
self._value: SelectType | None = value
+ self._options = options
def set_options(self, options: Iterable[tuple[RenderableType, SelectType]]) -> None:
"""Set the options for the Select.
| {"golden_diff": "diff --git a/src/textual/widgets/_select.py b/src/textual/widgets/_select.py\n--- a/src/textual/widgets/_select.py\n+++ b/src/textual/widgets/_select.py\n@@ -273,6 +273,7 @@\n self.prompt = prompt\n self._initial_options = list(options)\n self._value: SelectType | None = value\n+ self._options = options\n \n def set_options(self, options: Iterable[tuple[RenderableType, SelectType]]) -> None:\n \"\"\"Set the options for the Select.\n", "issue": "Setting `Select.value` in `on_mount` for a `Select` in a container raises an exception\nInitially stemming from [a question on Discord](https://discord.com/channels/1026214085173461072/1033754296224841768/1111680440709947533), it seems that it isn't possible to set `Select.value` in `on_mount` if the `Select` is inside a container. That is to say, this code works fine:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Select\r\n\r\nclass SelectBugApp( App[ None ] ):\r\n\r\n def on_mount( self ) -> None:\r\n self.query_one( Select ).value = 1\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Select( ( ( str( n ), n ) for n in range( 10 ) ) )\r\n\r\nif __name__ == \"__main__\":\r\n SelectBugApp().run()\r\n```\r\n\r\nbut this code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Horizontal\r\nfrom textual.widgets import Select\r\n\r\nclass SelectBugApp( App[ None ] ):\r\n\r\n def on_mount( self ) -> None:\r\n self.query_one( Select ).value = 1\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Horizontal( Select( ( ( str( n ), n ) for n in range( 10 ) ) ) )\r\n\r\nif __name__ == \"__main__\":\r\n SelectBugApp().run()\r\n```\r\n\r\nraises this error:\r\n\r\n```\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Traceback (most recent call last) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 /Users/davep/develop/python/textual-sandbox/select_value.py:8 in on_mount \u2502\r\n\u2502 \u2502\r\n\u2502 5 class SelectBugApp( App[ None ] ): \u2502\r\n\u2502 6 \u2502 \u2502\r\n\u2502 7 \u2502 def on_mount( self ) -> None: \u2502\r\n\u2502 \u2771 8 \u2502 \u2502 self.query_one( Select ).value = 1 \u2502\r\n\u2502 9 \u2502 \u2502\r\n\u2502 10 \u2502 def compose( self ) -> ComposeResult: \u2502\r\n\u2502 11 \u2502 \u2502 yield Horizontal( Select( ( ( str( n ), n ) for n in range( 10 ) ) ) ) \u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 self = SelectBugApp(title='SelectBugApp', classes={'-dark-mode'}) \u2502 \u2502\r\n\u2502 
\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 \u2502\r\n\u2502 /Users/davep/develop/python/textual-sandbox/.venv/lib/python3.10/site-packages/textual/widgets/_select.py:310 in _watch_value \u2502\r\n\u2502 \u2502\r\n\u2502 307 \u2502 \u2502 \u2502 if value is None: \u2502\r\n\u2502 308 \u2502 \u2502 \u2502 \u2502 self.query_one(SelectCurrent).update(None) \u2502\r\n\u2502 309 \u2502 \u2502 \u2502 else: \u2502\r\n\u2502 \u2771 310 \u2502 \u2502 \u2502 \u2502 for index, (prompt, _value) in enumerate(self._options): \u2502\r\n\u2502 311 \u2502 \u2502 \u2502 \u2502 \u2502 if _value == value: \u2502\r\n\u2502 312 \u2502 \u2502 \u2502 \u2502 \u2502 \u2502 select_overlay = self.query_one(SelectOverlay) \u2502\r\n\u2502 313 \u2502 \u2502 \u2502 \u2502 \u2502 \u2502 select_overlay.highlighted = index \u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 select_current = SelectCurrent(pseudo_classes={'enabled'}) \u2502 \u2502\r\n\u2502 \u2502 self = Select(pseudo_classes={'enabled'}) \u2502 \u2502\r\n\u2502 \u2502 value = 1 \u2502 \u2502\r\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nAttributeError: 'Select' object has no attribute '_options'\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Generic, Iterable, Optional, TypeVar\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. 
import events, on\nfrom ..app import ComposeResult\nfrom ..containers import Horizontal, Vertical\nfrom ..css.query import NoMatches\nfrom ..message import Message\nfrom ..reactive import var\nfrom ..widgets import Static\nfrom ._option_list import Option, OptionList\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n\nclass SelectOverlay(OptionList):\n \"\"\"The 'pop-up' overlay for the Select control.\"\"\"\n\n BINDINGS = [(\"escape\", \"dismiss\")]\n\n DEFAULT_CSS = \"\"\"\n SelectOverlay {\n border: tall $background;\n background: $panel;\n color: $text;\n width: 100%;\n padding: 0 1;\n }\n SelectOverlay > .option-list--option {\n padding: 0 1;\n }\n \"\"\"\n\n @dataclass\n class Dismiss(Message):\n \"\"\"Inform ancestor the overlay should be dismissed.\"\"\"\n\n lost_focus: bool = False\n \"\"\"True if the overlay lost focus.\"\"\"\n\n @dataclass\n class UpdateSelection(Message):\n \"\"\"Inform ancestor the selection was changed.\"\"\"\n\n option_index: int\n \"\"\"The index of the new selection.\"\"\"\n\n def select(self, index: int | None) -> None:\n \"\"\"Move selection.\n\n Args:\n index: Index of new selection.\n \"\"\"\n self.highlighted = index\n self.scroll_to_highlight(top=True)\n\n def action_dismiss(self) -> None:\n \"\"\"Dismiss the overlay.\"\"\"\n self.post_message(self.Dismiss())\n\n def _on_blur(self, _event: events.Blur) -> None:\n \"\"\"On blur we want to dismiss the overlay.\"\"\"\n self.post_message(self.Dismiss(lost_focus=True))\n\n def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:\n \"\"\"Inform parent when an option is selected.\"\"\"\n event.stop()\n self.post_message(self.UpdateSelection(event.option_index))\n\n\nclass SelectCurrent(Horizontal):\n \"\"\"Displays the currently selected option.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n SelectCurrent {\n border: tall $background;\n background: $boost;\n color: $text;\n width: 100%;\n height: auto;\n padding: 0 2;\n }\n SelectCurrent Static#label {\n width: 1fr;\n height: auto;\n color: $text-disabled;\n background: transparent;\n }\n SelectCurrent.-has-value Static#label {\n color: $text;\n }\n SelectCurrent .arrow {\n box-sizing: content-box;\n width: 1;\n height: 1;\n padding: 0 0 0 1;\n color: $text-muted;\n background: transparent;\n }\n SelectCurrent .arrow {\n box-sizing: content-box;\n width: 1;\n height: 1;\n padding: 0 0 0 1;\n color: $text-muted;\n background: transparent;\n }\n \"\"\"\n\n has_value: var[bool] = var(False)\n \"\"\"True if there is a current value, or False if it is None.\"\"\"\n\n class Toggle(Message):\n \"\"\"Request toggle overlay.\"\"\"\n\n def __init__(self, placeholder: str) -> None:\n \"\"\"Initialize the SelectCurrent.\n\n Args:\n placeholder: A string to display when there is nothing selected.\n \"\"\"\n super().__init__()\n self.placeholder = placeholder\n self.label: RenderableType | None = None\n\n def update(self, label: RenderableType | None) -> None:\n \"\"\"Update the content in the widget.\n\n Args:\n label: A renderable to display, or `None` for the placeholder.\n \"\"\"\n self.label = label\n self.has_value = label is not None\n self.query_one(\"#label\", Static).update(\n self.placeholder if label is None else label\n )\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose label and down arrow.\"\"\"\n yield Static(self.placeholder, id=\"label\")\n yield Static(\"\u25bc\", classes=\"arrow down-arrow\")\n yield Static(\"\u25b2\", classes=\"arrow up-arrow\")\n\n def _watch_has_value(self, has_value: bool) -> None:\n 
\"\"\"Toggle the class.\"\"\"\n self.set_class(has_value, \"-has-value\")\n\n async def _on_click(self, event: events.Click) -> None:\n \"\"\"Inform ancestor we want to toggle.\"\"\"\n self.post_message(self.Toggle())\n\n\nSelectType = TypeVar(\"SelectType\")\n\"\"\"The type used for data in the Select.\"\"\"\nSelectOption: TypeAlias = \"tuple[str, SelectType]\"\n\"\"\"The type used for options in the Select.\"\"\"\n\n\nclass Select(Generic[SelectType], Vertical, can_focus=True):\n \"\"\"Widget to select from a list of possible options.\n\n A Select displays the current selection.\n When activated with ++enter++ the widget displays an overlay with a list of all possible options.\n\n \"\"\"\n\n BINDINGS = [(\"enter,down,space,up\", \"show_overlay\")]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter,down,space,up | Activate the overlay |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Select {\n height: auto;\n }\n\n Select:focus > SelectCurrent {\n border: tall $accent;\n }\n\n Select > SelectOverlay {\n width: 1fr;\n display: none;\n height: auto;\n max-height: 10;\n overlay: screen;\n constrain: y;\n }\n\n Select .up-arrow {\n display:none;\n }\n\n Select.-expanded .down-arrow {\n display:none;\n }\n\n Select.-expanded .up-arrow {\n display: block;\n }\n\n Select.-expanded > SelectOverlay {\n display: block;\n }\n\n Select.-expanded > SelectCurrent {\n border: tall $accent;\n }\n \"\"\"\n\n expanded: var[bool] = var(False, init=False)\n \"\"\"True to show the overlay, otherwise False.\"\"\"\n prompt: var[str] = var[str](\"Select\")\n \"\"\"The prompt to show when no value is selected.\"\"\"\n value: var[SelectType | None] = var[Optional[SelectType]](None)\n \"\"\"The value of the select.\"\"\"\n\n class Changed(Message, bubble=True):\n \"\"\"Posted when the select value was changed.\n\n This message can be handled using a `on_select_changed` method.\n \"\"\"\n\n def __init__(self, select: Select, value: SelectType | None) -> None:\n \"\"\"\n Initialize the Changed message.\n \"\"\"\n super().__init__()\n self.select = select\n \"\"\"The select widget.\"\"\"\n self.value = value\n \"\"\"The value of the Select when it changed.\"\"\"\n\n @property\n def control(self) -> Select:\n \"\"\"The Select that sent the message.\"\"\"\n return self.select\n\n def __init__(\n self,\n options: Iterable[tuple[str, SelectType]],\n *,\n prompt: str = \"Select\",\n allow_blank: bool = True,\n value: SelectType | None = None,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialize the Select control\n\n Args:\n options: Options to select from.\n prompt: Text to show in the control when no option is select.\n allow_blank: Allow the selection of a blank option.\n value: Initial value (should be one of the values in `options`).\n name: The name of the select control.\n id: The ID of the control the DOM.\n classes: The CSS classes of the control.\n disabled: Whether the control is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._allow_blank = allow_blank\n self.prompt = prompt\n self._initial_options = list(options)\n self._value: SelectType | None = value\n\n def set_options(self, options: Iterable[tuple[RenderableType, SelectType]]) -> None:\n \"\"\"Set the options for the Select.\n\n Args:\n options: An iterable of tuples containing (STRING, VALUE).\n \"\"\"\n self._options: list[tuple[RenderableType, SelectType | None]] = list(options)\n\n if self._allow_blank:\n 
self._options.insert(0, (\"\", None))\n\n self._select_options: list[Option] = [\n (\n Option(Text(self.prompt, style=\"dim\"))\n if value is None\n else Option(prompt)\n )\n for prompt, value in self._options\n ]\n\n option_list = self.query_one(SelectOverlay)\n option_list.clear_options()\n for option in self._select_options:\n option_list.add_option(option)\n\n def _watch_value(self, value: SelectType | None) -> None:\n \"\"\"Update the current value when it changes.\"\"\"\n self._value = value\n try:\n select_current = self.query_one(SelectCurrent)\n except NoMatches:\n pass\n else:\n if value is None:\n self.query_one(SelectCurrent).update(None)\n else:\n for index, (prompt, _value) in enumerate(self._options):\n if _value == value:\n select_overlay = self.query_one(SelectOverlay)\n select_overlay.highlighted = index\n self.query_one(SelectCurrent).update(prompt)\n break\n else:\n self.query_one(SelectCurrent).update(None)\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose Select with overlay and current value.\"\"\"\n yield SelectCurrent(self.prompt)\n yield SelectOverlay()\n\n def _on_mount(self, _event: events.Mount) -> None:\n \"\"\"Set initial values.\"\"\"\n self.set_options(self._initial_options)\n self.value = self._value\n\n def _watch_expanded(self, expanded: bool) -> None:\n \"\"\"Display or hide overlay.\"\"\"\n overlay = self.query_one(SelectOverlay)\n self.set_class(expanded, \"-expanded\")\n if expanded:\n overlay.focus()\n if self.value is None:\n overlay.select(None)\n self.query_one(SelectCurrent).has_value = False\n else:\n value = self.value\n for index, (_prompt, prompt_value) in enumerate(self._options):\n if value == prompt_value:\n overlay.select(index)\n break\n self.query_one(SelectCurrent).has_value = True\n\n @on(SelectCurrent.Toggle)\n def _select_current_toggle(self, event: SelectCurrent.Toggle) -> None:\n \"\"\"Show the overlay when toggled.\"\"\"\n event.stop()\n self.expanded = not self.expanded\n\n @on(SelectOverlay.Dismiss)\n def _select_overlay_dismiss(self, event: SelectOverlay.Dismiss) -> None:\n \"\"\"Dismiss the overlay.\"\"\"\n event.stop()\n self.expanded = False\n if not event.lost_focus:\n # If the overlay didn't lose focus, we want to re-focus the select.\n self.focus()\n\n @on(SelectOverlay.UpdateSelection)\n def _update_selection(self, event: SelectOverlay.UpdateSelection) -> None:\n \"\"\"Update the current selection.\"\"\"\n event.stop()\n value = self._options[event.option_index][1]\n self.value = value\n\n async def update_focus() -> None:\n \"\"\"Update focus and reset overlay.\"\"\"\n self.focus()\n self.expanded = False\n\n self.call_after_refresh(update_focus) # Prevents a little flicker\n self.post_message(self.Changed(self, value))\n\n def action_show_overlay(self) -> None:\n \"\"\"Show the overlay.\"\"\"\n select_current = self.query_one(SelectCurrent)\n select_current.has_value = True\n self.expanded = True\n", "path": "src/textual/widgets/_select.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Generic, Iterable, Optional, TypeVar\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. 
import events, on\nfrom ..app import ComposeResult\nfrom ..containers import Horizontal, Vertical\nfrom ..css.query import NoMatches\nfrom ..message import Message\nfrom ..reactive import var\nfrom ..widgets import Static\nfrom ._option_list import Option, OptionList\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n\nclass SelectOverlay(OptionList):\n \"\"\"The 'pop-up' overlay for the Select control.\"\"\"\n\n BINDINGS = [(\"escape\", \"dismiss\")]\n\n DEFAULT_CSS = \"\"\"\n SelectOverlay {\n border: tall $background;\n background: $panel;\n color: $text;\n width: 100%;\n padding: 0 1;\n }\n SelectOverlay > .option-list--option {\n padding: 0 1;\n }\n \"\"\"\n\n @dataclass\n class Dismiss(Message):\n \"\"\"Inform ancestor the overlay should be dismissed.\"\"\"\n\n lost_focus: bool = False\n \"\"\"True if the overlay lost focus.\"\"\"\n\n @dataclass\n class UpdateSelection(Message):\n \"\"\"Inform ancestor the selection was changed.\"\"\"\n\n option_index: int\n \"\"\"The index of the new selection.\"\"\"\n\n def select(self, index: int | None) -> None:\n \"\"\"Move selection.\n\n Args:\n index: Index of new selection.\n \"\"\"\n self.highlighted = index\n self.scroll_to_highlight(top=True)\n\n def action_dismiss(self) -> None:\n \"\"\"Dismiss the overlay.\"\"\"\n self.post_message(self.Dismiss())\n\n def _on_blur(self, _event: events.Blur) -> None:\n \"\"\"On blur we want to dismiss the overlay.\"\"\"\n self.post_message(self.Dismiss(lost_focus=True))\n\n def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:\n \"\"\"Inform parent when an option is selected.\"\"\"\n event.stop()\n self.post_message(self.UpdateSelection(event.option_index))\n\n\nclass SelectCurrent(Horizontal):\n \"\"\"Displays the currently selected option.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n SelectCurrent {\n border: tall $background;\n background: $boost;\n color: $text;\n width: 100%;\n height: auto;\n padding: 0 2;\n }\n SelectCurrent Static#label {\n width: 1fr;\n height: auto;\n color: $text-disabled;\n background: transparent;\n }\n SelectCurrent.-has-value Static#label {\n color: $text;\n }\n SelectCurrent .arrow {\n box-sizing: content-box;\n width: 1;\n height: 1;\n padding: 0 0 0 1;\n color: $text-muted;\n background: transparent;\n }\n SelectCurrent .arrow {\n box-sizing: content-box;\n width: 1;\n height: 1;\n padding: 0 0 0 1;\n color: $text-muted;\n background: transparent;\n }\n \"\"\"\n\n has_value: var[bool] = var(False)\n \"\"\"True if there is a current value, or False if it is None.\"\"\"\n\n class Toggle(Message):\n \"\"\"Request toggle overlay.\"\"\"\n\n def __init__(self, placeholder: str) -> None:\n \"\"\"Initialize the SelectCurrent.\n\n Args:\n placeholder: A string to display when there is nothing selected.\n \"\"\"\n super().__init__()\n self.placeholder = placeholder\n self.label: RenderableType | None = None\n\n def update(self, label: RenderableType | None) -> None:\n \"\"\"Update the content in the widget.\n\n Args:\n label: A renderable to display, or `None` for the placeholder.\n \"\"\"\n self.label = label\n self.has_value = label is not None\n self.query_one(\"#label\", Static).update(\n self.placeholder if label is None else label\n )\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose label and down arrow.\"\"\"\n yield Static(self.placeholder, id=\"label\")\n yield Static(\"\u25bc\", classes=\"arrow down-arrow\")\n yield Static(\"\u25b2\", classes=\"arrow up-arrow\")\n\n def _watch_has_value(self, has_value: bool) -> None:\n 
\"\"\"Toggle the class.\"\"\"\n self.set_class(has_value, \"-has-value\")\n\n async def _on_click(self, event: events.Click) -> None:\n \"\"\"Inform ancestor we want to toggle.\"\"\"\n self.post_message(self.Toggle())\n\n\nSelectType = TypeVar(\"SelectType\")\n\"\"\"The type used for data in the Select.\"\"\"\nSelectOption: TypeAlias = \"tuple[str, SelectType]\"\n\"\"\"The type used for options in the Select.\"\"\"\n\n\nclass Select(Generic[SelectType], Vertical, can_focus=True):\n \"\"\"Widget to select from a list of possible options.\n\n A Select displays the current selection.\n When activated with ++enter++ the widget displays an overlay with a list of all possible options.\n\n \"\"\"\n\n BINDINGS = [(\"enter,down,space,up\", \"show_overlay\")]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter,down,space,up | Activate the overlay |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Select {\n height: auto;\n }\n\n Select:focus > SelectCurrent {\n border: tall $accent;\n }\n\n Select > SelectOverlay {\n width: 1fr;\n display: none;\n height: auto;\n max-height: 10;\n overlay: screen;\n constrain: y;\n }\n\n Select .up-arrow {\n display:none;\n }\n\n Select.-expanded .down-arrow {\n display:none;\n }\n\n Select.-expanded .up-arrow {\n display: block;\n }\n\n Select.-expanded > SelectOverlay {\n display: block;\n }\n\n Select.-expanded > SelectCurrent {\n border: tall $accent;\n }\n \"\"\"\n\n expanded: var[bool] = var(False, init=False)\n \"\"\"True to show the overlay, otherwise False.\"\"\"\n prompt: var[str] = var[str](\"Select\")\n \"\"\"The prompt to show when no value is selected.\"\"\"\n value: var[SelectType | None] = var[Optional[SelectType]](None)\n \"\"\"The value of the select.\"\"\"\n\n class Changed(Message, bubble=True):\n \"\"\"Posted when the select value was changed.\n\n This message can be handled using a `on_select_changed` method.\n \"\"\"\n\n def __init__(self, select: Select, value: SelectType | None) -> None:\n \"\"\"\n Initialize the Changed message.\n \"\"\"\n super().__init__()\n self.select = select\n \"\"\"The select widget.\"\"\"\n self.value = value\n \"\"\"The value of the Select when it changed.\"\"\"\n\n @property\n def control(self) -> Select:\n \"\"\"The Select that sent the message.\"\"\"\n return self.select\n\n def __init__(\n self,\n options: Iterable[tuple[str, SelectType]],\n *,\n prompt: str = \"Select\",\n allow_blank: bool = True,\n value: SelectType | None = None,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialize the Select control\n\n Args:\n options: Options to select from.\n prompt: Text to show in the control when no option is select.\n allow_blank: Allow the selection of a blank option.\n value: Initial value (should be one of the values in `options`).\n name: The name of the select control.\n id: The ID of the control the DOM.\n classes: The CSS classes of the control.\n disabled: Whether the control is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._allow_blank = allow_blank\n self.prompt = prompt\n self._initial_options = list(options)\n self._value: SelectType | None = value\n self._options = options\n\n def set_options(self, options: Iterable[tuple[RenderableType, SelectType]]) -> None:\n \"\"\"Set the options for the Select.\n\n Args:\n options: An iterable of tuples containing (STRING, VALUE).\n \"\"\"\n self._options: list[tuple[RenderableType, SelectType | None]] = list(options)\n\n if 
self._allow_blank:\n self._options.insert(0, (\"\", None))\n\n self._select_options: list[Option] = [\n (\n Option(Text(self.prompt, style=\"dim\"))\n if value is None\n else Option(prompt)\n )\n for prompt, value in self._options\n ]\n\n option_list = self.query_one(SelectOverlay)\n option_list.clear_options()\n for option in self._select_options:\n option_list.add_option(option)\n\n def _watch_value(self, value: SelectType | None) -> None:\n \"\"\"Update the current value when it changes.\"\"\"\n self._value = value\n try:\n select_current = self.query_one(SelectCurrent)\n except NoMatches:\n pass\n else:\n if value is None:\n self.query_one(SelectCurrent).update(None)\n else:\n for index, (prompt, _value) in enumerate(self._options):\n if _value == value:\n select_overlay = self.query_one(SelectOverlay)\n select_overlay.highlighted = index\n self.query_one(SelectCurrent).update(prompt)\n break\n else:\n self.query_one(SelectCurrent).update(None)\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose Select with overlay and current value.\"\"\"\n yield SelectCurrent(self.prompt)\n yield SelectOverlay()\n\n def _on_mount(self, _event: events.Mount) -> None:\n \"\"\"Set initial values.\"\"\"\n self.set_options(self._initial_options)\n self.value = self._value\n\n def _watch_expanded(self, expanded: bool) -> None:\n \"\"\"Display or hide overlay.\"\"\"\n overlay = self.query_one(SelectOverlay)\n self.set_class(expanded, \"-expanded\")\n if expanded:\n overlay.focus()\n if self.value is None:\n overlay.select(None)\n self.query_one(SelectCurrent).has_value = False\n else:\n value = self.value\n for index, (_prompt, prompt_value) in enumerate(self._options):\n if value == prompt_value:\n overlay.select(index)\n break\n self.query_one(SelectCurrent).has_value = True\n\n @on(SelectCurrent.Toggle)\n def _select_current_toggle(self, event: SelectCurrent.Toggle) -> None:\n \"\"\"Show the overlay when toggled.\"\"\"\n event.stop()\n self.expanded = not self.expanded\n\n @on(SelectOverlay.Dismiss)\n def _select_overlay_dismiss(self, event: SelectOverlay.Dismiss) -> None:\n \"\"\"Dismiss the overlay.\"\"\"\n event.stop()\n self.expanded = False\n if not event.lost_focus:\n # If the overlay didn't lose focus, we want to re-focus the select.\n self.focus()\n\n @on(SelectOverlay.UpdateSelection)\n def _update_selection(self, event: SelectOverlay.UpdateSelection) -> None:\n \"\"\"Update the current selection.\"\"\"\n event.stop()\n value = self._options[event.option_index][1]\n self.value = value\n\n async def update_focus() -> None:\n \"\"\"Update focus and reset overlay.\"\"\"\n self.focus()\n self.expanded = False\n\n self.call_after_refresh(update_focus) # Prevents a little flicker\n self.post_message(self.Changed(self, value))\n\n def action_show_overlay(self) -> None:\n \"\"\"Show the overlay.\"\"\"\n select_current = self.query_one(SelectCurrent)\n select_current.has_value = True\n self.expanded = True\n", "path": "src/textual/widgets/_select.py"}]} |
gh_patches_debug_1399 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-django-4995 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `compose` to the names of docker compose files
## Description
`local.yml`, `production.yml`, and `docs.yml` should be renamed something with "compose" in it: I propose `docker-compose.*`
## Rationale
The VSCode Docker extension needs "compose" in the filename to detect a docker compose file. This lets you right click on the file to run compose commands using it. Also, it puts the files next to each other alphabetically, and perhaps most importantly, more clearly communicates the purpose of the files.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hooks/post_gen_project.py`
Content:
```
1 """
2 NOTE:
3 the below code is to be maintained Python 2.x-compatible
4 as the whole Cookiecutter Django project initialization
5 can potentially be run in Python 2.x environment
6 (at least so we presume in `pre_gen_project.py`).
7
8 TODO: restrict Cookiecutter Django project initialization to
9 Python 3.x environments only
10 """
11
12 from __future__ import print_function
13
14 import json
15 import os
16 import random
17 import shutil
18 import string
19
20 try:
21 # Inspired by
22 # https://github.com/django/django/blob/master/django/utils/crypto.py
23 random = random.SystemRandom()
24 using_sysrandom = True
25 except NotImplementedError:
26 using_sysrandom = False
27
28 TERMINATOR = "\x1b[0m"
29 WARNING = "\x1b[1;33m [WARNING]: "
30 INFO = "\x1b[1;33m [INFO]: "
31 HINT = "\x1b[3;33m"
32 SUCCESS = "\x1b[1;32m [SUCCESS]: "
33
34 DEBUG_VALUE = "debug"
35
36
37 def remove_open_source_files():
38 file_names = ["CONTRIBUTORS.txt", "LICENSE"]
39 for file_name in file_names:
40 os.remove(file_name)
41
42
43 def remove_gplv3_files():
44 file_names = ["COPYING"]
45 for file_name in file_names:
46 os.remove(file_name)
47
48
49 def remove_custom_user_manager_files():
50 os.remove(
51 os.path.join(
52 "{{cookiecutter.project_slug}}",
53 "users",
54 "managers.py",
55 )
56 )
57 os.remove(
58 os.path.join(
59 "{{cookiecutter.project_slug}}",
60 "users",
61 "tests",
62 "test_managers.py",
63 )
64 )
65
66
67 def remove_pycharm_files():
68 idea_dir_path = ".idea"
69 if os.path.exists(idea_dir_path):
70 shutil.rmtree(idea_dir_path)
71
72 docs_dir_path = os.path.join("docs", "pycharm")
73 if os.path.exists(docs_dir_path):
74 shutil.rmtree(docs_dir_path)
75
76
77 def remove_docker_files():
78 shutil.rmtree(".devcontainer")
79 shutil.rmtree("compose")
80
81 file_names = ["local.yml", "production.yml", ".dockerignore"]
82 for file_name in file_names:
83 os.remove(file_name)
84 if "{{ cookiecutter.editor }}" == "PyCharm":
85 file_names = ["docker_compose_up_django.xml", "docker_compose_up_docs.xml"]
86 for file_name in file_names:
87 os.remove(os.path.join(".idea", "runConfigurations", file_name))
88
89
90 def remove_utility_files():
91 shutil.rmtree("utility")
92
93
94 def remove_heroku_files():
95 file_names = ["Procfile", "runtime.txt", "requirements.txt"]
96 for file_name in file_names:
97 if file_name == "requirements.txt" and "{{ cookiecutter.ci_tool }}".lower() == "travis":
98 # don't remove the file if we are using travisci but not using heroku
99 continue
100 os.remove(file_name)
101 shutil.rmtree("bin")
102
103
104 def remove_sass_files():
105 shutil.rmtree(os.path.join("{{cookiecutter.project_slug}}", "static", "sass"))
106
107
108 def remove_gulp_files():
109 file_names = ["gulpfile.js"]
110 for file_name in file_names:
111 os.remove(file_name)
112
113
114 def remove_webpack_files():
115 shutil.rmtree("webpack")
116 remove_vendors_js()
117
118
119 def remove_vendors_js():
120 vendors_js_path = os.path.join(
121 "{{ cookiecutter.project_slug }}",
122 "static",
123 "js",
124 "vendors.js",
125 )
126 if os.path.exists(vendors_js_path):
127 os.remove(vendors_js_path)
128
129
130 def remove_packagejson_file():
131 file_names = ["package.json"]
132 for file_name in file_names:
133 os.remove(file_name)
134
135
136 def update_package_json(remove_dev_deps=None, remove_keys=None, scripts=None):
137 remove_dev_deps = remove_dev_deps or []
138 remove_keys = remove_keys or []
139 scripts = scripts or {}
140 with open("package.json", mode="r") as fd:
141 content = json.load(fd)
142 for package_name in remove_dev_deps:
143 content["devDependencies"].pop(package_name)
144 for key in remove_keys:
145 content.pop(key)
146 content["scripts"].update(scripts)
147 with open("package.json", mode="w") as fd:
148 json.dump(content, fd, ensure_ascii=False, indent=2)
149 fd.write("\n")
150
151
152 def handle_js_runner(choice, use_docker, use_async):
153 if choice == "Gulp":
154 update_package_json(
155 remove_dev_deps=[
156 "@babel/core",
157 "@babel/preset-env",
158 "babel-loader",
159 "concurrently",
160 "css-loader",
161 "mini-css-extract-plugin",
162 "postcss-loader",
163 "postcss-preset-env",
164 "sass-loader",
165 "webpack",
166 "webpack-bundle-tracker",
167 "webpack-cli",
168 "webpack-dev-server",
169 "webpack-merge",
170 ],
171 remove_keys=["babel"],
172 scripts={
173 "dev": "gulp",
174 "build": "gulp generate-assets",
175 },
176 )
177 remove_webpack_files()
178 elif choice == "Webpack":
179 scripts = {
180 "dev": "webpack serve --config webpack/dev.config.js",
181 "build": "webpack --config webpack/prod.config.js",
182 }
183 remove_dev_deps = [
184 "browser-sync",
185 "cssnano",
186 "gulp",
187 "gulp-concat",
188 "gulp-imagemin",
189 "gulp-plumber",
190 "gulp-postcss",
191 "gulp-rename",
192 "gulp-sass",
193 "gulp-uglify-es",
194 ]
195 if not use_docker:
196 dev_django_cmd = (
197 "uvicorn config.asgi:application --reload" if use_async else "python manage.py runserver_plus"
198 )
199 scripts.update(
200 {
201 "dev": "concurrently npm:dev:*",
202 "dev:webpack": "webpack serve --config webpack/dev.config.js",
203 "dev:django": dev_django_cmd,
204 }
205 )
206 else:
207 remove_dev_deps.append("concurrently")
208 update_package_json(remove_dev_deps=remove_dev_deps, scripts=scripts)
209 remove_gulp_files()
210
211
212 def remove_prettier_pre_commit():
213 with open(".pre-commit-config.yaml", "r") as fd:
214 content = fd.readlines()
215
216 removing = False
217 new_lines = []
218 for line in content:
219 if removing and "- repo:" in line:
220 removing = False
221 if "mirrors-prettier" in line:
222 removing = True
223 if not removing:
224 new_lines.append(line)
225
226 with open(".pre-commit-config.yaml", "w") as fd:
227 fd.writelines(new_lines)
228
229
230 def remove_celery_files():
231 file_names = [
232 os.path.join("config", "celery_app.py"),
233 os.path.join("{{ cookiecutter.project_slug }}", "users", "tasks.py"),
234 os.path.join("{{ cookiecutter.project_slug }}", "users", "tests", "test_tasks.py"),
235 ]
236 for file_name in file_names:
237 os.remove(file_name)
238
239
240 def remove_async_files():
241 file_names = [
242 os.path.join("config", "asgi.py"),
243 os.path.join("config", "websocket.py"),
244 ]
245 for file_name in file_names:
246 os.remove(file_name)
247
248
249 def remove_dottravisyml_file():
250 os.remove(".travis.yml")
251
252
253 def remove_dotgitlabciyml_file():
254 os.remove(".gitlab-ci.yml")
255
256
257 def remove_dotgithub_folder():
258 shutil.rmtree(".github")
259
260
261 def remove_dotdrone_file():
262 os.remove(".drone.yml")
263
264
265 def generate_random_string(length, using_digits=False, using_ascii_letters=False, using_punctuation=False):
266 """
267 Example:
268 opting out for 50 symbol-long, [a-z][A-Z][0-9] string
269 would yield log_2((26+26+50)^50) ~= 334 bit strength.
270 """
271 if not using_sysrandom:
272 return None
273
274 symbols = []
275 if using_digits:
276 symbols += string.digits
277 if using_ascii_letters:
278 symbols += string.ascii_letters
279 if using_punctuation:
280 all_punctuation = set(string.punctuation)
281 # These symbols can cause issues in environment variables
282 unsuitable = {"'", '"', "\\", "$"}
283 suitable = all_punctuation.difference(unsuitable)
284 symbols += "".join(suitable)
285 return "".join([random.choice(symbols) for _ in range(length)])
286
287
288 def set_flag(file_path, flag, value=None, formatted=None, *args, **kwargs):
289 if value is None:
290 random_string = generate_random_string(*args, **kwargs)
291 if random_string is None:
292 print(
293 "We couldn't find a secure pseudo-random number generator on your "
294 "system. Please, make sure to manually {} later.".format(flag)
295 )
296 random_string = flag
297 if formatted is not None:
298 random_string = formatted.format(random_string)
299 value = random_string
300
301 with open(file_path, "r+") as f:
302 file_contents = f.read().replace(flag, value)
303 f.seek(0)
304 f.write(file_contents)
305 f.truncate()
306
307 return value
308
309
310 def set_django_secret_key(file_path):
311 django_secret_key = set_flag(
312 file_path,
313 "!!!SET DJANGO_SECRET_KEY!!!",
314 length=64,
315 using_digits=True,
316 using_ascii_letters=True,
317 )
318 return django_secret_key
319
320
321 def set_django_admin_url(file_path):
322 django_admin_url = set_flag(
323 file_path,
324 "!!!SET DJANGO_ADMIN_URL!!!",
325 formatted="{}/",
326 length=32,
327 using_digits=True,
328 using_ascii_letters=True,
329 )
330 return django_admin_url
331
332
333 def generate_random_user():
334 return generate_random_string(length=32, using_ascii_letters=True)
335
336
337 def generate_postgres_user(debug=False):
338 return DEBUG_VALUE if debug else generate_random_user()
339
340
341 def set_postgres_user(file_path, value):
342 postgres_user = set_flag(file_path, "!!!SET POSTGRES_USER!!!", value=value)
343 return postgres_user
344
345
346 def set_postgres_password(file_path, value=None):
347 postgres_password = set_flag(
348 file_path,
349 "!!!SET POSTGRES_PASSWORD!!!",
350 value=value,
351 length=64,
352 using_digits=True,
353 using_ascii_letters=True,
354 )
355 return postgres_password
356
357
358 def set_celery_flower_user(file_path, value):
359 celery_flower_user = set_flag(file_path, "!!!SET CELERY_FLOWER_USER!!!", value=value)
360 return celery_flower_user
361
362
363 def set_celery_flower_password(file_path, value=None):
364 celery_flower_password = set_flag(
365 file_path,
366 "!!!SET CELERY_FLOWER_PASSWORD!!!",
367 value=value,
368 length=64,
369 using_digits=True,
370 using_ascii_letters=True,
371 )
372 return celery_flower_password
373
374
375 def append_to_gitignore_file(ignored_line):
376 with open(".gitignore", "a") as gitignore_file:
377 gitignore_file.write(ignored_line)
378 gitignore_file.write("\n")
379
380
381 def set_flags_in_envs(postgres_user, celery_flower_user, debug=False):
382 local_django_envs_path = os.path.join(".envs", ".local", ".django")
383 production_django_envs_path = os.path.join(".envs", ".production", ".django")
384 local_postgres_envs_path = os.path.join(".envs", ".local", ".postgres")
385 production_postgres_envs_path = os.path.join(".envs", ".production", ".postgres")
386
387 set_django_secret_key(production_django_envs_path)
388 set_django_admin_url(production_django_envs_path)
389
390 set_postgres_user(local_postgres_envs_path, value=postgres_user)
391 set_postgres_password(local_postgres_envs_path, value=DEBUG_VALUE if debug else None)
392 set_postgres_user(production_postgres_envs_path, value=postgres_user)
393 set_postgres_password(production_postgres_envs_path, value=DEBUG_VALUE if debug else None)
394
395 set_celery_flower_user(local_django_envs_path, value=celery_flower_user)
396 set_celery_flower_password(local_django_envs_path, value=DEBUG_VALUE if debug else None)
397 set_celery_flower_user(production_django_envs_path, value=celery_flower_user)
398 set_celery_flower_password(production_django_envs_path, value=DEBUG_VALUE if debug else None)
399
400
401 def set_flags_in_settings_files():
402 set_django_secret_key(os.path.join("config", "settings", "local.py"))
403 set_django_secret_key(os.path.join("config", "settings", "test.py"))
404
405
406 def remove_envs_and_associated_files():
407 shutil.rmtree(".envs")
408 os.remove("merge_production_dotenvs_in_dotenv.py")
409 shutil.rmtree("tests")
410
411
412 def remove_celery_compose_dirs():
413 shutil.rmtree(os.path.join("compose", "local", "django", "celery"))
414 shutil.rmtree(os.path.join("compose", "production", "django", "celery"))
415
416
417 def remove_node_dockerfile():
418 shutil.rmtree(os.path.join("compose", "local", "node"))
419
420
421 def remove_aws_dockerfile():
422 shutil.rmtree(os.path.join("compose", "production", "aws"))
423
424
425 def remove_drf_starter_files():
426 os.remove(os.path.join("config", "api_router.py"))
427 shutil.rmtree(os.path.join("{{cookiecutter.project_slug}}", "users", "api"))
428 os.remove(os.path.join("{{cookiecutter.project_slug}}", "users", "tests", "test_drf_urls.py"))
429 os.remove(os.path.join("{{cookiecutter.project_slug}}", "users", "tests", "test_drf_views.py"))
430 os.remove(os.path.join("{{cookiecutter.project_slug}}", "users", "tests", "test_swagger.py"))
431
432
433 def main():
434 debug = "{{ cookiecutter.debug }}".lower() == "y"
435
436 set_flags_in_envs(
437 DEBUG_VALUE if debug else generate_random_user(),
438 DEBUG_VALUE if debug else generate_random_user(),
439 debug=debug,
440 )
441 set_flags_in_settings_files()
442
443 if "{{ cookiecutter.open_source_license }}" == "Not open source":
444 remove_open_source_files()
445 if "{{ cookiecutter.open_source_license}}" != "GPLv3":
446 remove_gplv3_files()
447
448 if "{{ cookiecutter.username_type }}" == "username":
449 remove_custom_user_manager_files()
450
451 if "{{ cookiecutter.editor }}" != "PyCharm":
452 remove_pycharm_files()
453
454 if "{{ cookiecutter.use_docker }}".lower() == "y":
455 remove_utility_files()
456 else:
457 remove_docker_files()
458
459 if "{{ cookiecutter.use_docker }}".lower() == "y" and "{{ cookiecutter.cloud_provider}}" != "AWS":
460 remove_aws_dockerfile()
461
462 if "{{ cookiecutter.use_heroku }}".lower() == "n":
463 remove_heroku_files()
464
465 if "{{ cookiecutter.use_docker }}".lower() == "n" and "{{ cookiecutter.use_heroku }}".lower() == "n":
466 if "{{ cookiecutter.keep_local_envs_in_vcs }}".lower() == "y":
467 print(
468 INFO + ".env(s) are only utilized when Docker Compose and/or "
469 "Heroku support is enabled so keeping them does not make sense "
470 "given your current setup." + TERMINATOR
471 )
472 remove_envs_and_associated_files()
473 else:
474 append_to_gitignore_file(".env")
475 append_to_gitignore_file(".envs/*")
476 if "{{ cookiecutter.keep_local_envs_in_vcs }}".lower() == "y":
477 append_to_gitignore_file("!.envs/.local/")
478
479 if "{{ cookiecutter.frontend_pipeline }}" in ["None", "Django Compressor"]:
480 remove_gulp_files()
481 remove_webpack_files()
482 remove_sass_files()
483 remove_packagejson_file()
484 remove_prettier_pre_commit()
485 if "{{ cookiecutter.use_docker }}".lower() == "y":
486 remove_node_dockerfile()
487 else:
488 handle_js_runner(
489 "{{ cookiecutter.frontend_pipeline }}",
490 use_docker=("{{ cookiecutter.use_docker }}".lower() == "y"),
491 use_async=("{{ cookiecutter.use_async }}".lower() == "y"),
492 )
493
494 if "{{ cookiecutter.cloud_provider }}" == "None" and "{{ cookiecutter.use_docker }}".lower() == "n":
495 print(
496 WARNING + "You chose to not use any cloud providers nor Docker, "
497 "media files won't be served in production." + TERMINATOR
498 )
499
500 if "{{ cookiecutter.use_celery }}".lower() == "n":
501 remove_celery_files()
502 if "{{ cookiecutter.use_docker }}".lower() == "y":
503 remove_celery_compose_dirs()
504
505 if "{{ cookiecutter.ci_tool }}" != "Travis":
506 remove_dottravisyml_file()
507
508 if "{{ cookiecutter.ci_tool }}" != "Gitlab":
509 remove_dotgitlabciyml_file()
510
511 if "{{ cookiecutter.ci_tool }}" != "Github":
512 remove_dotgithub_folder()
513
514 if "{{ cookiecutter.ci_tool }}" != "Drone":
515 remove_dotdrone_file()
516
517 if "{{ cookiecutter.use_drf }}".lower() == "n":
518 remove_drf_starter_files()
519
520 if "{{ cookiecutter.use_async }}".lower() == "n":
521 remove_async_files()
522
523 print(SUCCESS + "Project initialized, keep up the good work!" + TERMINATOR)
524
525
526 if __name__ == "__main__":
527 main()
528
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py
--- a/hooks/post_gen_project.py
+++ b/hooks/post_gen_project.py
@@ -78,7 +78,11 @@
shutil.rmtree(".devcontainer")
shutil.rmtree("compose")
- file_names = ["local.yml", "production.yml", ".dockerignore"]
+ file_names = [
+ "docker-compose.local.yml",
+ "docker-compose.production.yml",
+ ".dockerignore",
+ ]
for file_name in file_names:
os.remove(file_name)
if "{{ cookiecutter.editor }}" == "PyCharm":
| {"golden_diff": "diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py\n--- a/hooks/post_gen_project.py\n+++ b/hooks/post_gen_project.py\n@@ -78,7 +78,11 @@\n shutil.rmtree(\".devcontainer\")\n shutil.rmtree(\"compose\")\n \n- file_names = [\"local.yml\", \"production.yml\", \".dockerignore\"]\n+ file_names = [\n+ \"docker-compose.local.yml\",\n+ \"docker-compose.production.yml\",\n+ \".dockerignore\",\n+ ]\n for file_name in file_names:\n os.remove(file_name)\n if \"{{ cookiecutter.editor }}\" == \"PyCharm\":\n", "issue": "Add `compose` to the names of docker compose files\n## Description\r\n\r\n`local.yml`, `production.yml`, and `docs.yml` should be renamed something with \"compose\" in it: I propose `docker-compose.*`\r\n\r\n## Rationale\r\n\r\nThe VSCode Docker extension needs \"compose\" in the filename to detect a docker compose file. This lets you right click on the file to run compose commands using it. Also, it puts the files next to each other alphabetically, and perhaps most importantly, more clearly communicates the purpose of the files.\n", "before_files": [{"content": "\"\"\"\nNOTE:\n the below code is to be maintained Python 2.x-compatible\n as the whole Cookiecutter Django project initialization\n can potentially be run in Python 2.x environment\n (at least so we presume in `pre_gen_project.py`).\n\nTODO: restrict Cookiecutter Django project initialization to\n Python 3.x environments only\n\"\"\"\n\nfrom __future__ import print_function\n\nimport json\nimport os\nimport random\nimport shutil\nimport string\n\ntry:\n # Inspired by\n # https://github.com/django/django/blob/master/django/utils/crypto.py\n random = random.SystemRandom()\n using_sysrandom = True\nexcept NotImplementedError:\n using_sysrandom = False\n\nTERMINATOR = \"\\x1b[0m\"\nWARNING = \"\\x1b[1;33m [WARNING]: \"\nINFO = \"\\x1b[1;33m [INFO]: \"\nHINT = \"\\x1b[3;33m\"\nSUCCESS = \"\\x1b[1;32m [SUCCESS]: \"\n\nDEBUG_VALUE = \"debug\"\n\n\ndef remove_open_source_files():\n file_names = [\"CONTRIBUTORS.txt\", \"LICENSE\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_gplv3_files():\n file_names = [\"COPYING\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_custom_user_manager_files():\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"managers.py\",\n )\n )\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"tests\",\n \"test_managers.py\",\n )\n )\n\n\ndef remove_pycharm_files():\n idea_dir_path = \".idea\"\n if os.path.exists(idea_dir_path):\n shutil.rmtree(idea_dir_path)\n\n docs_dir_path = os.path.join(\"docs\", \"pycharm\")\n if os.path.exists(docs_dir_path):\n shutil.rmtree(docs_dir_path)\n\n\ndef remove_docker_files():\n shutil.rmtree(\".devcontainer\")\n shutil.rmtree(\"compose\")\n\n file_names = [\"local.yml\", \"production.yml\", \".dockerignore\"]\n for file_name in file_names:\n os.remove(file_name)\n if \"{{ cookiecutter.editor }}\" == \"PyCharm\":\n file_names = [\"docker_compose_up_django.xml\", \"docker_compose_up_docs.xml\"]\n for file_name in file_names:\n os.remove(os.path.join(\".idea\", \"runConfigurations\", file_name))\n\n\ndef remove_utility_files():\n shutil.rmtree(\"utility\")\n\n\ndef remove_heroku_files():\n file_names = [\"Procfile\", \"runtime.txt\", \"requirements.txt\"]\n for file_name in file_names:\n if file_name == \"requirements.txt\" and \"{{ cookiecutter.ci_tool }}\".lower() == \"travis\":\n # don't remove the file if we are using travisci but not 
using heroku\n continue\n os.remove(file_name)\n shutil.rmtree(\"bin\")\n\n\ndef remove_sass_files():\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"static\", \"sass\"))\n\n\ndef remove_gulp_files():\n file_names = [\"gulpfile.js\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_webpack_files():\n shutil.rmtree(\"webpack\")\n remove_vendors_js()\n\n\ndef remove_vendors_js():\n vendors_js_path = os.path.join(\n \"{{ cookiecutter.project_slug }}\",\n \"static\",\n \"js\",\n \"vendors.js\",\n )\n if os.path.exists(vendors_js_path):\n os.remove(vendors_js_path)\n\n\ndef remove_packagejson_file():\n file_names = [\"package.json\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef update_package_json(remove_dev_deps=None, remove_keys=None, scripts=None):\n remove_dev_deps = remove_dev_deps or []\n remove_keys = remove_keys or []\n scripts = scripts or {}\n with open(\"package.json\", mode=\"r\") as fd:\n content = json.load(fd)\n for package_name in remove_dev_deps:\n content[\"devDependencies\"].pop(package_name)\n for key in remove_keys:\n content.pop(key)\n content[\"scripts\"].update(scripts)\n with open(\"package.json\", mode=\"w\") as fd:\n json.dump(content, fd, ensure_ascii=False, indent=2)\n fd.write(\"\\n\")\n\n\ndef handle_js_runner(choice, use_docker, use_async):\n if choice == \"Gulp\":\n update_package_json(\n remove_dev_deps=[\n \"@babel/core\",\n \"@babel/preset-env\",\n \"babel-loader\",\n \"concurrently\",\n \"css-loader\",\n \"mini-css-extract-plugin\",\n \"postcss-loader\",\n \"postcss-preset-env\",\n \"sass-loader\",\n \"webpack\",\n \"webpack-bundle-tracker\",\n \"webpack-cli\",\n \"webpack-dev-server\",\n \"webpack-merge\",\n ],\n remove_keys=[\"babel\"],\n scripts={\n \"dev\": \"gulp\",\n \"build\": \"gulp generate-assets\",\n },\n )\n remove_webpack_files()\n elif choice == \"Webpack\":\n scripts = {\n \"dev\": \"webpack serve --config webpack/dev.config.js\",\n \"build\": \"webpack --config webpack/prod.config.js\",\n }\n remove_dev_deps = [\n \"browser-sync\",\n \"cssnano\",\n \"gulp\",\n \"gulp-concat\",\n \"gulp-imagemin\",\n \"gulp-plumber\",\n \"gulp-postcss\",\n \"gulp-rename\",\n \"gulp-sass\",\n \"gulp-uglify-es\",\n ]\n if not use_docker:\n dev_django_cmd = (\n \"uvicorn config.asgi:application --reload\" if use_async else \"python manage.py runserver_plus\"\n )\n scripts.update(\n {\n \"dev\": \"concurrently npm:dev:*\",\n \"dev:webpack\": \"webpack serve --config webpack/dev.config.js\",\n \"dev:django\": dev_django_cmd,\n }\n )\n else:\n remove_dev_deps.append(\"concurrently\")\n update_package_json(remove_dev_deps=remove_dev_deps, scripts=scripts)\n remove_gulp_files()\n\n\ndef remove_prettier_pre_commit():\n with open(\".pre-commit-config.yaml\", \"r\") as fd:\n content = fd.readlines()\n\n removing = False\n new_lines = []\n for line in content:\n if removing and \"- repo:\" in line:\n removing = False\n if \"mirrors-prettier\" in line:\n removing = True\n if not removing:\n new_lines.append(line)\n\n with open(\".pre-commit-config.yaml\", \"w\") as fd:\n fd.writelines(new_lines)\n\n\ndef remove_celery_files():\n file_names = [\n os.path.join(\"config\", \"celery_app.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tasks.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tests\", \"test_tasks.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_async_files():\n file_names = [\n os.path.join(\"config\", \"asgi.py\"),\n 
os.path.join(\"config\", \"websocket.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_dottravisyml_file():\n os.remove(\".travis.yml\")\n\n\ndef remove_dotgitlabciyml_file():\n os.remove(\".gitlab-ci.yml\")\n\n\ndef remove_dotgithub_folder():\n shutil.rmtree(\".github\")\n\n\ndef remove_dotdrone_file():\n os.remove(\".drone.yml\")\n\n\ndef generate_random_string(length, using_digits=False, using_ascii_letters=False, using_punctuation=False):\n \"\"\"\n Example:\n opting out for 50 symbol-long, [a-z][A-Z][0-9] string\n would yield log_2((26+26+50)^50) ~= 334 bit strength.\n \"\"\"\n if not using_sysrandom:\n return None\n\n symbols = []\n if using_digits:\n symbols += string.digits\n if using_ascii_letters:\n symbols += string.ascii_letters\n if using_punctuation:\n all_punctuation = set(string.punctuation)\n # These symbols can cause issues in environment variables\n unsuitable = {\"'\", '\"', \"\\\\\", \"$\"}\n suitable = all_punctuation.difference(unsuitable)\n symbols += \"\".join(suitable)\n return \"\".join([random.choice(symbols) for _ in range(length)])\n\n\ndef set_flag(file_path, flag, value=None, formatted=None, *args, **kwargs):\n if value is None:\n random_string = generate_random_string(*args, **kwargs)\n if random_string is None:\n print(\n \"We couldn't find a secure pseudo-random number generator on your \"\n \"system. Please, make sure to manually {} later.\".format(flag)\n )\n random_string = flag\n if formatted is not None:\n random_string = formatted.format(random_string)\n value = random_string\n\n with open(file_path, \"r+\") as f:\n file_contents = f.read().replace(flag, value)\n f.seek(0)\n f.write(file_contents)\n f.truncate()\n\n return value\n\n\ndef set_django_secret_key(file_path):\n django_secret_key = set_flag(\n file_path,\n \"!!!SET DJANGO_SECRET_KEY!!!\",\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_secret_key\n\n\ndef set_django_admin_url(file_path):\n django_admin_url = set_flag(\n file_path,\n \"!!!SET DJANGO_ADMIN_URL!!!\",\n formatted=\"{}/\",\n length=32,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_admin_url\n\n\ndef generate_random_user():\n return generate_random_string(length=32, using_ascii_letters=True)\n\n\ndef generate_postgres_user(debug=False):\n return DEBUG_VALUE if debug else generate_random_user()\n\n\ndef set_postgres_user(file_path, value):\n postgres_user = set_flag(file_path, \"!!!SET POSTGRES_USER!!!\", value=value)\n return postgres_user\n\n\ndef set_postgres_password(file_path, value=None):\n postgres_password = set_flag(\n file_path,\n \"!!!SET POSTGRES_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return postgres_password\n\n\ndef set_celery_flower_user(file_path, value):\n celery_flower_user = set_flag(file_path, \"!!!SET CELERY_FLOWER_USER!!!\", value=value)\n return celery_flower_user\n\n\ndef set_celery_flower_password(file_path, value=None):\n celery_flower_password = set_flag(\n file_path,\n \"!!!SET CELERY_FLOWER_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return celery_flower_password\n\n\ndef append_to_gitignore_file(ignored_line):\n with open(\".gitignore\", \"a\") as gitignore_file:\n gitignore_file.write(ignored_line)\n gitignore_file.write(\"\\n\")\n\n\ndef set_flags_in_envs(postgres_user, celery_flower_user, debug=False):\n local_django_envs_path = os.path.join(\".envs\", \".local\", \".django\")\n 
production_django_envs_path = os.path.join(\".envs\", \".production\", \".django\")\n local_postgres_envs_path = os.path.join(\".envs\", \".local\", \".postgres\")\n production_postgres_envs_path = os.path.join(\".envs\", \".production\", \".postgres\")\n\n set_django_secret_key(production_django_envs_path)\n set_django_admin_url(production_django_envs_path)\n\n set_postgres_user(local_postgres_envs_path, value=postgres_user)\n set_postgres_password(local_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n set_postgres_user(production_postgres_envs_path, value=postgres_user)\n set_postgres_password(production_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n\n set_celery_flower_user(local_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(local_django_envs_path, value=DEBUG_VALUE if debug else None)\n set_celery_flower_user(production_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(production_django_envs_path, value=DEBUG_VALUE if debug else None)\n\n\ndef set_flags_in_settings_files():\n set_django_secret_key(os.path.join(\"config\", \"settings\", \"local.py\"))\n set_django_secret_key(os.path.join(\"config\", \"settings\", \"test.py\"))\n\n\ndef remove_envs_and_associated_files():\n shutil.rmtree(\".envs\")\n os.remove(\"merge_production_dotenvs_in_dotenv.py\")\n shutil.rmtree(\"tests\")\n\n\ndef remove_celery_compose_dirs():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"django\", \"celery\"))\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"django\", \"celery\"))\n\n\ndef remove_node_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"node\"))\n\n\ndef remove_aws_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"aws\"))\n\n\ndef remove_drf_starter_files():\n os.remove(os.path.join(\"config\", \"api_router.py\"))\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"api\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_urls.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_views.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_swagger.py\"))\n\n\ndef main():\n debug = \"{{ cookiecutter.debug }}\".lower() == \"y\"\n\n set_flags_in_envs(\n DEBUG_VALUE if debug else generate_random_user(),\n DEBUG_VALUE if debug else generate_random_user(),\n debug=debug,\n )\n set_flags_in_settings_files()\n\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove_open_source_files()\n if \"{{ cookiecutter.open_source_license}}\" != \"GPLv3\":\n remove_gplv3_files()\n\n if \"{{ cookiecutter.username_type }}\" == \"username\":\n remove_custom_user_manager_files()\n\n if \"{{ cookiecutter.editor }}\" != \"PyCharm\":\n remove_pycharm_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_utility_files()\n else:\n remove_docker_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\" and \"{{ cookiecutter.cloud_provider}}\" != \"AWS\":\n remove_aws_dockerfile()\n\n if \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n remove_heroku_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"n\" and \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n print(\n INFO + \".env(s) are only utilized when Docker Compose and/or \"\n \"Heroku support is enabled so keeping them does not make sense \"\n 
\"given your current setup.\" + TERMINATOR\n )\n remove_envs_and_associated_files()\n else:\n append_to_gitignore_file(\".env\")\n append_to_gitignore_file(\".envs/*\")\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n append_to_gitignore_file(\"!.envs/.local/\")\n\n if \"{{ cookiecutter.frontend_pipeline }}\" in [\"None\", \"Django Compressor\"]:\n remove_gulp_files()\n remove_webpack_files()\n remove_sass_files()\n remove_packagejson_file()\n remove_prettier_pre_commit()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_node_dockerfile()\n else:\n handle_js_runner(\n \"{{ cookiecutter.frontend_pipeline }}\",\n use_docker=(\"{{ cookiecutter.use_docker }}\".lower() == \"y\"),\n use_async=(\"{{ cookiecutter.use_async }}\".lower() == \"y\"),\n )\n\n if \"{{ cookiecutter.cloud_provider }}\" == \"None\" and \"{{ cookiecutter.use_docker }}\".lower() == \"n\":\n print(\n WARNING + \"You chose to not use any cloud providers nor Docker, \"\n \"media files won't be served in production.\" + TERMINATOR\n )\n\n if \"{{ cookiecutter.use_celery }}\".lower() == \"n\":\n remove_celery_files()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_celery_compose_dirs()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Travis\":\n remove_dottravisyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Gitlab\":\n remove_dotgitlabciyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Github\":\n remove_dotgithub_folder()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Drone\":\n remove_dotdrone_file()\n\n if \"{{ cookiecutter.use_drf }}\".lower() == \"n\":\n remove_drf_starter_files()\n\n if \"{{ cookiecutter.use_async }}\".lower() == \"n\":\n remove_async_files()\n\n print(SUCCESS + \"Project initialized, keep up the good work!\" + TERMINATOR)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "hooks/post_gen_project.py"}], "after_files": [{"content": "\"\"\"\nNOTE:\n the below code is to be maintained Python 2.x-compatible\n as the whole Cookiecutter Django project initialization\n can potentially be run in Python 2.x environment\n (at least so we presume in `pre_gen_project.py`).\n\nTODO: restrict Cookiecutter Django project initialization to\n Python 3.x environments only\n\"\"\"\n\nfrom __future__ import print_function\n\nimport json\nimport os\nimport random\nimport shutil\nimport string\n\ntry:\n # Inspired by\n # https://github.com/django/django/blob/master/django/utils/crypto.py\n random = random.SystemRandom()\n using_sysrandom = True\nexcept NotImplementedError:\n using_sysrandom = False\n\nTERMINATOR = \"\\x1b[0m\"\nWARNING = \"\\x1b[1;33m [WARNING]: \"\nINFO = \"\\x1b[1;33m [INFO]: \"\nHINT = \"\\x1b[3;33m\"\nSUCCESS = \"\\x1b[1;32m [SUCCESS]: \"\n\nDEBUG_VALUE = \"debug\"\n\n\ndef remove_open_source_files():\n file_names = [\"CONTRIBUTORS.txt\", \"LICENSE\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_gplv3_files():\n file_names = [\"COPYING\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_custom_user_manager_files():\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"managers.py\",\n )\n )\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"tests\",\n \"test_managers.py\",\n )\n )\n\n\ndef remove_pycharm_files():\n idea_dir_path = \".idea\"\n if os.path.exists(idea_dir_path):\n shutil.rmtree(idea_dir_path)\n\n docs_dir_path = os.path.join(\"docs\", \"pycharm\")\n if os.path.exists(docs_dir_path):\n shutil.rmtree(docs_dir_path)\n\n\ndef 
remove_docker_files():\n shutil.rmtree(\".devcontainer\")\n shutil.rmtree(\"compose\")\n\n file_names = [\n \"docker-compose.local.yml\",\n \"docker-compose.production.yml\",\n \".dockerignore\",\n ]\n for file_name in file_names:\n os.remove(file_name)\n if \"{{ cookiecutter.editor }}\" == \"PyCharm\":\n file_names = [\"docker_compose_up_django.xml\", \"docker_compose_up_docs.xml\"]\n for file_name in file_names:\n os.remove(os.path.join(\".idea\", \"runConfigurations\", file_name))\n\n\ndef remove_utility_files():\n shutil.rmtree(\"utility\")\n\n\ndef remove_heroku_files():\n file_names = [\"Procfile\", \"runtime.txt\", \"requirements.txt\"]\n for file_name in file_names:\n if file_name == \"requirements.txt\" and \"{{ cookiecutter.ci_tool }}\".lower() == \"travis\":\n # don't remove the file if we are using travisci but not using heroku\n continue\n os.remove(file_name)\n shutil.rmtree(\"bin\")\n\n\ndef remove_sass_files():\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"static\", \"sass\"))\n\n\ndef remove_gulp_files():\n file_names = [\"gulpfile.js\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_webpack_files():\n shutil.rmtree(\"webpack\")\n remove_vendors_js()\n\n\ndef remove_vendors_js():\n vendors_js_path = os.path.join(\n \"{{ cookiecutter.project_slug }}\",\n \"static\",\n \"js\",\n \"vendors.js\",\n )\n if os.path.exists(vendors_js_path):\n os.remove(vendors_js_path)\n\n\ndef remove_packagejson_file():\n file_names = [\"package.json\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef update_package_json(remove_dev_deps=None, remove_keys=None, scripts=None):\n remove_dev_deps = remove_dev_deps or []\n remove_keys = remove_keys or []\n scripts = scripts or {}\n with open(\"package.json\", mode=\"r\") as fd:\n content = json.load(fd)\n for package_name in remove_dev_deps:\n content[\"devDependencies\"].pop(package_name)\n for key in remove_keys:\n content.pop(key)\n content[\"scripts\"].update(scripts)\n with open(\"package.json\", mode=\"w\") as fd:\n json.dump(content, fd, ensure_ascii=False, indent=2)\n fd.write(\"\\n\")\n\n\ndef handle_js_runner(choice, use_docker, use_async):\n if choice == \"Gulp\":\n update_package_json(\n remove_dev_deps=[\n \"@babel/core\",\n \"@babel/preset-env\",\n \"babel-loader\",\n \"concurrently\",\n \"css-loader\",\n \"mini-css-extract-plugin\",\n \"postcss-loader\",\n \"postcss-preset-env\",\n \"sass-loader\",\n \"webpack\",\n \"webpack-bundle-tracker\",\n \"webpack-cli\",\n \"webpack-dev-server\",\n \"webpack-merge\",\n ],\n remove_keys=[\"babel\"],\n scripts={\n \"dev\": \"gulp\",\n \"build\": \"gulp generate-assets\",\n },\n )\n remove_webpack_files()\n elif choice == \"Webpack\":\n scripts = {\n \"dev\": \"webpack serve --config webpack/dev.config.js\",\n \"build\": \"webpack --config webpack/prod.config.js\",\n }\n remove_dev_deps = [\n \"browser-sync\",\n \"cssnano\",\n \"gulp\",\n \"gulp-concat\",\n \"gulp-imagemin\",\n \"gulp-plumber\",\n \"gulp-postcss\",\n \"gulp-rename\",\n \"gulp-sass\",\n \"gulp-uglify-es\",\n ]\n if not use_docker:\n dev_django_cmd = (\n \"uvicorn config.asgi:application --reload\" if use_async else \"python manage.py runserver_plus\"\n )\n scripts.update(\n {\n \"dev\": \"concurrently npm:dev:*\",\n \"dev:webpack\": \"webpack serve --config webpack/dev.config.js\",\n \"dev:django\": dev_django_cmd,\n }\n )\n else:\n remove_dev_deps.append(\"concurrently\")\n update_package_json(remove_dev_deps=remove_dev_deps, scripts=scripts)\n remove_gulp_files()\n\n\ndef 
remove_prettier_pre_commit():\n with open(\".pre-commit-config.yaml\", \"r\") as fd:\n content = fd.readlines()\n\n removing = False\n new_lines = []\n for line in content:\n if removing and \"- repo:\" in line:\n removing = False\n if \"mirrors-prettier\" in line:\n removing = True\n if not removing:\n new_lines.append(line)\n\n with open(\".pre-commit-config.yaml\", \"w\") as fd:\n fd.writelines(new_lines)\n\n\ndef remove_celery_files():\n file_names = [\n os.path.join(\"config\", \"celery_app.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tasks.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tests\", \"test_tasks.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_async_files():\n file_names = [\n os.path.join(\"config\", \"asgi.py\"),\n os.path.join(\"config\", \"websocket.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_dottravisyml_file():\n os.remove(\".travis.yml\")\n\n\ndef remove_dotgitlabciyml_file():\n os.remove(\".gitlab-ci.yml\")\n\n\ndef remove_dotgithub_folder():\n shutil.rmtree(\".github\")\n\n\ndef remove_dotdrone_file():\n os.remove(\".drone.yml\")\n\n\ndef generate_random_string(length, using_digits=False, using_ascii_letters=False, using_punctuation=False):\n \"\"\"\n Example:\n opting out for 50 symbol-long, [a-z][A-Z][0-9] string\n would yield log_2((26+26+50)^50) ~= 334 bit strength.\n \"\"\"\n if not using_sysrandom:\n return None\n\n symbols = []\n if using_digits:\n symbols += string.digits\n if using_ascii_letters:\n symbols += string.ascii_letters\n if using_punctuation:\n all_punctuation = set(string.punctuation)\n # These symbols can cause issues in environment variables\n unsuitable = {\"'\", '\"', \"\\\\\", \"$\"}\n suitable = all_punctuation.difference(unsuitable)\n symbols += \"\".join(suitable)\n return \"\".join([random.choice(symbols) for _ in range(length)])\n\n\ndef set_flag(file_path, flag, value=None, formatted=None, *args, **kwargs):\n if value is None:\n random_string = generate_random_string(*args, **kwargs)\n if random_string is None:\n print(\n \"We couldn't find a secure pseudo-random number generator on your \"\n \"system. 
Please, make sure to manually {} later.\".format(flag)\n )\n random_string = flag\n if formatted is not None:\n random_string = formatted.format(random_string)\n value = random_string\n\n with open(file_path, \"r+\") as f:\n file_contents = f.read().replace(flag, value)\n f.seek(0)\n f.write(file_contents)\n f.truncate()\n\n return value\n\n\ndef set_django_secret_key(file_path):\n django_secret_key = set_flag(\n file_path,\n \"!!!SET DJANGO_SECRET_KEY!!!\",\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_secret_key\n\n\ndef set_django_admin_url(file_path):\n django_admin_url = set_flag(\n file_path,\n \"!!!SET DJANGO_ADMIN_URL!!!\",\n formatted=\"{}/\",\n length=32,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_admin_url\n\n\ndef generate_random_user():\n return generate_random_string(length=32, using_ascii_letters=True)\n\n\ndef generate_postgres_user(debug=False):\n return DEBUG_VALUE if debug else generate_random_user()\n\n\ndef set_postgres_user(file_path, value):\n postgres_user = set_flag(file_path, \"!!!SET POSTGRES_USER!!!\", value=value)\n return postgres_user\n\n\ndef set_postgres_password(file_path, value=None):\n postgres_password = set_flag(\n file_path,\n \"!!!SET POSTGRES_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return postgres_password\n\n\ndef set_celery_flower_user(file_path, value):\n celery_flower_user = set_flag(file_path, \"!!!SET CELERY_FLOWER_USER!!!\", value=value)\n return celery_flower_user\n\n\ndef set_celery_flower_password(file_path, value=None):\n celery_flower_password = set_flag(\n file_path,\n \"!!!SET CELERY_FLOWER_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return celery_flower_password\n\n\ndef append_to_gitignore_file(ignored_line):\n with open(\".gitignore\", \"a\") as gitignore_file:\n gitignore_file.write(ignored_line)\n gitignore_file.write(\"\\n\")\n\n\ndef set_flags_in_envs(postgres_user, celery_flower_user, debug=False):\n local_django_envs_path = os.path.join(\".envs\", \".local\", \".django\")\n production_django_envs_path = os.path.join(\".envs\", \".production\", \".django\")\n local_postgres_envs_path = os.path.join(\".envs\", \".local\", \".postgres\")\n production_postgres_envs_path = os.path.join(\".envs\", \".production\", \".postgres\")\n\n set_django_secret_key(production_django_envs_path)\n set_django_admin_url(production_django_envs_path)\n\n set_postgres_user(local_postgres_envs_path, value=postgres_user)\n set_postgres_password(local_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n set_postgres_user(production_postgres_envs_path, value=postgres_user)\n set_postgres_password(production_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n\n set_celery_flower_user(local_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(local_django_envs_path, value=DEBUG_VALUE if debug else None)\n set_celery_flower_user(production_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(production_django_envs_path, value=DEBUG_VALUE if debug else None)\n\n\ndef set_flags_in_settings_files():\n set_django_secret_key(os.path.join(\"config\", \"settings\", \"local.py\"))\n set_django_secret_key(os.path.join(\"config\", \"settings\", \"test.py\"))\n\n\ndef remove_envs_and_associated_files():\n shutil.rmtree(\".envs\")\n os.remove(\"merge_production_dotenvs_in_dotenv.py\")\n shutil.rmtree(\"tests\")\n\n\ndef 
remove_celery_compose_dirs():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"django\", \"celery\"))\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"django\", \"celery\"))\n\n\ndef remove_node_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"node\"))\n\n\ndef remove_aws_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"aws\"))\n\n\ndef remove_drf_starter_files():\n os.remove(os.path.join(\"config\", \"api_router.py\"))\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"api\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_urls.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_views.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_swagger.py\"))\n\n\ndef main():\n debug = \"{{ cookiecutter.debug }}\".lower() == \"y\"\n\n set_flags_in_envs(\n DEBUG_VALUE if debug else generate_random_user(),\n DEBUG_VALUE if debug else generate_random_user(),\n debug=debug,\n )\n set_flags_in_settings_files()\n\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove_open_source_files()\n if \"{{ cookiecutter.open_source_license}}\" != \"GPLv3\":\n remove_gplv3_files()\n\n if \"{{ cookiecutter.username_type }}\" == \"username\":\n remove_custom_user_manager_files()\n\n if \"{{ cookiecutter.editor }}\" != \"PyCharm\":\n remove_pycharm_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_utility_files()\n else:\n remove_docker_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\" and \"{{ cookiecutter.cloud_provider}}\" != \"AWS\":\n remove_aws_dockerfile()\n\n if \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n remove_heroku_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"n\" and \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n print(\n INFO + \".env(s) are only utilized when Docker Compose and/or \"\n \"Heroku support is enabled so keeping them does not make sense \"\n \"given your current setup.\" + TERMINATOR\n )\n remove_envs_and_associated_files()\n else:\n append_to_gitignore_file(\".env\")\n append_to_gitignore_file(\".envs/*\")\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n append_to_gitignore_file(\"!.envs/.local/\")\n\n if \"{{ cookiecutter.frontend_pipeline }}\" in [\"None\", \"Django Compressor\"]:\n remove_gulp_files()\n remove_webpack_files()\n remove_sass_files()\n remove_packagejson_file()\n remove_prettier_pre_commit()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_node_dockerfile()\n else:\n handle_js_runner(\n \"{{ cookiecutter.frontend_pipeline }}\",\n use_docker=(\"{{ cookiecutter.use_docker }}\".lower() == \"y\"),\n use_async=(\"{{ cookiecutter.use_async }}\".lower() == \"y\"),\n )\n\n if \"{{ cookiecutter.cloud_provider }}\" == \"None\" and \"{{ cookiecutter.use_docker }}\".lower() == \"n\":\n print(\n WARNING + \"You chose to not use any cloud providers nor Docker, \"\n \"media files won't be served in production.\" + TERMINATOR\n )\n\n if \"{{ cookiecutter.use_celery }}\".lower() == \"n\":\n remove_celery_files()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_celery_compose_dirs()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Travis\":\n remove_dottravisyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Gitlab\":\n 
remove_dotgitlabciyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Github\":\n remove_dotgithub_folder()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Drone\":\n remove_dotdrone_file()\n\n if \"{{ cookiecutter.use_drf }}\".lower() == \"n\":\n remove_drf_starter_files()\n\n if \"{{ cookiecutter.use_async }}\".lower() == \"n\":\n remove_async_files()\n\n print(SUCCESS + \"Project initialized, keep up the good work!\" + TERMINATOR)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "hooks/post_gen_project.py"}]} |