problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_20708 | rasdani/github-patches | git_diff | cfpb__consumerfinance.gov-457 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
little typo on The Bureau page
http://beta.consumerfinance.gov/the-bureau/
"Organizaitonal structure and leadership >"
should be Organizational
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `_lib/wordpress_post_processor.py`
Content:
```
1 import sys
2 import json
3 import os.path
4 import requests
5 from string import Template
6
7 import dateutil.parser
8
9 def posts_at_url(url):
10
11 current_page = 1
12 max_page = sys.maxint
13
14 while current_page <= max_page:
15
16 url = os.path.expandvars(url)
17 resp = requests.get(url, params={'page':current_page, 'count': '-1'})
18 results = json.loads(resp.content)
19 current_page += 1
20 max_page = results['pages']
21 total = 0
22 for p in results['posts']:
23 total += 1
24 yield p
25
26 def documents(name, url, **kwargs):
27
28 for post in posts_at_url(url):
29 yield process_post(post)
30
31
32 def process_post(post, newsroom = False):
33 del post['comments']
34 post['_id'] = post['slug']
35 # remove fields we're not interested in
36 if post['type'] == 'cfpb_newsroom':
37 post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]
38 elif post['type'] == 'post':
39 post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]
40 if post['type'] == 'watchroom':
41 post['author'] = [post['author']['name']]
42 # convert watchroom_data_x into a proper list
43 links = []
44 for x in xrange(0,10):
45 custom_fields = post['custom_fields']
46 key = 'watchroom_data_%s_link' % x
47 if key in custom_fields:
48 links.append(custom_fields[key])
49 post['links'] = links
50 else:
51 post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
52 post['author'] = [author['title'] for author in
53 post['taxonomy_fj_author'] if 'Press Release' not in
54 post['category']]
55 if newsroom and post['type'] == 'post':
56 post['category'][0] = "Blog"
57 author_template = Template("$first_name $last_name")
58 dt = dateutil.parser.parse(post['date'])
59 dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')
60 post['date'] = dt_string
61 if 'twtr_text' in post['custom_fields']:
62 post['twtr_text'] = post['custom_fields']['twtr_text'][0]
63 if 'twtr_lang' in post['custom_fields']:
64 post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]
65 if 'twtr_rel' in post['custom_fields']:
66 post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]
67 if 'twtr_hash' in post['custom_fields']:
68 post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]
69 return post
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/_lib/wordpress_post_processor.py b/_lib/wordpress_post_processor.py
--- a/_lib/wordpress_post_processor.py
+++ b/_lib/wordpress_post_processor.py
@@ -37,13 +37,13 @@
post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]
elif post['type'] == 'post':
post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]
- if post['type'] == 'watchroom':
+ if post['type'] == 'featured_topic':
post['author'] = [post['author']['name']]
- # convert watchroom_data_x into a proper list
+ # convert featured_topic_data_x into a proper list
links = []
for x in xrange(0,10):
custom_fields = post['custom_fields']
- key = 'watchroom_data_%s_link' % x
+ key = 'featured_topic_data_%s_link' % x
if key in custom_fields:
links.append(custom_fields[key])
post['links'] = links
| {"golden_diff": "diff --git a/_lib/wordpress_post_processor.py b/_lib/wordpress_post_processor.py\n--- a/_lib/wordpress_post_processor.py\n+++ b/_lib/wordpress_post_processor.py\n@@ -37,13 +37,13 @@\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n- if post['type'] == 'watchroom':\n+ if post['type'] == 'featured_topic':\n post['author'] = [post['author']['name']]\n- # convert watchroom_data_x into a proper list\n+ # convert featured_topic_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n- key = 'watchroom_data_%s_link' % x\n+ key = 'featured_topic_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n", "issue": "little typo on The Bureau page\nhttp://beta.consumerfinance.gov/the-bureau/\n\n\"Organizaitonal structure and leadership >\"\n\nshould be Organizational\n\n", "before_files": [{"content": "import sys\nimport json\nimport os.path\nimport requests\nfrom string import Template\n\nimport dateutil.parser\n\ndef posts_at_url(url):\n \n current_page = 1\n max_page = sys.maxint\n\n while current_page <= max_page:\n\n url = os.path.expandvars(url)\n resp = requests.get(url, params={'page':current_page, 'count': '-1'})\n results = json.loads(resp.content) \n current_page += 1\n max_page = results['pages']\n total = 0\n for p in results['posts']:\n total += 1\n yield p\n\ndef documents(name, url, **kwargs):\n \n for post in posts_at_url(url):\n yield process_post(post)\n\n\ndef process_post(post, newsroom = False):\n del post['comments']\n post['_id'] = post['slug']\n # remove fields we're not interested in\n if post['type'] == 'cfpb_newsroom':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n if post['type'] == 'watchroom':\n post['author'] = [post['author']['name']]\n # convert watchroom_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n key = 'watchroom_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n else:\n post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]\n post['author'] = [author['title'] for author in\n post['taxonomy_fj_author'] if 'Press Release' not in\n post['category']]\n if newsroom and post['type'] == 'post':\n post['category'][0] = \"Blog\"\n author_template = Template(\"$first_name $last_name\")\n dt = dateutil.parser.parse(post['date'])\n dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')\n post['date'] = dt_string\n if 'twtr_text' in post['custom_fields']:\n post['twtr_text'] = post['custom_fields']['twtr_text'][0]\n if 'twtr_lang' in post['custom_fields']:\n post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]\n if 'twtr_rel' in post['custom_fields']:\n post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]\n if 'twtr_hash' in post['custom_fields']:\n post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]\n return post\n", "path": "_lib/wordpress_post_processor.py"}], "after_files": [{"content": "import sys\nimport json\nimport os.path\nimport requests\nfrom string import Template\n\nimport dateutil.parser\n\ndef posts_at_url(url):\n \n current_page = 1\n max_page = 
sys.maxint\n\n while current_page <= max_page:\n\n url = os.path.expandvars(url)\n resp = requests.get(url, params={'page':current_page, 'count': '-1'})\n results = json.loads(resp.content) \n current_page += 1\n max_page = results['pages']\n total = 0\n for p in results['posts']:\n total += 1\n yield p\n\ndef documents(name, url, **kwargs):\n \n for post in posts_at_url(url):\n yield process_post(post)\n\n\ndef process_post(post, newsroom = False):\n del post['comments']\n post['_id'] = post['slug']\n # remove fields we're not interested in\n if post['type'] == 'cfpb_newsroom':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n if post['type'] == 'featured_topic':\n post['author'] = [post['author']['name']]\n # convert featured_topic_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n key = 'featured_topic_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n else:\n post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]\n post['author'] = [author['title'] for author in\n post['taxonomy_fj_author'] if 'Press Release' not in\n post['category']]\n if newsroom and post['type'] == 'post':\n post['category'][0] = \"Blog\"\n author_template = Template(\"$first_name $last_name\")\n dt = dateutil.parser.parse(post['date'])\n dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')\n post['date'] = dt_string\n if 'twtr_text' in post['custom_fields']:\n post['twtr_text'] = post['custom_fields']['twtr_text'][0]\n if 'twtr_lang' in post['custom_fields']:\n post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]\n if 'twtr_rel' in post['custom_fields']:\n post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]\n if 'twtr_hash' in post['custom_fields']:\n post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]\n return post\n", "path": "_lib/wordpress_post_processor.py"}]} | 1,064 | 261 |
gh_patches_debug_22554 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-3313 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ops_template.py: Take care of json src variable
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ops_template.py
##### ANSIBLE VERSION
```
air$ ansible-playbook --version
ansible-playbook 2.1.0
config file = /usr/local/git/github.com/dc-on-docker/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
As mentioned in [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133), configuring OpenSwitch through the [OpenSwitch switch role](https://github.com/keinohguchi/ops-switch-role).
##### OS / ENVIRONMENT
Reproduced on ArchLinux control machine, just like [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133).
##### SUMMARY
Call `module.from_json` in case of `dc` or `rest` transport case.
##### STEPS TO REPRODUCE
```
---
- name: fabric switches
hosts: fabrics
gather_facts: yes
vars:
ansible_user: admin
ops_system_hostname: "{{ inventory_hostname }}"
ops_debug: no
roles:
- role: switch
ops_intf_admin_state: up
- name: spine switches
hosts: spines
gather_facts: yes
vars:
ansible_user: admin
ops_system_hostname: "{{ inventory_hostname }}"
ops_debug: no
roles:
- role: switch
ops_intf_admin_state: up
- name: leaf switches
hosts: leaves
gather_facts: yes
vars:
ansible_user: admin
ops_system_hostname: "{{ inventory_hostname }}"
ops_debug: no
roles:
- role: switch
ops_intf_admin_state: up
```
##### EXPECTED RESULTS
Configure the OPS switch both through DC or REST APIs.
##### ACTUAL RESULTS
```
air$ play site.yaml
PLAY [fabric switches] *********************************************************
TASK [setup] *******************************************************************
ok: [fab1]
TASK [switch : print JSON input for this play] *********************************
skipping: [fab1]
TASK [switch : configure the switch] *******************************************
fatal: [fab1]: FAILED! => {"changed": false, "failed": true, "msg": "argument src is of type <type 'str'> and we were unable to convert
to dict"}
```
--- END ISSUE ---
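The traceback above comes down to `src` arriving as a JSON-encoded string while the module's argument spec declares it as a dict. A minimal sketch of the decoding step the issue summary proposes; the `load_src` helper and the sample payload are illustrative, not part of the module:

```python
import json

def load_src(src):
    # AnsibleModule.from_json() is essentially a thin wrapper around
    # json.loads(), so decoding the string form of `src` yields the dict
    # that the diff/merge logic expects.
    return json.loads(src) if isinstance(src, str) else src

sample = '{"System": {"hostname": "fab1"}}'   # illustrative payload only
config = load_src(sample)
assert isinstance(config, dict) and config["System"]["hostname"] == "fab1"
```

Declaring `src` as `type='str'` in the argument spec and decoding it only on the `ssh`/`rest` path, as the summary suggests, avoids the failed automatic str-to-dict coercion reported above.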
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `network/openswitch/ops_template.py`
Content:
```
1 #!/usr/bin/python
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 #
18 DOCUMENTATION = """
19 ---
20 module: ops_template
21 version_added: "2.1"
22 author: "Peter Sprygada (@privateip)"
23 short_description: Push configuration to OpenSwitch
24 description:
25 - The OpenSwitch platform provides a library for pushing JSON structured
26 configuration files into the current running-config. This module
27 will read the current configuration from OpenSwitch and compare it
28 against a provided candidate configuration. If there are changes, the
29 candidate configuration is merged with the current configuration and
30 pushed into OpenSwitch
31 extends_documentation_fragment: openswitch
32 options:
33 src:
34 description:
35 - The path to the config source. The source can be either a
36 file with config or a template that will be merged during
37 runtime. By default the task will search for the source
38 file in role or playbook root folder in templates directory.
39 required: true
40 force:
41 description:
42 - The force argument instructs the module to not consider the
43 current devices running-config. When set to true, this will
44 cause the module to push the contents of I(src) into the device
45 without first checking if already configured.
46 required: false
47 default: false
48 choices: ['yes', 'no']
49 backup:
50 description:
51 - When this argument is configured true, the module will backup
52 the running-config from the node prior to making any changes.
53 The backup file will be written to backups/ in
54 the root of the playbook directory.
55 required: false
56 default: false
57 choices: ['yes', 'no']
58 config:
59 description:
60 - The module, by default, will connect to the remote device and
61 retrieve the current running-config to use as a base for comparing
62 against the contents of source. There are times when it is not
63 desirable to have the task get the current running-config for
64 every task in a playbook. The I(config) argument allows the
65 implementer to pass in the configuruation to use as the base
66 config for comparision.
67 required: false
68 default: null
69 """
70
71 EXAMPLES = """
72 - name: set hostname with file lookup
73 ops_template:
74 src: ./hostname.json
75 backup: yes
76 remote_user: admin
77 become: yes
78
79 - name: set hostname with var
80 ops_template:
81 src: "{{ config }}"
82 remote_user: admin
83 become: yes
84 """
85
86 RETURN = """
87 updates:
88 description: The list of configuration updates to be merged
89 retured: always
90 type: dict
91 sample: {obj, obj}
92 responses:
93 desription: returns the responses when configuring using cli
94 returned: when transport == cli
95 type: list
96 sample: [...]
97 """
98 import copy
99
100 def compare(this, other):
101 parents = [item.text for item in this.parents]
102 for entry in other:
103 if this == entry:
104 return None
105 return this
106
107 def expand(obj, queue):
108 block = [item.raw for item in obj.parents]
109 block.append(obj.raw)
110
111 current_level = queue
112 for b in block:
113 if b not in current_level:
114 current_level[b] = collections.OrderedDict()
115 current_level = current_level[b]
116 for c in obj.children:
117 if c.raw not in current_level:
118 current_level[c.raw] = collections.OrderedDict()
119
120 def flatten(data, obj):
121 for k, v in data.items():
122 obj.append(k)
123 flatten(v, obj)
124 return obj
125
126 def get_config(module):
127 config = module.params['config'] or dict()
128 if not config and not module.params['force']:
129 config = module.config
130 return config
131
132 def sort(val):
133 if isinstance(val, (list, set)):
134 return sorted(val)
135 return val
136
137 def diff(this, other, path=None):
138 updates = list()
139 path = path or list()
140 for key, value in this.items():
141 if key not in other:
142 other_value = other.get(key)
143 updates.append((list(path), key, value, other_value))
144 else:
145 if isinstance(this[key], dict):
146 path.append(key)
147 updates.extend(diff(this[key], other[key], list(path)))
148 path.pop()
149 else:
150 other_value = other.get(key)
151 if sort(this[key]) != sort(other_value):
152 updates.append((list(path), key, value, other_value))
153 return updates
154
155 def merge(changeset, config=None):
156 config = config or dict()
157 for path, key, value, _ in changeset:
158 current_level = config
159 for part in path:
160 if part not in current_level:
161 current_level[part] = dict()
162 current_level = current_level[part]
163 current_level[key] = value
164 return config
165
166 def main():
167 """ main entry point for module execution
168 """
169
170 argument_spec = dict(
171 src=dict(type='dict'),
172 force=dict(default=False, type='bool'),
173 backup=dict(default=False, type='bool'),
174 config=dict(type='dict'),
175 )
176
177 mutually_exclusive = [('config', 'backup'), ('config', 'force')]
178
179 module = get_module(argument_spec=argument_spec,
180 mutually_exclusive=mutually_exclusive,
181 supports_check_mode=True)
182
183 src = module.params['src']
184
185 result = dict(changed=False)
186
187 contents = get_config(module)
188 result['_backup'] = copy.deepcopy(module.config)
189
190 if module.params['transport'] in ['ssh', 'rest']:
191 config = contents
192
193 changeset = diff(src, config)
194 candidate = merge(changeset, config)
195
196 updates = dict()
197 for path, key, new_value, old_value in changeset:
198 path = '%s.%s' % ('.'.join(path), key)
199 updates[path] = new_value
200 result['updates'] = updates
201
202 if changeset:
203 if not module.check_mode:
204 module.configure(config)
205 result['changed'] = True
206
207 else:
208 config = module.parse_config(config)
209 candidate = module.parse_config(module.params['src'])
210
211 commands = collections.OrderedDict()
212 toplevel = [c.text for c in config]
213
214 for line in candidate:
215 if line.text in ['!', '']:
216 continue
217
218 if not line.parents:
219 if line.text not in toplevel:
220 expand(line, commands)
221 else:
222 item = compare(line, config)
223 if item:
224 expand(item, commands)
225
226 commands = flatten(commands, list())
227
228 if commands:
229 if not module.check_mode:
230 commands = [str(c).strip() for c in commands]
231 response = module.configure(commands)
232 result['responses'] = response
233 result['changed'] = True
234 result['updates'] = commands
235
236 module.exit_json(**result)
237
238 from ansible.module_utils.basic import *
239 from ansible.module_utils.urls import *
240 from ansible.module_utils.netcfg import *
241 from ansible.module_utils.shell import *
242 from ansible.module_utils.openswitch import *
243 if __name__ == '__main__':
244 main()
245
246
247
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/network/openswitch/ops_template.py b/network/openswitch/ops_template.py
--- a/network/openswitch/ops_template.py
+++ b/network/openswitch/ops_template.py
@@ -168,7 +168,7 @@
"""
argument_spec = dict(
- src=dict(type='dict'),
+ src=dict(type='str'),
force=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
config=dict(type='dict'),
@@ -180,8 +180,6 @@
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
- src = module.params['src']
-
result = dict(changed=False)
contents = get_config(module)
@@ -189,6 +187,7 @@
if module.params['transport'] in ['ssh', 'rest']:
config = contents
+ src = module.from_json(module.params['src'])
changeset = diff(src, config)
candidate = merge(changeset, config)
| {"golden_diff": "diff --git a/network/openswitch/ops_template.py b/network/openswitch/ops_template.py\n--- a/network/openswitch/ops_template.py\n+++ b/network/openswitch/ops_template.py\n@@ -168,7 +168,7 @@\n \"\"\"\n \n argument_spec = dict(\n- src=dict(type='dict'),\n+ src=dict(type='str'),\n force=dict(default=False, type='bool'),\n backup=dict(default=False, type='bool'),\n config=dict(type='dict'),\n@@ -180,8 +180,6 @@\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n \n- src = module.params['src']\n-\n result = dict(changed=False)\n \n contents = get_config(module)\n@@ -189,6 +187,7 @@\n \n if module.params['transport'] in ['ssh', 'rest']:\n config = contents\n+ src = module.from_json(module.params['src'])\n \n changeset = diff(src, config)\n candidate = merge(changeset, config)\n", "issue": "ops_template.py: Take care of json src variable\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nops_template.py\n##### ANSIBLE VERSION\n\n```\nair$ ansible-playbook --version\nansible-playbook 2.1.0\n config file = /usr/local/git/github.com/dc-on-docker/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nAs mentioned in [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133), configuring OpenSwitch through the [OpenSwitch switch role](https://github.com/keinohguchi/ops-switch-role).\n##### OS / ENVIRONMENT\n\nReproduced on ArchLinux control machine, just like [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133).\n##### SUMMARY\n\nCall `module.from_json` in case of `dc` or `rest` transport case.\n##### STEPS TO REPRODUCE\n\n```\n\n---\n- name: fabric switches\n hosts: fabrics\n gather_facts: yes\n vars:\n ansible_user: admin\n ops_system_hostname: \"{{ inventory_hostname }}\"\n ops_debug: no\n roles:\n - role: switch\n ops_intf_admin_state: up\n\n- name: spine switches\n hosts: spines\n gather_facts: yes\n vars:\n ansible_user: admin\n ops_system_hostname: \"{{ inventory_hostname }}\"\n ops_debug: no\n roles:\n - role: switch\n ops_intf_admin_state: up\n\n- name: leaf switches\n hosts: leaves\n gather_facts: yes\n vars:\n ansible_user: admin\n ops_system_hostname: \"{{ inventory_hostname }}\"\n ops_debug: no\n roles:\n - role: switch\n ops_intf_admin_state: up\n```\n##### EXPECTED RESULTS\n\nConfigure the OPS switch both through DC or REST APIs.\n##### ACTUAL RESULTS\n\n```\nair$ play site.yaml\n\nPLAY [fabric switches] *********************************************************\n\nTASK [setup] *******************************************************************\nok: [fab1]\n\nTASK [switch : print JSON input for this play] *********************************\nskipping: [fab1]\n\nTASK [switch : configure the switch] *******************************************\nfatal: [fab1]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"argument src is of type <type 'str'> and we were unable to convert\nto dict\"}\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\nDOCUMENTATION = \"\"\"\n---\nmodule: ops_template\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Push configuration to OpenSwitch\ndescription:\n - The OpenSwitch platform provides a library for pushing JSON structured\n configuration files into the current running-config. This module\n will read the current configuration from OpenSwitch and compare it\n against a provided candidate configuration. If there are changes, the\n candidate configuration is merged with the current configuration and\n pushed into OpenSwitch\nextends_documentation_fragment: openswitch\noptions:\n src:\n description:\n - The path to the config source. The source can be either a\n file with config or a template that will be merged during\n runtime. By default the task will search for the source\n file in role or playbook root folder in templates directory.\n required: true\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n required: false\n default: false\n choices: ['yes', 'no']\n backup:\n description:\n - When this argument is configured true, the module will backup\n the running-config from the node prior to making any changes.\n The backup file will be written to backups/ in\n the root of the playbook directory.\n required: false\n default: false\n choices: ['yes', 'no']\n config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. 
The I(config) argument allows the\n implementer to pass in the configuruation to use as the base\n config for comparision.\n required: false\n default: null\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: set hostname with file lookup\n ops_template:\n src: ./hostname.json\n backup: yes\n remote_user: admin\n become: yes\n\n- name: set hostname with var\n ops_template:\n src: \"{{ config }}\"\n remote_user: admin\n become: yes\n\"\"\"\n\nRETURN = \"\"\"\nupdates:\n description: The list of configuration updates to be merged\n retured: always\n type: dict\n sample: {obj, obj}\nresponses:\n desription: returns the responses when configuring using cli\n returned: when transport == cli\n type: list\n sample: [...]\n\"\"\"\nimport copy\n\ndef compare(this, other):\n parents = [item.text for item in this.parents]\n for entry in other:\n if this == entry:\n return None\n return this\n\ndef expand(obj, queue):\n block = [item.raw for item in obj.parents]\n block.append(obj.raw)\n\n current_level = queue\n for b in block:\n if b not in current_level:\n current_level[b] = collections.OrderedDict()\n current_level = current_level[b]\n for c in obj.children:\n if c.raw not in current_level:\n current_level[c.raw] = collections.OrderedDict()\n\ndef flatten(data, obj):\n for k, v in data.items():\n obj.append(k)\n flatten(v, obj)\n return obj\n\ndef get_config(module):\n config = module.params['config'] or dict()\n if not config and not module.params['force']:\n config = module.config\n return config\n\ndef sort(val):\n if isinstance(val, (list, set)):\n return sorted(val)\n return val\n\ndef diff(this, other, path=None):\n updates = list()\n path = path or list()\n for key, value in this.items():\n if key not in other:\n other_value = other.get(key)\n updates.append((list(path), key, value, other_value))\n else:\n if isinstance(this[key], dict):\n path.append(key)\n updates.extend(diff(this[key], other[key], list(path)))\n path.pop()\n else:\n other_value = other.get(key)\n if sort(this[key]) != sort(other_value):\n updates.append((list(path), key, value, other_value))\n return updates\n\ndef merge(changeset, config=None):\n config = config or dict()\n for path, key, value, _ in changeset:\n current_level = config\n for part in path:\n if part not in current_level:\n current_level[part] = dict()\n current_level = current_level[part]\n current_level[key] = value\n return config\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n\n argument_spec = dict(\n src=dict(type='dict'),\n force=dict(default=False, type='bool'),\n backup=dict(default=False, type='bool'),\n config=dict(type='dict'),\n )\n\n mutually_exclusive = [('config', 'backup'), ('config', 'force')]\n\n module = get_module(argument_spec=argument_spec,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n\n src = module.params['src']\n\n result = dict(changed=False)\n\n contents = get_config(module)\n result['_backup'] = copy.deepcopy(module.config)\n\n if module.params['transport'] in ['ssh', 'rest']:\n config = contents\n\n changeset = diff(src, config)\n candidate = merge(changeset, config)\n\n updates = dict()\n for path, key, new_value, old_value in changeset:\n path = '%s.%s' % ('.'.join(path), key)\n updates[path] = new_value\n result['updates'] = updates\n\n if changeset:\n if not module.check_mode:\n module.configure(config)\n result['changed'] = True\n\n else:\n config = module.parse_config(config)\n candidate = module.parse_config(module.params['src'])\n\n commands = collections.OrderedDict()\n toplevel = 
[c.text for c in config]\n\n for line in candidate:\n if line.text in ['!', '']:\n continue\n\n if not line.parents:\n if line.text not in toplevel:\n expand(line, commands)\n else:\n item = compare(line, config)\n if item:\n expand(item, commands)\n\n commands = flatten(commands, list())\n\n if commands:\n if not module.check_mode:\n commands = [str(c).strip() for c in commands]\n response = module.configure(commands)\n result['responses'] = response\n result['changed'] = True\n result['updates'] = commands\n\n module.exit_json(**result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nfrom ansible.module_utils.netcfg import *\nfrom ansible.module_utils.shell import *\nfrom ansible.module_utils.openswitch import *\nif __name__ == '__main__':\n main()\n\n\n\n", "path": "network/openswitch/ops_template.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\nDOCUMENTATION = \"\"\"\n---\nmodule: ops_template\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Push configuration to OpenSwitch\ndescription:\n - The OpenSwitch platform provides a library for pushing JSON structured\n configuration files into the current running-config. This module\n will read the current configuration from OpenSwitch and compare it\n against a provided candidate configuration. If there are changes, the\n candidate configuration is merged with the current configuration and\n pushed into OpenSwitch\nextends_documentation_fragment: openswitch\noptions:\n src:\n description:\n - The path to the config source. The source can be either a\n file with config or a template that will be merged during\n runtime. By default the task will search for the source\n file in role or playbook root folder in templates directory.\n required: true\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n required: false\n default: false\n choices: ['yes', 'no']\n backup:\n description:\n - When this argument is configured true, the module will backup\n the running-config from the node prior to making any changes.\n The backup file will be written to backups/ in\n the root of the playbook directory.\n required: false\n default: false\n choices: ['yes', 'no']\n config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. 
The I(config) argument allows the\n implementer to pass in the configuruation to use as the base\n config for comparision.\n required: false\n default: null\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: set hostname with file lookup\n ops_template:\n src: ./hostname.json\n backup: yes\n remote_user: admin\n become: yes\n\n- name: set hostname with var\n ops_template:\n src: \"{{ config }}\"\n remote_user: admin\n become: yes\n\"\"\"\n\nRETURN = \"\"\"\nupdates:\n description: The list of configuration updates to be merged\n retured: always\n type: dict\n sample: {obj, obj}\nresponses:\n desription: returns the responses when configuring using cli\n returned: when transport == cli\n type: list\n sample: [...]\n\"\"\"\nimport copy\n\ndef compare(this, other):\n parents = [item.text for item in this.parents]\n for entry in other:\n if this == entry:\n return None\n return this\n\ndef expand(obj, queue):\n block = [item.raw for item in obj.parents]\n block.append(obj.raw)\n\n current_level = queue\n for b in block:\n if b not in current_level:\n current_level[b] = collections.OrderedDict()\n current_level = current_level[b]\n for c in obj.children:\n if c.raw not in current_level:\n current_level[c.raw] = collections.OrderedDict()\n\ndef flatten(data, obj):\n for k, v in data.items():\n obj.append(k)\n flatten(v, obj)\n return obj\n\ndef get_config(module):\n config = module.params['config'] or dict()\n if not config and not module.params['force']:\n config = module.config\n return config\n\ndef sort(val):\n if isinstance(val, (list, set)):\n return sorted(val)\n return val\n\ndef diff(this, other, path=None):\n updates = list()\n path = path or list()\n for key, value in this.items():\n if key not in other:\n other_value = other.get(key)\n updates.append((list(path), key, value, other_value))\n else:\n if isinstance(this[key], dict):\n path.append(key)\n updates.extend(diff(this[key], other[key], list(path)))\n path.pop()\n else:\n other_value = other.get(key)\n if sort(this[key]) != sort(other_value):\n updates.append((list(path), key, value, other_value))\n return updates\n\ndef merge(changeset, config=None):\n config = config or dict()\n for path, key, value, _ in changeset:\n current_level = config\n for part in path:\n if part not in current_level:\n current_level[part] = dict()\n current_level = current_level[part]\n current_level[key] = value\n return config\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n\n argument_spec = dict(\n src=dict(type='str'),\n force=dict(default=False, type='bool'),\n backup=dict(default=False, type='bool'),\n config=dict(type='dict'),\n )\n\n mutually_exclusive = [('config', 'backup'), ('config', 'force')]\n\n module = get_module(argument_spec=argument_spec,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n\n result = dict(changed=False)\n\n contents = get_config(module)\n result['_backup'] = copy.deepcopy(module.config)\n\n if module.params['transport'] in ['ssh', 'rest']:\n config = contents\n src = module.from_json(module.params['src'])\n\n changeset = diff(src, config)\n candidate = merge(changeset, config)\n\n updates = dict()\n for path, key, new_value, old_value in changeset:\n path = '%s.%s' % ('.'.join(path), key)\n updates[path] = new_value\n result['updates'] = updates\n\n if changeset:\n if not module.check_mode:\n module.configure(config)\n result['changed'] = True\n\n else:\n config = module.parse_config(config)\n candidate = module.parse_config(module.params['src'])\n\n commands = 
collections.OrderedDict()\n toplevel = [c.text for c in config]\n\n for line in candidate:\n if line.text in ['!', '']:\n continue\n\n if not line.parents:\n if line.text not in toplevel:\n expand(line, commands)\n else:\n item = compare(line, config)\n if item:\n expand(item, commands)\n\n commands = flatten(commands, list())\n\n if commands:\n if not module.check_mode:\n commands = [str(c).strip() for c in commands]\n response = module.configure(commands)\n result['responses'] = response\n result['changed'] = True\n result['updates'] = commands\n\n module.exit_json(**result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nfrom ansible.module_utils.netcfg import *\nfrom ansible.module_utils.shell import *\nfrom ansible.module_utils.openswitch import *\nif __name__ == '__main__':\n main()\n\n\n\n", "path": "network/openswitch/ops_template.py"}]} | 3,106 | 232 |
gh_patches_debug_15794 | rasdani/github-patches | git_diff | systemd__mkosi-2611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/var/lib/dpkg/available does not exist and causes errors when installing packages using apt in image
### mkosi commit the issue has been seen with
v22
### Used host distribution
Debian 12 (backports enabled)
### Used target distribution
Ubuntu 20.04
### Linux kernel version used
6.1.0-18-amd64
### CPU architectures issue was seen on
x86_64
### Unexpected behaviour you saw
mkosi does not create /var/lib/dpkg/available which is used by the old tool dselect. This caused errors when using piuparts to test Debian packages. The error occurs when piuparts installs the package to be tested into a chroot environment.
We have already found a workaround by running the following command in a prepare script:
```bash
dpkg --update-avail
```
I'm wondering if this command should be part of the mkosi build process.
### Used mkosi config
```ini
[Output]
Format=tar
CompressOutput=false
[Host]
QemuSwtpm=False
# setup qemu to exit on kernel panic
# https://unix.stackexchange.com/questions/443017/can-i-make-qemu-exit-with-failure-on-kernel-panic
QemuArgs=-no-reboot
KernelCommandLineExtra=panic=-1
[Content]
Bootable=false
WithDocs=false
PackageDirectories=../extra_packages/
[Packages]
WithNetwork=true
[Distribution]
Architecture=x86-64
Distribution=ubuntu
Release=focal
Repositories=main,universe
@Mirror=http://my-package-mirror.local/ubuntu
[Content]
Packages=apt
apt-transport-https
sudo
ca-certificates
ca-custom
gpg
SkeletonTrees=../../../../data/rootfs
```
### mkosi output
```sh
piuparts output:
Preconfiguring packages ...
dpkg: error: failed to open package info file '/var/lib/dpkg/available' for reading: No such file or directory
Fetched 497 MB in 11s (44.2 MB/s)
E: Sub-process dpkg --set-selections returned an error code (2)
E: Couldn't record the approved state changes as dpkg selection states
```
--- END ISSUE ---
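The failure above is dselect's `available` database simply not existing in the freshly bootstrapped root; apt itself only needs `status`, which is why only that file gets created. A minimal sketch of the kind of change the reporter is asking about; the helper name is hypothetical, and the paths mirror the ones used in mkosi's apt setup:

```python
from pathlib import Path

def ensure_dpkg_database(root: Path) -> None:
    dpkg_dir = root / "var/lib/dpkg"
    dpkg_dir.mkdir(parents=True, exist_ok=True)
    # apt only requires the `status` file, but dselect-based tooling
    # (e.g. piuparts) also expects `available` to exist.
    (dpkg_dir / "status").touch()
    (dpkg_dir / "available").touch()
```

Until such a change lands, the `dpkg --update-avail` workaround run from a prepare script, as described in the issue, has the same effect.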
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkosi/installer/apt.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2 import os
3 import textwrap
4 from collections.abc import Iterable, Sequence
5 from pathlib import Path
6 from typing import NamedTuple, Optional
7
8 from mkosi.config import Config, ConfigFeature
9 from mkosi.context import Context
10 from mkosi.installer import PackageManager
11 from mkosi.log import die
12 from mkosi.mounts import finalize_source_mounts
13 from mkosi.run import find_binary, run
14 from mkosi.sandbox import Mount, apivfs_cmd
15 from mkosi.types import _FILE, CompletedProcess, PathString
16 from mkosi.util import umask
17
18
19 class Apt(PackageManager):
20 class Repository(NamedTuple):
21 types: tuple[str, ...]
22 url: str
23 suite: str
24 components: tuple[str, ...]
25 signedby: Optional[Path]
26
27 def __str__(self) -> str:
28 return textwrap.dedent(
29 f"""\
30 Types: {" ".join(self.types)}
31 URIs: {self.url}
32 Suites: {self.suite}
33 Components: {" ".join(self.components)}
34 {"Signed-By" if self.signedby else "Trusted"}: {self.signedby or "yes"}
35
36 """
37 )
38
39 @classmethod
40 def executable(cls, config: Config) -> str:
41 return "apt"
42
43 @classmethod
44 def subdir(cls, config: Config) -> Path:
45 return Path("apt")
46
47 @classmethod
48 def cache_subdirs(cls, cache: Path) -> list[Path]:
49 return [cache / "archives"]
50
51 @classmethod
52 def scripts(cls, context: Context) -> dict[str, list[PathString]]:
53 return {
54 **{
55 command: apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context, command) for command in (
56 "apt",
57 "apt-cache",
58 "apt-cdrom",
59 "apt-config",
60 "apt-extracttemplates",
61 "apt-get",
62 "apt-key",
63 "apt-mark",
64 "apt-sortpkgs",
65 )
66 },
67 "mkosi-install" : ["apt-get", "install"],
68 "mkosi-upgrade" : ["apt-get", "upgrade"],
69 "mkosi-remove" : ["apt-get", "purge"],
70 "mkosi-reinstall": ["apt-get", "install", "--reinstall"],
71 }
72
73 @classmethod
74 def setup(cls, context: Context, repos: Iterable[Repository]) -> None:
75 (context.pkgmngr / "etc/apt").mkdir(exist_ok=True, parents=True)
76 (context.pkgmngr / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True)
77 (context.pkgmngr / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True)
78 (context.pkgmngr / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True)
79
80 with umask(~0o755):
81 # TODO: Drop once apt 2.5.4 is widely available.
82 (context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True)
83 (context.root / "var/lib/dpkg/status").touch()
84
85 # We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG
86 # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is
87 # required because apt parses CLI configuration options after parsing its configuration files and as such we
88 # can't use CLI options to tell apt where to look for configuration files.
89 config = context.pkgmngr / "etc/apt.conf"
90 if not config.exists():
91 config.write_text(
92 textwrap.dedent(
93 """\
94 Dir::Etc "/etc/apt";
95 """
96 )
97 )
98
99 sources = context.pkgmngr / "etc/apt/sources.list.d/mkosi.sources"
100 if not sources.exists():
101 for repo in repos:
102 if repo.signedby and not repo.signedby.exists():
103 die(
104 f"Keyring for repo {repo.url} not found at {repo.signedby}",
105 hint="Make sure the right keyring package (e.g. debian-archive-keyring or ubuntu-keyring) is "
106 "installed",
107 )
108
109 with sources.open("w") as f:
110 for repo in repos:
111 f.write(str(repo))
112
113 @classmethod
114 def finalize_environment(cls, context: Context) -> dict[str, str]:
115 env = {
116 "APT_CONFIG": "/etc/apt.conf",
117 "DEBIAN_FRONTEND" : "noninteractive",
118 "DEBCONF_INTERACTIVE_SEEN": "true",
119 }
120
121 if "INITRD" not in context.config.environment and context.config.bootable != ConfigFeature.disabled:
122 env["INITRD"] = "No"
123
124 return super().finalize_environment(context) | env
125
126 @classmethod
127 def cmd(cls, context: Context, command: str) -> list[PathString]:
128 debarch = context.config.distribution.architecture(context.config.architecture)
129
130 cmdline: list[PathString] = [
131 command,
132 "-o", f"APT::Architecture={debarch}",
133 "-o", f"APT::Architectures={debarch}",
134 "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}",
135 "-o", "APT::Immediate-Configure=off",
136 "-o", "APT::Get::Assume-Yes=true",
137 "-o", "APT::Get::AutomaticRemove=true",
138 "-o", "APT::Get::Allow-Change-Held-Packages=true",
139 "-o", "APT::Get::Allow-Remove-Essential=true",
140 "-o", "APT::Sandbox::User=root",
141 "-o", "Dir::Cache=/var/cache/apt",
142 "-o", "Dir::State=/var/lib/apt",
143 "-o", "Dir::Log=/var/log/apt",
144 "-o", "Dir::State::Status=/buildroot/var/lib/dpkg/status",
145 "-o", f"Dir::Bin::DPkg={find_binary('dpkg', root=context.config.tools())}",
146 "-o", "Debug::NoLocking=true",
147 "-o", "DPkg::Options::=--root=/buildroot",
148 "-o", "DPkg::Options::=--force-unsafe-io",
149 "-o", "DPkg::Options::=--force-architecture",
150 "-o", "DPkg::Options::=--force-depends",
151 "-o", "DPkg::Options::=--no-debsig",
152 "-o", "DPkg::Use-Pty=false",
153 "-o", "DPkg::Install::Recursive::Minimum=1000",
154 "-o", "pkgCacheGen::ForceEssential=,",
155 ]
156
157 if not context.config.repository_key_check:
158 cmdline += [
159 "-o", "Acquire::AllowInsecureRepositories=true",
160 "-o", "Acquire::AllowDowngradeToInsecureRepositories=true",
161 "-o", "APT::Get::AllowUnauthenticated=true",
162 ]
163
164 if not context.config.with_docs:
165 cmdline += [
166 "-o", "DPkg::Options::=--path-exclude=/usr/share/doc/*",
167 "-o", "DPkg::Options::=--path-include=/usr/share/doc/*/copyright",
168 "-o", "DPkg::Options::=--path-exclude=/usr/share/man/*",
169 "-o", "DPkg::Options::=--path-exclude=/usr/share/groff/*",
170 "-o", "DPkg::Options::=--path-exclude=/usr/share/info/*",
171 ]
172
173 if context.config.proxy_url:
174 cmdline += [
175 "-o", f"Acquire::http::Proxy={context.config.proxy_url}",
176 "-o", f"Acquire::https::Proxy={context.config.proxy_url}",
177 ]
178
179 return cmdline
180
181 @classmethod
182 def invoke(
183 cls,
184 context: Context,
185 operation: str,
186 arguments: Sequence[str] = (),
187 *,
188 apivfs: bool = False,
189 mounts: Sequence[Mount] = (),
190 stdout: _FILE = None,
191 ) -> CompletedProcess:
192 with finalize_source_mounts(
193 context.config,
194 ephemeral=os.getuid() == 0 and context.config.build_sources_ephemeral,
195 ) as sources:
196 return run(
197 cls.cmd(context, "apt-get") + [operation, *arguments],
198 sandbox=(
199 context.sandbox(
200 network=True,
201 mounts=[Mount(context.root, "/buildroot"), *cls.mounts(context), *sources, *mounts],
202 options=["--dir", "/work/src", "--chdir", "/work/src"],
203 ) + (apivfs_cmd() if apivfs else [])
204 ),
205 env=context.config.environment | cls.finalize_environment(context),
206 stdout=stdout,
207 )
208
209 @classmethod
210 def sync(cls, context: Context) -> None:
211 cls.invoke(context, "update")
212
213 @classmethod
214 def createrepo(cls, context: Context) -> None:
215 with (context.packages / "Packages").open("wb") as f:
216 run(
217 ["dpkg-scanpackages", "."],
218 stdout=f,
219 sandbox=context.sandbox(
220 mounts=[Mount(context.packages, context.packages, ro=True)],
221 options=["--chdir", context.packages],
222 ),
223 )
224
225 (context.pkgmngr / "etc/apt/sources.list.d").mkdir(parents=True, exist_ok=True)
226 (context.pkgmngr / "etc/apt/sources.list.d/mkosi-local.sources").write_text(
227 textwrap.dedent(
228 """\
229 Enabled: yes
230 Types: deb
231 URIs: file:///work/packages
232 Suites: ./
233 Trusted: yes
234 """
235 )
236 )
237
238 cls.invoke(
239 context,
240 "update",
241 arguments=[
242 "-o", "Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources",
243 "-o", "Dir::Etc::sourceparts=-",
244 "-o", "APT::Get::List-Cleanup=0",
245 ],
246 )
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py
--- a/mkosi/installer/apt.py
+++ b/mkosi/installer/apt.py
@@ -82,6 +82,8 @@
(context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True)
(context.root / "var/lib/dpkg/status").touch()
+ (context.root / "var/lib/dpkg/available").touch()
+
# We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG
# to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is
# required because apt parses CLI configuration options after parsing its configuration files and as such we
| {"golden_diff": "diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py\n--- a/mkosi/installer/apt.py\n+++ b/mkosi/installer/apt.py\n@@ -82,6 +82,8 @@\n (context.root / \"var/lib/dpkg\").mkdir(parents=True, exist_ok=True)\n (context.root / \"var/lib/dpkg/status\").touch()\n \n+ (context.root / \"var/lib/dpkg/available\").touch()\n+\n # We have a special apt.conf outside of pkgmngr dir that only configures \"Dir::Etc\" that we pass to APT_CONFIG\n # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is\n # required because apt parses CLI configuration options after parsing its configuration files and as such we\n", "issue": "/var/lib/dpkg/available does not exist and causes errors when installing packages using apt in image\n### mkosi commit the issue has been seen with\n\nv22\n\n### Used host distribution\n\nDebian 12 (backports enabled)\n\n### Used target distribution\n\nUbuntu 20.04\n\n### Linux kernel version used\n\n6.1.0-18-amd64\n\n### CPU architectures issue was seen on\n\nx86_64\n\n### Unexpected behaviour you saw\n\nmkosi does not create /var/lib/dpkg/available which is used by the old tool dselect. This caused errors when using piuparts to test Debian packages. The error occurs when piuparts installs the package to be tested into a chroot environment.\r\n\r\nWe have already found a workaround by running the following command in a prepare script:\r\n```bash\r\ndpkg --update-avail\r\n```\r\n\r\nI'm wondering if this command should be part of the mkosi build process.\n\n### Used mkosi config\n\n```ini\n[Output]\r\nFormat=tar\r\nCompressOutput=false\r\n\r\n[Host]\r\nQemuSwtpm=False\r\n# setup qemu to exit on kernel panic\r\n# https://unix.stackexchange.com/questions/443017/can-i-make-qemu-exit-with-failure-on-kernel-panic\r\nQemuArgs=-no-reboot\r\nKernelCommandLineExtra=panic=-1\r\n\r\n[Content]\r\nBootable=false\r\nWithDocs=false\r\nPackageDirectories=../extra_packages/\r\n\r\n[Packages]\r\nWithNetwork=true\r\n\r\n[Distribution]\r\nArchitecture=x86-64\r\nDistribution=ubuntu\r\nRelease=focal\r\nRepositories=main,universe\r\n\r\n@Mirror=http://my-package-mirror.local/ubuntu\r\n\r\n[Content]\r\nPackages=apt\r\n apt-transport-https\r\n sudo\r\n ca-certificates\r\n ca-custom\r\n gpg\r\nSkeletonTrees=../../../../data/rootfs\n```\n\n\n### mkosi output\n\n```sh\npiuparts output:\r\n\r\nPreconfiguring packages ...\r\n dpkg: error: failed to open package info file '/var/lib/dpkg/available' for reading: No such file or directory\r\n Fetched 497 MB in 11s (44.2 MB/s)\r\n E: Sub-process dpkg --set-selections returned an error code (2)\r\n E: Couldn't record the approved state changes as dpkg selection states\n```\n\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\nimport os\nimport textwrap\nfrom collections.abc import Iterable, Sequence\nfrom pathlib import Path\nfrom typing import NamedTuple, Optional\n\nfrom mkosi.config import Config, ConfigFeature\nfrom mkosi.context import Context\nfrom mkosi.installer import PackageManager\nfrom mkosi.log import die\nfrom mkosi.mounts import finalize_source_mounts\nfrom mkosi.run import find_binary, run\nfrom mkosi.sandbox import Mount, apivfs_cmd\nfrom mkosi.types import _FILE, CompletedProcess, PathString\nfrom mkosi.util import umask\n\n\nclass Apt(PackageManager):\n class Repository(NamedTuple):\n types: tuple[str, ...]\n url: str\n suite: str\n components: tuple[str, ...]\n signedby: Optional[Path]\n\n def __str__(self) -> str:\n return textwrap.dedent(\n f\"\"\"\\\n 
Types: {\" \".join(self.types)}\n URIs: {self.url}\n Suites: {self.suite}\n Components: {\" \".join(self.components)}\n {\"Signed-By\" if self.signedby else \"Trusted\"}: {self.signedby or \"yes\"}\n\n \"\"\"\n )\n\n @classmethod\n def executable(cls, config: Config) -> str:\n return \"apt\"\n\n @classmethod\n def subdir(cls, config: Config) -> Path:\n return Path(\"apt\")\n\n @classmethod\n def cache_subdirs(cls, cache: Path) -> list[Path]:\n return [cache / \"archives\"]\n\n @classmethod\n def scripts(cls, context: Context) -> dict[str, list[PathString]]:\n return {\n **{\n command: apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context, command) for command in (\n \"apt\",\n \"apt-cache\",\n \"apt-cdrom\",\n \"apt-config\",\n \"apt-extracttemplates\",\n \"apt-get\",\n \"apt-key\",\n \"apt-mark\",\n \"apt-sortpkgs\",\n )\n },\n \"mkosi-install\" : [\"apt-get\", \"install\"],\n \"mkosi-upgrade\" : [\"apt-get\", \"upgrade\"],\n \"mkosi-remove\" : [\"apt-get\", \"purge\"],\n \"mkosi-reinstall\": [\"apt-get\", \"install\", \"--reinstall\"],\n }\n\n @classmethod\n def setup(cls, context: Context, repos: Iterable[Repository]) -> None:\n (context.pkgmngr / \"etc/apt\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/apt.conf.d\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/preferences.d\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/sources.list.d\").mkdir(exist_ok=True, parents=True)\n\n with umask(~0o755):\n # TODO: Drop once apt 2.5.4 is widely available.\n (context.root / \"var/lib/dpkg\").mkdir(parents=True, exist_ok=True)\n (context.root / \"var/lib/dpkg/status\").touch()\n\n # We have a special apt.conf outside of pkgmngr dir that only configures \"Dir::Etc\" that we pass to APT_CONFIG\n # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is\n # required because apt parses CLI configuration options after parsing its configuration files and as such we\n # can't use CLI options to tell apt where to look for configuration files.\n config = context.pkgmngr / \"etc/apt.conf\"\n if not config.exists():\n config.write_text(\n textwrap.dedent(\n \"\"\"\\\n Dir::Etc \"/etc/apt\";\n \"\"\"\n )\n )\n\n sources = context.pkgmngr / \"etc/apt/sources.list.d/mkosi.sources\"\n if not sources.exists():\n for repo in repos:\n if repo.signedby and not repo.signedby.exists():\n die(\n f\"Keyring for repo {repo.url} not found at {repo.signedby}\",\n hint=\"Make sure the right keyring package (e.g. 
debian-archive-keyring or ubuntu-keyring) is \"\n \"installed\",\n )\n\n with sources.open(\"w\") as f:\n for repo in repos:\n f.write(str(repo))\n\n @classmethod\n def finalize_environment(cls, context: Context) -> dict[str, str]:\n env = {\n \"APT_CONFIG\": \"/etc/apt.conf\",\n \"DEBIAN_FRONTEND\" : \"noninteractive\",\n \"DEBCONF_INTERACTIVE_SEEN\": \"true\",\n }\n\n if \"INITRD\" not in context.config.environment and context.config.bootable != ConfigFeature.disabled:\n env[\"INITRD\"] = \"No\"\n\n return super().finalize_environment(context) | env\n\n @classmethod\n def cmd(cls, context: Context, command: str) -> list[PathString]:\n debarch = context.config.distribution.architecture(context.config.architecture)\n\n cmdline: list[PathString] = [\n command,\n \"-o\", f\"APT::Architecture={debarch}\",\n \"-o\", f\"APT::Architectures={debarch}\",\n \"-o\", f\"APT::Install-Recommends={str(context.config.with_recommends).lower()}\",\n \"-o\", \"APT::Immediate-Configure=off\",\n \"-o\", \"APT::Get::Assume-Yes=true\",\n \"-o\", \"APT::Get::AutomaticRemove=true\",\n \"-o\", \"APT::Get::Allow-Change-Held-Packages=true\",\n \"-o\", \"APT::Get::Allow-Remove-Essential=true\",\n \"-o\", \"APT::Sandbox::User=root\",\n \"-o\", \"Dir::Cache=/var/cache/apt\",\n \"-o\", \"Dir::State=/var/lib/apt\",\n \"-o\", \"Dir::Log=/var/log/apt\",\n \"-o\", \"Dir::State::Status=/buildroot/var/lib/dpkg/status\",\n \"-o\", f\"Dir::Bin::DPkg={find_binary('dpkg', root=context.config.tools())}\",\n \"-o\", \"Debug::NoLocking=true\",\n \"-o\", \"DPkg::Options::=--root=/buildroot\",\n \"-o\", \"DPkg::Options::=--force-unsafe-io\",\n \"-o\", \"DPkg::Options::=--force-architecture\",\n \"-o\", \"DPkg::Options::=--force-depends\",\n \"-o\", \"DPkg::Options::=--no-debsig\",\n \"-o\", \"DPkg::Use-Pty=false\",\n \"-o\", \"DPkg::Install::Recursive::Minimum=1000\",\n \"-o\", \"pkgCacheGen::ForceEssential=,\",\n ]\n\n if not context.config.repository_key_check:\n cmdline += [\n \"-o\", \"Acquire::AllowInsecureRepositories=true\",\n \"-o\", \"Acquire::AllowDowngradeToInsecureRepositories=true\",\n \"-o\", \"APT::Get::AllowUnauthenticated=true\",\n ]\n\n if not context.config.with_docs:\n cmdline += [\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/doc/*\",\n \"-o\", \"DPkg::Options::=--path-include=/usr/share/doc/*/copyright\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/man/*\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/groff/*\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/info/*\",\n ]\n\n if context.config.proxy_url:\n cmdline += [\n \"-o\", f\"Acquire::http::Proxy={context.config.proxy_url}\",\n \"-o\", f\"Acquire::https::Proxy={context.config.proxy_url}\",\n ]\n\n return cmdline\n\n @classmethod\n def invoke(\n cls,\n context: Context,\n operation: str,\n arguments: Sequence[str] = (),\n *,\n apivfs: bool = False,\n mounts: Sequence[Mount] = (),\n stdout: _FILE = None,\n ) -> CompletedProcess:\n with finalize_source_mounts(\n context.config,\n ephemeral=os.getuid() == 0 and context.config.build_sources_ephemeral,\n ) as sources:\n return run(\n cls.cmd(context, \"apt-get\") + [operation, *arguments],\n sandbox=(\n context.sandbox(\n network=True,\n mounts=[Mount(context.root, \"/buildroot\"), *cls.mounts(context), *sources, *mounts],\n options=[\"--dir\", \"/work/src\", \"--chdir\", \"/work/src\"],\n ) + (apivfs_cmd() if apivfs else [])\n ),\n env=context.config.environment | cls.finalize_environment(context),\n stdout=stdout,\n )\n\n @classmethod\n def sync(cls, context: Context) -> 
None:\n cls.invoke(context, \"update\")\n\n @classmethod\n def createrepo(cls, context: Context) -> None:\n with (context.packages / \"Packages\").open(\"wb\") as f:\n run(\n [\"dpkg-scanpackages\", \".\"],\n stdout=f,\n sandbox=context.sandbox(\n mounts=[Mount(context.packages, context.packages, ro=True)],\n options=[\"--chdir\", context.packages],\n ),\n )\n\n (context.pkgmngr / \"etc/apt/sources.list.d\").mkdir(parents=True, exist_ok=True)\n (context.pkgmngr / \"etc/apt/sources.list.d/mkosi-local.sources\").write_text(\n textwrap.dedent(\n \"\"\"\\\n Enabled: yes\n Types: deb\n URIs: file:///work/packages\n Suites: ./\n Trusted: yes\n \"\"\"\n )\n )\n\n cls.invoke(\n context,\n \"update\",\n arguments=[\n \"-o\", \"Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources\",\n \"-o\", \"Dir::Etc::sourceparts=-\",\n \"-o\", \"APT::Get::List-Cleanup=0\",\n ],\n )\n", "path": "mkosi/installer/apt.py"}], "after_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\nimport os\nimport textwrap\nfrom collections.abc import Iterable, Sequence\nfrom pathlib import Path\nfrom typing import NamedTuple, Optional\n\nfrom mkosi.config import Config, ConfigFeature\nfrom mkosi.context import Context\nfrom mkosi.installer import PackageManager\nfrom mkosi.log import die\nfrom mkosi.mounts import finalize_source_mounts\nfrom mkosi.run import find_binary, run\nfrom mkosi.sandbox import Mount, apivfs_cmd\nfrom mkosi.types import _FILE, CompletedProcess, PathString\nfrom mkosi.util import umask\n\n\nclass Apt(PackageManager):\n class Repository(NamedTuple):\n types: tuple[str, ...]\n url: str\n suite: str\n components: tuple[str, ...]\n signedby: Optional[Path]\n\n def __str__(self) -> str:\n return textwrap.dedent(\n f\"\"\"\\\n Types: {\" \".join(self.types)}\n URIs: {self.url}\n Suites: {self.suite}\n Components: {\" \".join(self.components)}\n {\"Signed-By\" if self.signedby else \"Trusted\"}: {self.signedby or \"yes\"}\n\n \"\"\"\n )\n\n @classmethod\n def executable(cls, config: Config) -> str:\n return \"apt\"\n\n @classmethod\n def subdir(cls, config: Config) -> Path:\n return Path(\"apt\")\n\n @classmethod\n def cache_subdirs(cls, cache: Path) -> list[Path]:\n return [cache / \"archives\"]\n\n @classmethod\n def scripts(cls, context: Context) -> dict[str, list[PathString]]:\n return {\n **{\n command: apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context, command) for command in (\n \"apt\",\n \"apt-cache\",\n \"apt-cdrom\",\n \"apt-config\",\n \"apt-extracttemplates\",\n \"apt-get\",\n \"apt-key\",\n \"apt-mark\",\n \"apt-sortpkgs\",\n )\n },\n \"mkosi-install\" : [\"apt-get\", \"install\"],\n \"mkosi-upgrade\" : [\"apt-get\", \"upgrade\"],\n \"mkosi-remove\" : [\"apt-get\", \"purge\"],\n \"mkosi-reinstall\": [\"apt-get\", \"install\", \"--reinstall\"],\n }\n\n @classmethod\n def setup(cls, context: Context, repos: Iterable[Repository]) -> None:\n (context.pkgmngr / \"etc/apt\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/apt.conf.d\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/preferences.d\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/sources.list.d\").mkdir(exist_ok=True, parents=True)\n\n with umask(~0o755):\n # TODO: Drop once apt 2.5.4 is widely available.\n (context.root / \"var/lib/dpkg\").mkdir(parents=True, exist_ok=True)\n (context.root / \"var/lib/dpkg/status\").touch()\n\n (context.root / \"var/lib/dpkg/available\").touch()\n\n # We have a special apt.conf outside of pkgmngr dir that only configures \"Dir::Etc\" 
that we pass to APT_CONFIG\n # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is\n # required because apt parses CLI configuration options after parsing its configuration files and as such we\n # can't use CLI options to tell apt where to look for configuration files.\n config = context.pkgmngr / \"etc/apt.conf\"\n if not config.exists():\n config.write_text(\n textwrap.dedent(\n \"\"\"\\\n Dir::Etc \"/etc/apt\";\n \"\"\"\n )\n )\n\n sources = context.pkgmngr / \"etc/apt/sources.list.d/mkosi.sources\"\n if not sources.exists():\n for repo in repos:\n if repo.signedby and not repo.signedby.exists():\n die(\n f\"Keyring for repo {repo.url} not found at {repo.signedby}\",\n hint=\"Make sure the right keyring package (e.g. debian-archive-keyring or ubuntu-keyring) is \"\n \"installed\",\n )\n\n with sources.open(\"w\") as f:\n for repo in repos:\n f.write(str(repo))\n\n @classmethod\n def finalize_environment(cls, context: Context) -> dict[str, str]:\n env = {\n \"APT_CONFIG\": \"/etc/apt.conf\",\n \"DEBIAN_FRONTEND\" : \"noninteractive\",\n \"DEBCONF_INTERACTIVE_SEEN\": \"true\",\n }\n\n if \"INITRD\" not in context.config.environment and context.config.bootable != ConfigFeature.disabled:\n env[\"INITRD\"] = \"No\"\n\n return super().finalize_environment(context) | env\n\n @classmethod\n def cmd(cls, context: Context, command: str) -> list[PathString]:\n debarch = context.config.distribution.architecture(context.config.architecture)\n\n cmdline: list[PathString] = [\n command,\n \"-o\", f\"APT::Architecture={debarch}\",\n \"-o\", f\"APT::Architectures={debarch}\",\n \"-o\", f\"APT::Install-Recommends={str(context.config.with_recommends).lower()}\",\n \"-o\", \"APT::Immediate-Configure=off\",\n \"-o\", \"APT::Get::Assume-Yes=true\",\n \"-o\", \"APT::Get::AutomaticRemove=true\",\n \"-o\", \"APT::Get::Allow-Change-Held-Packages=true\",\n \"-o\", \"APT::Get::Allow-Remove-Essential=true\",\n \"-o\", \"APT::Sandbox::User=root\",\n \"-o\", \"Dir::Cache=/var/cache/apt\",\n \"-o\", \"Dir::State=/var/lib/apt\",\n \"-o\", \"Dir::Log=/var/log/apt\",\n \"-o\", \"Dir::State::Status=/buildroot/var/lib/dpkg/status\",\n \"-o\", f\"Dir::Bin::DPkg={find_binary('dpkg', root=context.config.tools())}\",\n \"-o\", \"Debug::NoLocking=true\",\n \"-o\", \"DPkg::Options::=--root=/buildroot\",\n \"-o\", \"DPkg::Options::=--force-unsafe-io\",\n \"-o\", \"DPkg::Options::=--force-architecture\",\n \"-o\", \"DPkg::Options::=--force-depends\",\n \"-o\", \"DPkg::Options::=--no-debsig\",\n \"-o\", \"DPkg::Use-Pty=false\",\n \"-o\", \"DPkg::Install::Recursive::Minimum=1000\",\n \"-o\", \"pkgCacheGen::ForceEssential=,\",\n ]\n\n if not context.config.repository_key_check:\n cmdline += [\n \"-o\", \"Acquire::AllowInsecureRepositories=true\",\n \"-o\", \"Acquire::AllowDowngradeToInsecureRepositories=true\",\n \"-o\", \"APT::Get::AllowUnauthenticated=true\",\n ]\n\n if not context.config.with_docs:\n cmdline += [\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/doc/*\",\n \"-o\", \"DPkg::Options::=--path-include=/usr/share/doc/*/copyright\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/man/*\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/groff/*\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/info/*\",\n ]\n\n if context.config.proxy_url:\n cmdline += [\n \"-o\", f\"Acquire::http::Proxy={context.config.proxy_url}\",\n \"-o\", f\"Acquire::https::Proxy={context.config.proxy_url}\",\n ]\n\n return cmdline\n\n @classmethod\n def invoke(\n cls,\n 
context: Context,\n operation: str,\n arguments: Sequence[str] = (),\n *,\n apivfs: bool = False,\n mounts: Sequence[Mount] = (),\n stdout: _FILE = None,\n ) -> CompletedProcess:\n with finalize_source_mounts(\n context.config,\n ephemeral=os.getuid() == 0 and context.config.build_sources_ephemeral,\n ) as sources:\n return run(\n cls.cmd(context, \"apt-get\") + [operation, *arguments],\n sandbox=(\n context.sandbox(\n network=True,\n mounts=[Mount(context.root, \"/buildroot\"), *cls.mounts(context), *sources, *mounts],\n options=[\"--dir\", \"/work/src\", \"--chdir\", \"/work/src\"],\n ) + (apivfs_cmd() if apivfs else [])\n ),\n env=context.config.environment | cls.finalize_environment(context),\n stdout=stdout,\n )\n\n @classmethod\n def sync(cls, context: Context) -> None:\n cls.invoke(context, \"update\")\n\n @classmethod\n def createrepo(cls, context: Context) -> None:\n with (context.packages / \"Packages\").open(\"wb\") as f:\n run(\n [\"dpkg-scanpackages\", \".\"],\n stdout=f,\n sandbox=context.sandbox(\n mounts=[Mount(context.packages, context.packages, ro=True)],\n options=[\"--chdir\", context.packages],\n ),\n )\n\n (context.pkgmngr / \"etc/apt/sources.list.d\").mkdir(parents=True, exist_ok=True)\n (context.pkgmngr / \"etc/apt/sources.list.d/mkosi-local.sources\").write_text(\n textwrap.dedent(\n \"\"\"\\\n Enabled: yes\n Types: deb\n URIs: file:///work/packages\n Suites: ./\n Trusted: yes\n \"\"\"\n )\n )\n\n cls.invoke(\n context,\n \"update\",\n arguments=[\n \"-o\", \"Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources\",\n \"-o\", \"Dir::Etc::sourceparts=-\",\n \"-o\", \"APT::Get::List-Cleanup=0\",\n ],\n )\n", "path": "mkosi/installer/apt.py"}]} | 3,598 | 180 |
gh_patches_debug_27979 | rasdani/github-patches | git_diff | deepset-ai__haystack-4825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GPT-4-32k support broken
**Describe the bug**
Token limit for `gpt-4-32k` gets set to 2049.
**Error message**
```
Traceback (most recent call last):
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py", line 552, in run
node_output, stream_id = self._run_node(node_id, node_input)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py", line 467, in _run_node
return self.graph.nodes[node_id]["component"]._dispatch_run(**node_input)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py", line 201, in _dispatch_run
return self._dispatch_run_general(self.run, **kwargs)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py", line 245, in _dispatch_run_general
output, stream = run_method(**run_inputs, **run_params)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py", line 361, in run
results = self(prompt_collector=prompt_collector, **invocation_context)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py", line 145, in __call__
return self.prompt(prompt_template, *args, **kwargs)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py", line 171, in prompt
prompt = self.prompt_model._ensure_token_limit(prompt)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_model.py", line 134, in _ensure_token_limit
return self.model_invocation_layer._ensure_token_limit(prompt=prompt)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/invocation_layer/chatgpt.py", line 120, in _ensure_token_limit
raise ValueError(
ValueError: The prompt or the messages are too long (12807 tokens). The length of the prompt or messages and the answer (100 tokens) should be within the max token limit (2049 tokens). Reduce the length of the prompt or messages.
```
**Expected behavior**
`gpt-4-32k` should support up to 32768 tokens.
**To Reproduce**
Pass more than 2049 tokens to a `PromptNode` that uses `gpt-4-32k`.
--- END ISSUE ---
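A quick way to check the behaviour described above is to call the helper that resolves the limit (defined in the file shown below) directly; this is only a sketch, and the 32768 figure is OpenAI's published limit rather than anything the snippet itself verifies:

```python
# Hypothetical reproduction: ask the helper which tokenizer/limit it resolves for each model name.
from haystack.utils.openai_utils import _openai_text_completion_tokenization_details

for name in ("text-davinci-003", "gpt-3.5-turbo", "gpt-4", "gpt-4-32k"):
    tokenizer_name, limit = _openai_text_completion_tokenization_details(name)
    print(f"{name}: tokenizer={tokenizer_name}, max_tokens_limit={limit}")
# On an affected install, gpt-4-32k reports 2049 here instead of the expected 32768.
```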
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/utils/openai_utils.py`
Content:
```
1 """Utils for using OpenAI API"""
2 import os
3 import logging
4 import platform
5 import sys
6 import json
7 from typing import Dict, Union, Tuple, Optional, List
8 import requests
9 import tenacity
10 from transformers import GPT2TokenizerFast
11
12 from haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError
13 from haystack.environment import (
14 HAYSTACK_REMOTE_API_BACKOFF_SEC,
15 HAYSTACK_REMOTE_API_MAX_RETRIES,
16 HAYSTACK_REMOTE_API_TIMEOUT_SEC,
17 )
18
19 logger = logging.getLogger(__name__)
20
21
22 machine = platform.machine().lower()
23 system = platform.system()
24
25
26 OPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))
27 OPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))
28 OPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))
29
30
31 USE_TIKTOKEN = False
32 if sys.version_info >= (3, 8) and (machine in ["amd64", "x86_64"] or (machine == "arm64" and system == "Darwin")):
33 USE_TIKTOKEN = True
34
35 if USE_TIKTOKEN:
36 import tiktoken # pylint: disable=import-error
37 from tiktoken.model import MODEL_TO_ENCODING
38 else:
39 logger.warning(
40 "OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast."
41 )
42
43
44 def load_openai_tokenizer(tokenizer_name: str):
45 """Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast
46 from the transformers library.
47
48 :param tokenizer_name: The name of the tokenizer to load.
49 """
50 if USE_TIKTOKEN:
51 logger.debug("Using tiktoken %s tokenizer", tokenizer_name)
52 tokenizer = tiktoken.get_encoding(tokenizer_name)
53 else:
54 logger.debug("Using GPT2TokenizerFast tokenizer")
55 tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)
56 return tokenizer
57
58
59 def count_openai_tokens(text: str, tokenizer) -> int:
60 """Count the number of tokens in `text` based on the provided OpenAI `tokenizer`.
61
62 :param text: A string to be tokenized.
63 :param tokenizer: An OpenAI tokenizer.
64 """
65 if USE_TIKTOKEN:
66 return len(tokenizer.encode(text))
67 else:
68 return len(tokenizer.tokenize(text))
69
70
71 def count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:
72 """Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.
73
74 :param messages: The messages to be tokenized.
75 :param tokenizer: An OpenAI tokenizer.
76 """
77 # adapted from https://platform.openai.com/docs/guides/chat/introduction
78 # should be kept up to date
79 num_tokens = 0
80 for message in messages:
81 num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
82 for key, value in message.items():
83 if USE_TIKTOKEN:
84 num_tokens += len(tokenizer.encode(value))
85 else:
86 num_tokens += len(tokenizer.tokenize(value))
87 if key == "name": # if there's a name, the role is omitted
88 num_tokens += -1 # role is always required and always 1 token
89 num_tokens += 2 # every reply is primed with <im_start>assistant
90 return num_tokens
91
92
93 def _openai_text_completion_tokenization_details(model_name: str):
94 """Return the tokenizer name and max tokens limit for a given OpenAI `model_name`.
95
96 :param model_name: Name of the OpenAI model.
97 """
98 tokenizer_name = "gpt2"
99 max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3
100 model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None
101
102 # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72
103 if model_name == "gpt-35-turbo" and USE_TIKTOKEN:
104 model_tokenizer = "cl100k_base"
105
106 if model_tokenizer:
107 # Based on OpenAI models page, 'davinci' considers have 2049 tokens,
108 ## therefore, it is better to add `text-davinci` instead to the condition.
109 ## Ref: https://platform.openai.com/docs/models/gpt-3-5
110 ## https://platform.openai.com/docs/models/gpt-3
111 if "text-davinci" in model_name:
112 max_tokens_limit = 4097
113 tokenizer_name = model_tokenizer
114 elif model_name.startswith("gpt-3"):
115 max_tokens_limit = 4096
116 tokenizer_name = model_tokenizer
117 # Ref: https://platform.openai.com/docs/models/gpt-4
118 elif model_name.startswith("gpt-4-32k"):
119 max_tokens_limit = 32768 # tokens
120 tokenizer_name = model_tokenizer
121 elif model_name.startswith("gpt-4"):
122 max_tokens_limit = 8192 # tokens
123 tokenizer_name = model_tokenizer
124 else:
125 tokenizer_name = model_tokenizer
126
127 return tokenizer_name, max_tokens_limit
128
129
130 @tenacity.retry(
131 reraise=True,
132 retry=tenacity.retry_if_exception_type(OpenAIError)
133 and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),
134 wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),
135 stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),
136 )
137 def openai_request(
138 url: str,
139 headers: Dict,
140 payload: Dict,
141 timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,
142 read_response: Optional[bool] = True,
143 **kwargs,
144 ):
145 """Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.
146
147 :param url: The URL of the OpenAI API.
148 :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.
149 :param payload: The payload to send with the request.
150 :param timeout: The timeout length of the request. The default is 30s.
151 :param read_response: Whether to read the response as JSON. The default is True.
152 """
153 response = requests.request("POST", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)
154 if read_response:
155 json_response = json.loads(response.text)
156
157 if response.status_code != 200:
158 openai_error: OpenAIError
159 if response.status_code == 429:
160 openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
161 elif response.status_code == 401:
162 openai_error = OpenAIUnauthorizedError(f"API key is invalid: {response.text}")
163 else:
164 openai_error = OpenAIError(
165 f"OpenAI returned an error.\n"
166 f"Status code: {response.status_code}\n"
167 f"Response body: {response.text}",
168 status_code=response.status_code,
169 )
170 raise openai_error
171 if read_response:
172 return json_response
173 else:
174 return response
175
176
177 def _check_openai_finish_reason(result: Dict, payload: Dict) -> None:
178 """Check the `finish_reason` the answers returned by OpenAI completions endpoint.
179 If the `finish_reason` is `length` or `content_filter`, log a warning to the user.
180
181 :param result: The result returned from the OpenAI API.
182 :param payload: The payload sent to the OpenAI API.
183 """
184 number_of_truncated_completions = sum(1 for ans in result["choices"] if ans["finish_reason"] == "length")
185 if number_of_truncated_completions > 0:
186 logger.warning(
187 "%s out of the %s completions have been truncated before reaching a natural stopping point. "
188 "Increase the max_tokens parameter to allow for longer completions.",
189 number_of_truncated_completions,
190 payload["n"],
191 )
192
193 number_of_content_filtered_completions = sum(
194 1 for ans in result["choices"] if ans["finish_reason"] == "content_filter"
195 )
196 if number_of_content_filtered_completions > 0:
197 logger.warning(
198 "%s out of the %s completions have omitted content due to a flag from OpenAI content filters.",
199 number_of_truncated_completions,
200 payload["n"],
201 )
202
```
--- END FILES ---
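A note on the lookup in `_openai_text_completion_tokenization_details` above: it consults `MODEL_TO_ENCODING` by exact name only, so a variant such as `gpt-4-32k` that newer tiktoken releases cover via `MODEL_PREFIX_TO_ENCODING` rather than an exact entry (a version-dependent detail, stated here as an assumption) resolves to no tokenizer, and the function falls back to the conservative 2049 default. A minimal sketch of an exact-then-prefix lookup, using small illustrative stand-ins for tiktoken's tables:

```python
# Illustrative stand-ins for tiktoken's MODEL_TO_ENCODING / MODEL_PREFIX_TO_ENCODING tables.
MODEL_TO_ENCODING = {"gpt-4": "cl100k_base", "gpt-3.5-turbo": "cl100k_base"}
MODEL_PREFIX_TO_ENCODING = {"gpt-4-": "cl100k_base", "gpt-3.5-turbo-": "cl100k_base"}


def resolve_encoding(model_name: str):
    """Return the encoding name for a model, trying exact names before prefixes."""
    if model_name in MODEL_TO_ENCODING:
        return MODEL_TO_ENCODING[model_name]
    for prefix, encoding in MODEL_PREFIX_TO_ENCODING.items():
        if model_name.startswith(prefix):
            return encoding
    return None  # caller keeps its conservative default (gpt2 / 2049 in the file above)


assert resolve_encoding("gpt-4") == "cl100k_base"      # exact hit
assert resolve_encoding("gpt-4-32k") == "cl100k_base"  # prefix hit, no longer falls through
```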
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/utils/openai_utils.py b/haystack/utils/openai_utils.py
--- a/haystack/utils/openai_utils.py
+++ b/haystack/utils/openai_utils.py
@@ -34,7 +34,7 @@
if USE_TIKTOKEN:
import tiktoken # pylint: disable=import-error
- from tiktoken.model import MODEL_TO_ENCODING
+ from tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING
else:
logger.warning(
"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast."
@@ -97,11 +97,18 @@
"""
tokenizer_name = "gpt2"
max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3
- model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None
- # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72
- if model_name == "gpt-35-turbo" and USE_TIKTOKEN:
- model_tokenizer = "cl100k_base"
+ if USE_TIKTOKEN:
+ if model_name == "gpt-35-turbo":
+ # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72
+ model_tokenizer = "cl100k_base"
+ elif model_name in MODEL_TO_ENCODING:
+ model_tokenizer = MODEL_TO_ENCODING[model_name]
+ else:
+ for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():
+ if model_name.startswith(model_prefix):
+ model_tokenizer = tokenizer
+ break
if model_tokenizer:
# Based on OpenAI models page, 'davinci' considers have 2049 tokens,
| {"golden_diff": "diff --git a/haystack/utils/openai_utils.py b/haystack/utils/openai_utils.py\n--- a/haystack/utils/openai_utils.py\n+++ b/haystack/utils/openai_utils.py\n@@ -34,7 +34,7 @@\n \n if USE_TIKTOKEN:\n import tiktoken # pylint: disable=import-error\n- from tiktoken.model import MODEL_TO_ENCODING\n+ from tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING\n else:\n logger.warning(\n \"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast.\"\n@@ -97,11 +97,18 @@\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n- model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None\n \n- # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n- if model_name == \"gpt-35-turbo\" and USE_TIKTOKEN:\n- model_tokenizer = \"cl100k_base\"\n+ if USE_TIKTOKEN:\n+ if model_name == \"gpt-35-turbo\":\n+ # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n+ model_tokenizer = \"cl100k_base\"\n+ elif model_name in MODEL_TO_ENCODING:\n+ model_tokenizer = MODEL_TO_ENCODING[model_name]\n+ else:\n+ for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():\n+ if model_name.startswith(model_prefix):\n+ model_tokenizer = tokenizer\n+ break\n \n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n", "issue": "GPT-4-32k support broken\n**Describe the bug**\r\nToken limit for `gpt-4-32k` gets set to 2049.\r\n\r\n**Error message**\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py\", line 552, in run\r\n node_output, stream_id = self._run_node(node_id, node_input)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py\", line 467, in _run_node\r\n return self.graph.nodes[node_id][\"component\"]._dispatch_run(**node_input)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py\", line 201, in _dispatch_run\r\n return self._dispatch_run_general(self.run, **kwargs)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py\", line 245, in _dispatch_run_general\r\n output, stream = run_method(**run_inputs, **run_params)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py\", line 361, in run\r\n results = self(prompt_collector=prompt_collector, **invocation_context)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py\", line 145, in __call__\r\n return self.prompt(prompt_template, *args, **kwargs)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py\", line 171, in prompt\r\n prompt = self.prompt_model._ensure_token_limit(prompt)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_model.py\", line 134, in _ensure_token_limit\r\n return self.model_invocation_layer._ensure_token_limit(prompt=prompt)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/invocation_layer/chatgpt.py\", line 120, in _ensure_token_limit\r\n raise ValueError(\r\nValueError: The prompt or the messages are too long (12807 tokens). 
The length of the prompt or messages and the answer (100 tokens) should be within the max token limit (2049 tokens). Reduce the length of the prompt or messages.\r\n```\r\n\r\n**Expected behavior**\r\n`gpt-4-32k` should support up to 32768 tokens.\r\n\r\n**To Reproduce**\r\nPass more than 2049 tokens to a PromptNode using `gpt-4-32k`\n", "before_files": [{"content": "\"\"\"Utils for using OpenAI API\"\"\"\nimport os\nimport logging\nimport platform\nimport sys\nimport json\nfrom typing import Dict, Union, Tuple, Optional, List\nimport requests\nimport tenacity\nfrom transformers import GPT2TokenizerFast\n\nfrom haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError\nfrom haystack.environment import (\n HAYSTACK_REMOTE_API_BACKOFF_SEC,\n HAYSTACK_REMOTE_API_MAX_RETRIES,\n HAYSTACK_REMOTE_API_TIMEOUT_SEC,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nmachine = platform.machine().lower()\nsystem = platform.system()\n\n\nOPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))\nOPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))\nOPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))\n\n\nUSE_TIKTOKEN = False\nif sys.version_info >= (3, 8) and (machine in [\"amd64\", \"x86_64\"] or (machine == \"arm64\" and system == \"Darwin\")):\n USE_TIKTOKEN = True\n\nif USE_TIKTOKEN:\n import tiktoken # pylint: disable=import-error\n from tiktoken.model import MODEL_TO_ENCODING\nelse:\n logger.warning(\n \"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast.\"\n )\n\n\ndef load_openai_tokenizer(tokenizer_name: str):\n \"\"\"Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast\n from the transformers library.\n\n :param tokenizer_name: The name of the tokenizer to load.\n \"\"\"\n if USE_TIKTOKEN:\n logger.debug(\"Using tiktoken %s tokenizer\", tokenizer_name)\n tokenizer = tiktoken.get_encoding(tokenizer_name)\n else:\n logger.debug(\"Using GPT2TokenizerFast tokenizer\")\n tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)\n return tokenizer\n\n\ndef count_openai_tokens(text: str, tokenizer) -> int:\n \"\"\"Count the number of tokens in `text` based on the provided OpenAI `tokenizer`.\n\n :param text: A string to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n if USE_TIKTOKEN:\n return len(tokenizer.encode(text))\n else:\n return len(tokenizer.tokenize(text))\n\n\ndef count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:\n \"\"\"Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.\n\n :param messages: The messages to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n # adapted from https://platform.openai.com/docs/guides/chat/introduction\n # should be kept up to date\n num_tokens = 0\n for message in messages:\n num_tokens += 4 # every message follows <im_start>{role/name}\\n{content}<im_end>\\n\n for key, value in message.items():\n if USE_TIKTOKEN:\n num_tokens += len(tokenizer.encode(value))\n else:\n num_tokens += len(tokenizer.tokenize(value))\n if key == \"name\": # if there's a name, the role is omitted\n num_tokens += -1 # role is always required and always 1 token\n num_tokens += 2 # every reply is primed with <im_start>assistant\n return num_tokens\n\n\ndef _openai_text_completion_tokenization_details(model_name: str):\n \"\"\"Return the tokenizer name and max tokens limit for a given 
OpenAI `model_name`.\n\n :param model_name: Name of the OpenAI model.\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None\n\n # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n if model_name == \"gpt-35-turbo\" and USE_TIKTOKEN:\n model_tokenizer = \"cl100k_base\"\n\n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n ## therefore, it is better to add `text-davinci` instead to the condition.\n ## Ref: https://platform.openai.com/docs/models/gpt-3-5\n ## https://platform.openai.com/docs/models/gpt-3\n if \"text-davinci\" in model_name:\n max_tokens_limit = 4097\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3\"):\n max_tokens_limit = 4096\n tokenizer_name = model_tokenizer\n # Ref: https://platform.openai.com/docs/models/gpt-4\n elif model_name.startswith(\"gpt-4-32k\"):\n max_tokens_limit = 32768 # tokens\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-4\"):\n max_tokens_limit = 8192 # tokens\n tokenizer_name = model_tokenizer\n else:\n tokenizer_name = model_tokenizer\n\n return tokenizer_name, max_tokens_limit\n\n\[email protected](\n reraise=True,\n retry=tenacity.retry_if_exception_type(OpenAIError)\n and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),\n wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),\n stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),\n)\ndef openai_request(\n url: str,\n headers: Dict,\n payload: Dict,\n timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,\n read_response: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.\n\n :param url: The URL of the OpenAI API.\n :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.\n :param payload: The payload to send with the request.\n :param timeout: The timeout length of the request. The default is 30s.\n :param read_response: Whether to read the response as JSON. 
The default is True.\n \"\"\"\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)\n if read_response:\n json_response = json.loads(response.text)\n\n if response.status_code != 200:\n openai_error: OpenAIError\n if response.status_code == 429:\n openai_error = OpenAIRateLimitError(f\"API rate limit exceeded: {response.text}\")\n elif response.status_code == 401:\n openai_error = OpenAIUnauthorizedError(f\"API key is invalid: {response.text}\")\n else:\n openai_error = OpenAIError(\n f\"OpenAI returned an error.\\n\"\n f\"Status code: {response.status_code}\\n\"\n f\"Response body: {response.text}\",\n status_code=response.status_code,\n )\n raise openai_error\n if read_response:\n return json_response\n else:\n return response\n\n\ndef _check_openai_finish_reason(result: Dict, payload: Dict) -> None:\n \"\"\"Check the `finish_reason` the answers returned by OpenAI completions endpoint.\n If the `finish_reason` is `length` or `content_filter`, log a warning to the user.\n\n :param result: The result returned from the OpenAI API.\n :param payload: The payload sent to the OpenAI API.\n \"\"\"\n number_of_truncated_completions = sum(1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"length\")\n if number_of_truncated_completions > 0:\n logger.warning(\n \"%s out of the %s completions have been truncated before reaching a natural stopping point. \"\n \"Increase the max_tokens parameter to allow for longer completions.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n\n number_of_content_filtered_completions = sum(\n 1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"content_filter\"\n )\n if number_of_content_filtered_completions > 0:\n logger.warning(\n \"%s out of the %s completions have omitted content due to a flag from OpenAI content filters.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n", "path": "haystack/utils/openai_utils.py"}], "after_files": [{"content": "\"\"\"Utils for using OpenAI API\"\"\"\nimport os\nimport logging\nimport platform\nimport sys\nimport json\nfrom typing import Dict, Union, Tuple, Optional, List\nimport requests\nimport tenacity\nfrom transformers import GPT2TokenizerFast\n\nfrom haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError\nfrom haystack.environment import (\n HAYSTACK_REMOTE_API_BACKOFF_SEC,\n HAYSTACK_REMOTE_API_MAX_RETRIES,\n HAYSTACK_REMOTE_API_TIMEOUT_SEC,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nmachine = platform.machine().lower()\nsystem = platform.system()\n\n\nOPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))\nOPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))\nOPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))\n\n\nUSE_TIKTOKEN = False\nif sys.version_info >= (3, 8) and (machine in [\"amd64\", \"x86_64\"] or (machine == \"arm64\" and system == \"Darwin\")):\n USE_TIKTOKEN = True\n\nif USE_TIKTOKEN:\n import tiktoken # pylint: disable=import-error\n from tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING\nelse:\n logger.warning(\n \"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. 
Falling back to GPT2TokenizerFast.\"\n )\n\n\ndef load_openai_tokenizer(tokenizer_name: str):\n \"\"\"Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast\n from the transformers library.\n\n :param tokenizer_name: The name of the tokenizer to load.\n \"\"\"\n if USE_TIKTOKEN:\n logger.debug(\"Using tiktoken %s tokenizer\", tokenizer_name)\n tokenizer = tiktoken.get_encoding(tokenizer_name)\n else:\n logger.debug(\"Using GPT2TokenizerFast tokenizer\")\n tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)\n return tokenizer\n\n\ndef count_openai_tokens(text: str, tokenizer) -> int:\n \"\"\"Count the number of tokens in `text` based on the provided OpenAI `tokenizer`.\n\n :param text: A string to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n if USE_TIKTOKEN:\n return len(tokenizer.encode(text))\n else:\n return len(tokenizer.tokenize(text))\n\n\ndef count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:\n \"\"\"Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.\n\n :param messages: The messages to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n # adapted from https://platform.openai.com/docs/guides/chat/introduction\n # should be kept up to date\n num_tokens = 0\n for message in messages:\n num_tokens += 4 # every message follows <im_start>{role/name}\\n{content}<im_end>\\n\n for key, value in message.items():\n if USE_TIKTOKEN:\n num_tokens += len(tokenizer.encode(value))\n else:\n num_tokens += len(tokenizer.tokenize(value))\n if key == \"name\": # if there's a name, the role is omitted\n num_tokens += -1 # role is always required and always 1 token\n num_tokens += 2 # every reply is primed with <im_start>assistant\n return num_tokens\n\n\ndef _openai_text_completion_tokenization_details(model_name: str):\n \"\"\"Return the tokenizer name and max tokens limit for a given OpenAI `model_name`.\n\n :param model_name: Name of the OpenAI model.\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n\n if USE_TIKTOKEN:\n if model_name == \"gpt-35-turbo\":\n # covering the lack of support in Tiktoken. 
https://github.com/openai/tiktoken/pull/72\n model_tokenizer = \"cl100k_base\"\n elif model_name in MODEL_TO_ENCODING:\n model_tokenizer = MODEL_TO_ENCODING[model_name]\n else:\n for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():\n if model_name.startswith(model_prefix):\n model_tokenizer = tokenizer\n break\n\n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n ## therefore, it is better to add `text-davinci` instead to the condition.\n ## Ref: https://platform.openai.com/docs/models/gpt-3-5\n ## https://platform.openai.com/docs/models/gpt-3\n if \"text-davinci\" in model_name:\n max_tokens_limit = 4097\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3\"):\n max_tokens_limit = 4096\n tokenizer_name = model_tokenizer\n # Ref: https://platform.openai.com/docs/models/gpt-4\n elif model_name.startswith(\"gpt-4-32k\"):\n max_tokens_limit = 32768 # tokens\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-4\"):\n max_tokens_limit = 8192 # tokens\n tokenizer_name = model_tokenizer\n else:\n tokenizer_name = model_tokenizer\n\n return tokenizer_name, max_tokens_limit\n\n\[email protected](\n reraise=True,\n retry=tenacity.retry_if_exception_type(OpenAIError)\n and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),\n wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),\n stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),\n)\ndef openai_request(\n url: str,\n headers: Dict,\n payload: Dict,\n timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,\n read_response: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.\n\n :param url: The URL of the OpenAI API.\n :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.\n :param payload: The payload to send with the request.\n :param timeout: The timeout length of the request. The default is 30s.\n :param read_response: Whether to read the response as JSON. The default is True.\n \"\"\"\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)\n if read_response:\n json_response = json.loads(response.text)\n\n if response.status_code != 200:\n openai_error: OpenAIError\n if response.status_code == 429:\n openai_error = OpenAIRateLimitError(f\"API rate limit exceeded: {response.text}\")\n elif response.status_code == 401:\n openai_error = OpenAIUnauthorizedError(f\"API key is invalid: {response.text}\")\n else:\n openai_error = OpenAIError(\n f\"OpenAI returned an error.\\n\"\n f\"Status code: {response.status_code}\\n\"\n f\"Response body: {response.text}\",\n status_code=response.status_code,\n )\n raise openai_error\n if read_response:\n return json_response\n else:\n return response\n\n\ndef _check_openai_finish_reason(result: Dict, payload: Dict) -> None:\n \"\"\"Check the `finish_reason` the answers returned by OpenAI completions endpoint.\n If the `finish_reason` is `length` or `content_filter`, log a warning to the user.\n\n :param result: The result returned from the OpenAI API.\n :param payload: The payload sent to the OpenAI API.\n \"\"\"\n number_of_truncated_completions = sum(1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"length\")\n if number_of_truncated_completions > 0:\n logger.warning(\n \"%s out of the %s completions have been truncated before reaching a natural stopping point. 
\"\n \"Increase the max_tokens parameter to allow for longer completions.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n\n number_of_content_filtered_completions = sum(\n 1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"content_filter\"\n )\n if number_of_content_filtered_completions > 0:\n logger.warning(\n \"%s out of the %s completions have omitted content due to a flag from OpenAI content filters.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n", "path": "haystack/utils/openai_utils.py"}]} | 3,368 | 440 |
gh_patches_debug_15914 | rasdani/github-patches | git_diff | keras-team__keras-8240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix device names for multi_gpu_model
This PR tries to fix #8213.

`DeviceSpec.from_string(device).to_string()` is used by TensorFlow itself. ([Ref](https://github.com/tensorflow/tensorflow/blob/40c475b48c091a70ad8061c1508dff6ded2d2af6/tensorflow/python/framework/device.py#L251))
--- END ISSUE ---
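For context, the mismatch behind the linked issue appears to be that newer TensorFlow builds report devices as `/device:GPU:0` while `multi_gpu_model` constructs its expected names as `/gpu:0`, so a plain membership test fails; treat the exact strings as an assumption, since they depend on the TensorFlow version. A minimal sketch of a normalization that makes the two spellings comparable:

```python
# Hypothetical normalizer: lowercase and drop the 'device:' segment so both spellings compare equal.
def normalize_device_name(name: str) -> str:
    return name.lower().replace('device:', '')


assert normalize_device_name('/device:GPU:0') == '/gpu:0'
assert normalize_device_name('/device:CPU:0') == '/cpu:0'
assert normalize_device_name('/gpu:1') == '/gpu:1'  # already-normalized names pass through unchanged
```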
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/utils/training_utils.py`
Content:
```
1 from ..layers.merge import concatenate
2 from .. import backend as K
3 from ..layers.core import Lambda
4 from ..engine.training import Model
5
6
7 def _get_available_devices():
8 from tensorflow.python.client import device_lib
9 local_device_protos = device_lib.list_local_devices()
10 return [x.name for x in local_device_protos]
11
12
13 def multi_gpu_model(model, gpus):
14 """Replicates a model on different GPUs.
15
16 Specifically, this function implements single-machine
17 multi-GPU data parallelism. It works in the following way:
18
19 - Divide the model's input(s) into multiple sub-batches.
20 - Apply a model copy on each sub-batch. Every model copy
21 is executed on a dedicated GPU.
22 - Concatenate the results (on CPU) into one big batch.
23
24 E.g. if your `batch_size` is 64 and you use `gpus=2`,
25 then we will divide the input into 2 sub-batches of 32 samples,
26 process each sub-batch on one GPU, then return the full
27 batch of 64 processed samples.
28
29 This induces quasi-linear speedup on up to 8 GPUs.
30
31 This function is only available with the TensorFlow backend
32 for the time being.
33
34 # Arguments
35 model: A Keras model instance. To avoid OOM errors,
36 this model could have been built on CPU, for instance
37 (see usage example below).
38 gpus: Integer >= 2, number of on GPUs on which to create
39 model replicas.
40
41 # Returns
42 A Keras `Model` instance which can be used just like the initial
43 `model` argument, but which distributes its workload on multiple GPUs.
44
45 # Example
46
47 ```python
48 import tensorflow as tf
49 from keras.applications import Xception
50 from keras.utils import multi_gpu_model
51 import numpy as np
52
53 num_samples = 1000
54 height = 224
55 width = 224
56 num_classes = 1000
57
58 # Instantiate the base model
59 # (here, we do it on CPU, which is optional).
60 with tf.device('/cpu:0'):
61 model = Xception(weights=None,
62 input_shape=(height, width, 3),
63 classes=num_classes)
64
65 # Replicates the model on 8 GPUs.
66 # This assumes that your machine has 8 available GPUs.
67 parallel_model = multi_gpu_model(model, gpus=8)
68 parallel_model.compile(loss='categorical_crossentropy',
69 optimizer='rmsprop')
70
71 # Generate dummy data.
72 x = np.random.random((num_samples, height, width, 3))
73 y = np.random.random((num_samples, num_classes))
74
75 # This `fit` call will be distributed on 8 GPUs.
76 # Since the batch size is 256, each GPU will process 32 samples.
77 parallel_model.fit(x, y, epochs=20, batch_size=256)
78 ```
79 """
80 if K.backend() != 'tensorflow':
81 raise ValueError('`multi_gpu_model` is only available '
82 'with the TensorFlow backend.')
83 if gpus <= 1:
84 raise ValueError('For multi-gpu usage to be effective, '
85 'call `multi_gpu_model` with `gpus >= 2`. '
86 'Received: `gpus=%d`' % gpus)
87
88 import tensorflow as tf
89
90 target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]
91 available_devices = _get_available_devices()
92 for device in target_devices:
93 if device not in available_devices:
94 raise ValueError(
95 'To call `multi_gpu_model` with `gpus=%d`, '
96 'we expect the following devices to be available: %s. '
97 'However this machine only has: %s. '
98 'Try reducing `gpus`.' % (gpus,
99 target_devices,
100 available_devices))
101
102 def get_slice(data, i, parts):
103 shape = tf.shape(data)
104 batch_size = shape[:1]
105 input_shape = shape[1:]
106 step = batch_size // parts
107 if i == gpus - 1:
108 size = batch_size - step * i
109 else:
110 size = step
111 size = tf.concat([size, input_shape], axis=0)
112 stride = tf.concat([step, input_shape * 0], axis=0)
113 start = stride * i
114 return tf.slice(data, start, size)
115
116 all_outputs = []
117 for i in range(len(model.outputs)):
118 all_outputs.append([])
119
120 # Place a copy of the model on each GPU,
121 # each getting a slice of the inputs.
122 for i in range(gpus):
123 with tf.device('/gpu:%d' % i):
124 with tf.name_scope('replica_%d' % i):
125 inputs = []
126 # Retrieve a slice of the input.
127 for x in model.inputs:
128 input_shape = tuple(x.get_shape().as_list())[1:]
129 slice_i = Lambda(get_slice,
130 output_shape=input_shape,
131 arguments={'i': i,
132 'parts': gpus})(x)
133 inputs.append(slice_i)
134
135 # Apply model on slice
136 # (creating a model replica on the target device).
137 outputs = model(inputs)
138 if not isinstance(outputs, list):
139 outputs = [outputs]
140
141 # Save the outputs for merging back together later.
142 for o in range(len(outputs)):
143 all_outputs[o].append(outputs[o])
144
145 # Merge outputs on CPU.
146 with tf.device('/cpu:0'):
147 merged = []
148 for outputs in all_outputs:
149 merged.append(concatenate(outputs,
150 axis=0))
151 return Model(model.inputs, merged)
152
```
--- END FILES ---
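One way to see the failure mode in `multi_gpu_model` on a given machine is to print what `_get_available_devices()` actually returns and compare it with the names the function expects; the example output in the comments is an assumption, since the strings depend on the installed TensorFlow version:

```python
# Sketch: compare reported device names with the names multi_gpu_model builds internally.
from tensorflow.python.client import device_lib

available = [x.name for x in device_lib.list_local_devices()]
expected = ['/cpu:0'] + ['/gpu:%d' % i for i in range(2)]  # what multi_gpu_model(gpus=2) checks for

print(available)                                    # e.g. ['/device:CPU:0', '/device:GPU:0', ...]
print([d for d in expected if d not in available])  # non-empty means the ValueError branch is taken
```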
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras/utils/training_utils.py b/keras/utils/training_utils.py
--- a/keras/utils/training_utils.py
+++ b/keras/utils/training_utils.py
@@ -10,6 +10,11 @@
return [x.name for x in local_device_protos]
+def _normalize_device_name(name):
+ name = name.lower().replace('device:', '')
+ return name
+
+
def multi_gpu_model(model, gpus):
"""Replicates a model on different GPUs.
@@ -89,6 +94,7 @@
target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]
available_devices = _get_available_devices()
+ available_devices = [_normalize_device_name(name) for name in available_devices]
for device in target_devices:
if device not in available_devices:
raise ValueError(
| {"golden_diff": "diff --git a/keras/utils/training_utils.py b/keras/utils/training_utils.py\n--- a/keras/utils/training_utils.py\n+++ b/keras/utils/training_utils.py\n@@ -10,6 +10,11 @@\n return [x.name for x in local_device_protos]\n \n \n+def _normalize_device_name(name):\n+ name = name.lower().replace('device:', '')\n+ return name\n+\n+\n def multi_gpu_model(model, gpus):\n \"\"\"Replicates a model on different GPUs.\n \n@@ -89,6 +94,7 @@\n \n target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]\n available_devices = _get_available_devices()\n+ available_devices = [_normalize_device_name(name) for name in available_devices]\n for device in target_devices:\n if device not in available_devices:\n raise ValueError(\n", "issue": "fix device names for multi_gpu_model\nThis PR will try to fix #8213.\r\n\r\n`DeviceSpec.from_string(device).to_string()` is used by tensorflow. ([Ref](https://github.com/tensorflow/tensorflow/blob/40c475b48c091a70ad8061c1508dff6ded2d2af6/tensorflow/python/framework/device.py#L251))\n", "before_files": [{"content": "from ..layers.merge import concatenate\nfrom .. import backend as K\nfrom ..layers.core import Lambda\nfrom ..engine.training import Model\n\n\ndef _get_available_devices():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos]\n\n\ndef multi_gpu_model(model, gpus):\n \"\"\"Replicates a model on different GPUs.\n\n Specifically, this function implements single-machine\n multi-GPU data parallelism. It works in the following way:\n\n - Divide the model's input(s) into multiple sub-batches.\n - Apply a model copy on each sub-batch. Every model copy\n is executed on a dedicated GPU.\n - Concatenate the results (on CPU) into one big batch.\n\n E.g. if your `batch_size` is 64 and you use `gpus=2`,\n then we will divide the input into 2 sub-batches of 32 samples,\n process each sub-batch on one GPU, then return the full\n batch of 64 processed samples.\n\n This induces quasi-linear speedup on up to 8 GPUs.\n\n This function is only available with the TensorFlow backend\n for the time being.\n\n # Arguments\n model: A Keras model instance. 
To avoid OOM errors,\n this model could have been built on CPU, for instance\n (see usage example below).\n gpus: Integer >= 2, number of on GPUs on which to create\n model replicas.\n\n # Returns\n A Keras `Model` instance which can be used just like the initial\n `model` argument, but which distributes its workload on multiple GPUs.\n\n # Example\n\n ```python\n import tensorflow as tf\n from keras.applications import Xception\n from keras.utils import multi_gpu_model\n import numpy as np\n\n num_samples = 1000\n height = 224\n width = 224\n num_classes = 1000\n\n # Instantiate the base model\n # (here, we do it on CPU, which is optional).\n with tf.device('/cpu:0'):\n model = Xception(weights=None,\n input_shape=(height, width, 3),\n classes=num_classes)\n\n # Replicates the model on 8 GPUs.\n # This assumes that your machine has 8 available GPUs.\n parallel_model = multi_gpu_model(model, gpus=8)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop')\n\n # Generate dummy data.\n x = np.random.random((num_samples, height, width, 3))\n y = np.random.random((num_samples, num_classes))\n\n # This `fit` call will be distributed on 8 GPUs.\n # Since the batch size is 256, each GPU will process 32 samples.\n parallel_model.fit(x, y, epochs=20, batch_size=256)\n ```\n \"\"\"\n if K.backend() != 'tensorflow':\n raise ValueError('`multi_gpu_model` is only available '\n 'with the TensorFlow backend.')\n if gpus <= 1:\n raise ValueError('For multi-gpu usage to be effective, '\n 'call `multi_gpu_model` with `gpus >= 2`. '\n 'Received: `gpus=%d`' % gpus)\n\n import tensorflow as tf\n\n target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]\n available_devices = _get_available_devices()\n for device in target_devices:\n if device not in available_devices:\n raise ValueError(\n 'To call `multi_gpu_model` with `gpus=%d`, '\n 'we expect the following devices to be available: %s. '\n 'However this machine only has: %s. '\n 'Try reducing `gpus`.' % (gpus,\n target_devices,\n available_devices))\n\n def get_slice(data, i, parts):\n shape = tf.shape(data)\n batch_size = shape[:1]\n input_shape = shape[1:]\n step = batch_size // parts\n if i == gpus - 1:\n size = batch_size - step * i\n else:\n size = step\n size = tf.concat([size, input_shape], axis=0)\n stride = tf.concat([step, input_shape * 0], axis=0)\n start = stride * i\n return tf.slice(data, start, size)\n\n all_outputs = []\n for i in range(len(model.outputs)):\n all_outputs.append([])\n\n # Place a copy of the model on each GPU,\n # each getting a slice of the inputs.\n for i in range(gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('replica_%d' % i):\n inputs = []\n # Retrieve a slice of the input.\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_i = Lambda(get_slice,\n output_shape=input_shape,\n arguments={'i': i,\n 'parts': gpus})(x)\n inputs.append(slice_i)\n\n # Apply model on slice\n # (creating a model replica on the target device).\n outputs = model(inputs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save the outputs for merging back together later.\n for o in range(len(outputs)):\n all_outputs[o].append(outputs[o])\n\n # Merge outputs on CPU.\n with tf.device('/cpu:0'):\n merged = []\n for outputs in all_outputs:\n merged.append(concatenate(outputs,\n axis=0))\n return Model(model.inputs, merged)\n", "path": "keras/utils/training_utils.py"}], "after_files": [{"content": "from ..layers.merge import concatenate\nfrom .. 
import backend as K\nfrom ..layers.core import Lambda\nfrom ..engine.training import Model\n\n\ndef _get_available_devices():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos]\n\n\ndef _normalize_device_name(name):\n name = name.lower().replace('device:', '')\n return name\n\n\ndef multi_gpu_model(model, gpus):\n \"\"\"Replicates a model on different GPUs.\n\n Specifically, this function implements single-machine\n multi-GPU data parallelism. It works in the following way:\n\n - Divide the model's input(s) into multiple sub-batches.\n - Apply a model copy on each sub-batch. Every model copy\n is executed on a dedicated GPU.\n - Concatenate the results (on CPU) into one big batch.\n\n E.g. if your `batch_size` is 64 and you use `gpus=2`,\n then we will divide the input into 2 sub-batches of 32 samples,\n process each sub-batch on one GPU, then return the full\n batch of 64 processed samples.\n\n This induces quasi-linear speedup on up to 8 GPUs.\n\n This function is only available with the TensorFlow backend\n for the time being.\n\n # Arguments\n model: A Keras model instance. To avoid OOM errors,\n this model could have been built on CPU, for instance\n (see usage example below).\n gpus: Integer >= 2, number of on GPUs on which to create\n model replicas.\n\n # Returns\n A Keras `Model` instance which can be used just like the initial\n `model` argument, but which distributes its workload on multiple GPUs.\n\n # Example\n\n ```python\n import tensorflow as tf\n from keras.applications import Xception\n from keras.utils import multi_gpu_model\n import numpy as np\n\n num_samples = 1000\n height = 224\n width = 224\n num_classes = 1000\n\n # Instantiate the base model\n # (here, we do it on CPU, which is optional).\n with tf.device('/cpu:0'):\n model = Xception(weights=None,\n input_shape=(height, width, 3),\n classes=num_classes)\n\n # Replicates the model on 8 GPUs.\n # This assumes that your machine has 8 available GPUs.\n parallel_model = multi_gpu_model(model, gpus=8)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop')\n\n # Generate dummy data.\n x = np.random.random((num_samples, height, width, 3))\n y = np.random.random((num_samples, num_classes))\n\n # This `fit` call will be distributed on 8 GPUs.\n # Since the batch size is 256, each GPU will process 32 samples.\n parallel_model.fit(x, y, epochs=20, batch_size=256)\n ```\n \"\"\"\n if K.backend() != 'tensorflow':\n raise ValueError('`multi_gpu_model` is only available '\n 'with the TensorFlow backend.')\n if gpus <= 1:\n raise ValueError('For multi-gpu usage to be effective, '\n 'call `multi_gpu_model` with `gpus >= 2`. '\n 'Received: `gpus=%d`' % gpus)\n\n import tensorflow as tf\n\n target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]\n available_devices = _get_available_devices()\n available_devices = [_normalize_device_name(name) for name in available_devices]\n for device in target_devices:\n if device not in available_devices:\n raise ValueError(\n 'To call `multi_gpu_model` with `gpus=%d`, '\n 'we expect the following devices to be available: %s. '\n 'However this machine only has: %s. '\n 'Try reducing `gpus`.' 
% (gpus,\n target_devices,\n available_devices))\n\n def get_slice(data, i, parts):\n shape = tf.shape(data)\n batch_size = shape[:1]\n input_shape = shape[1:]\n step = batch_size // parts\n if i == gpus - 1:\n size = batch_size - step * i\n else:\n size = step\n size = tf.concat([size, input_shape], axis=0)\n stride = tf.concat([step, input_shape * 0], axis=0)\n start = stride * i\n return tf.slice(data, start, size)\n\n all_outputs = []\n for i in range(len(model.outputs)):\n all_outputs.append([])\n\n # Place a copy of the model on each GPU,\n # each getting a slice of the inputs.\n for i in range(gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('replica_%d' % i):\n inputs = []\n # Retrieve a slice of the input.\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_i = Lambda(get_slice,\n output_shape=input_shape,\n arguments={'i': i,\n 'parts': gpus})(x)\n inputs.append(slice_i)\n\n # Apply model on slice\n # (creating a model replica on the target device).\n outputs = model(inputs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save the outputs for merging back together later.\n for o in range(len(outputs)):\n all_outputs[o].append(outputs[o])\n\n # Merge outputs on CPU.\n with tf.device('/cpu:0'):\n merged = []\n for outputs in all_outputs:\n merged.append(concatenate(outputs,\n axis=0))\n return Model(model.inputs, merged)\n", "path": "keras/utils/training_utils.py"}]} | 1,957 | 201 |
gh_patches_debug_12524 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1734 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python supported versions need to be updated in docs
Not really a bug, but, for example, the docs still say Python 3.4 is supported in the readme and setup explanations.
Copy-pasting from https://pypi.org/project/python-telegram-bot/ :
> This library provides a pure Python interface for the Telegram Bot API. It’s compatible with Python versions 2.7, 3.3+ and PyPy.
--- END ISSUE ---
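As a hedged illustration of the cleanup being requested, the trimmed classifier list below mirrors the patch shown later in this record; the variable name is illustrative only, since in setup.py the list is passed directly to setup(classifiers=...).
```python
# Hypothetical trimmed classifier list reflecting the versions the library
# actually supports (Python 2 and 3.4 entries dropped).
TRIMMED_CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
    'Operating System :: OS Independent',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Communications :: Chat',
    'Topic :: Internet',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
]
```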
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """The setup and build script for the python-telegram-bot library."""
3
4 import codecs
5 import os
6 from setuptools import setup, find_packages
7
8
9 def requirements():
10 """Build the requirements list for this project"""
11 requirements_list = []
12
13 with open('requirements.txt') as requirements:
14 for install in requirements:
15 requirements_list.append(install.strip())
16
17 return requirements_list
18
19
20 packages = find_packages(exclude=['tests*'])
21
22 with codecs.open('README.rst', 'r', 'utf-8') as fd:
23 fn = os.path.join('telegram', 'version.py')
24 with open(fn) as fh:
25 code = compile(fh.read(), fn, 'exec')
26 exec(code)
27
28 setup(name='python-telegram-bot',
29 version=__version__,
30 author='Leandro Toledo',
31 author_email='[email protected]',
32 license='LGPLv3',
33 url='https://python-telegram-bot.org/',
34 keywords='python telegram bot api wrapper',
35 description="We have made you a wrapper you can't refuse",
36 long_description=fd.read(),
37 packages=packages,
38 install_requires=requirements(),
39 extras_require={
40 'json': 'ujson',
41 'socks': 'PySocks'
42 },
43 include_package_data=True,
44 classifiers=[
45 'Development Status :: 5 - Production/Stable',
46 'Intended Audience :: Developers',
47 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
48 'Operating System :: OS Independent',
49 'Topic :: Software Development :: Libraries :: Python Modules',
50 'Topic :: Communications :: Chat',
51 'Topic :: Internet',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 'Programming Language :: Python :: 3.7'
60 ],)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,10 +50,7 @@
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,10 +50,7 @@\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n", "issue": "Python supported versions need to be updated in docs\nNot really a bug, but for example it still says Python 3.4 is supported in readme and setup explanations.\r\n\r\nCopy-pasting from https://pypi.org/project/python-telegram-bot/ :\r\n\r\n> This library provides a pure Python interface for the Telegram Bot API. It\u2019s compatible with Python versions 2.7, 3.3+ and PyPy.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements(),\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n ],)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro 
Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements(),\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n ],)\n", "path": "setup.py"}]} | 904 | 137 |
gh_patches_debug_37494 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-1437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
vmware_content_library_info: Only lists Content Libraries with the type of "Local", does not include "Subscribed" type
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
When using the vmware_content_library_info task type to query our Content Libraries, only the Libraries with the type of "Local" are reported back to the Ansible task. We use shared ("Subscribed") Content Libraries in our environment to share a consistent Library of VM Templates between all of our vCenters.
How can we get this functionality added?
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
vmware_content_library_info
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
ansible 2.10.9
config file = /home/<redacted>/.ansible.cfg
configured module search path = ['/home/<redacted>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/<redacted>/.local/lib/python3.8/site-packages/ansible
executable location = /home/<redacted>/.local/bin/ansible
python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0]
```
##### COLLECTION VERSION
<!--- Paste verbatim output from "ansible-galaxy collection list <namespace>.<collection>" between the quotes
for example: ansible-galaxy collection list community.general
-->
```paste below
# /usr/local/lib/python3.8/dist-packages/ansible_collections
Collection Version
---------------- -------
community.vmware 1.10.0
# /home/<redacted>/.local/lib/python3.8/site-packages/ansible_collections
Collection Version
---------------- -------
community.vmware 1.10.0
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```paste below
DEFAULT_HOST_LIST(/home/<redacted>/.ansible.cfg) = ['/home/<redacted>/inventory']
DEFAULT_LOG_PATH(/home/<redacted>/.ansible.cfg) = /home/<redacted>/.ansible/logs/log.txt
DEFAULT_TIMEOUT(/home/<redacted>/.ansible.cfg) = 120
DEFAULT_VAULT_PASSWORD_FILE(/home/<redacted>/.ansible.cfg) = /home/<redacted>/playbooks/secret.yaml
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
vCenter Version - 7.0.3 build 20150588
Client - vSphere Client version 7.0.3.00700
Hosts - VMware ESXi, 7.0.3, 20036589
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: "Collect list of Content Libraries from specified vCenter"
community.vmware.vmware_content_library_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: no
register: libraries
- name: "Display list of found Content Libraries"
debug:
var: libraries
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```yaml
TASK [Display list of found Content Libraries] ****************************************************************************************************************************
ok: [localhost] => {
"libraries": {
"changed": false,
"content_libs": [
"6b5e0c60-3173-4a75-8101-33335f3bb7dd",
"7bd40369-84d6-4fd5-9cf9-7c33377f3931"
],
"failed": false
}
}
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```yaml
TASK [Display list of found Content Libraries] ****************************************************************************************************************************
ok: [localhost] => {
"libraries": {
"changed": false,
"content_libs": [
"6b5e0c60-3173-4a75-8101-33335f3bb7dd"
],
"failed": false
}
}
```
--- END ISSUE ---
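To make the gap concrete before the module code below, here is a minimal, hedged sketch of listing both library types together. It assumes an already authenticated vSphere Automation SDK client (named api_client here purely for illustration) that exposes the same content.LocalLibrary and content.SubscribedLibrary services used by the module.
```python
def list_all_content_libraries(api_client):
    """Sketch: combine 'Local' and 'Subscribed' content library IDs.

    `api_client` is assumed to be an authenticated vSphere Automation SDK
    client; either list() call may return None, so fall back to empty lists.
    """
    local = api_client.content.LocalLibrary.list() or []
    subscribed = api_client.content.SubscribedLibrary.list() or []
    return local + subscribed
```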
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/vmware_content_library_info.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2019, Ansible Project
5 # Copyright: (c) 2019, Pavan Bidkar <[email protected]>
6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
7 # SPDX-License-Identifier: GPL-3.0-or-later
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12
13 DOCUMENTATION = r'''
14 ---
15 module: vmware_content_library_info
16 short_description: Gather information about VMWare Content Library
17 description:
18 - Module to list the content libraries.
19 - Module to get information about specific content library.
20 - Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.
21 - All variables and VMware object names are case sensitive.
22 author:
23 - Pavan Bidkar (@pgbidkar)
24 requirements:
25 - vSphere Automation SDK
26 options:
27 library_id:
28 description:
29 - content library id for which details needs to be fetched.
30 type: str
31 required: False
32 extends_documentation_fragment:
33 - community.vmware.vmware_rest_client.documentation
34
35 '''
36
37 EXAMPLES = r'''
38 - name: Get List of Content Libraries
39 community.vmware.vmware_content_library_info:
40 hostname: '{{ vcenter_hostname }}'
41 username: '{{ vcenter_username }}'
42 password: '{{ vcenter_password }}'
43 delegate_to: localhost
44
45 - name: Get information about content library
46 community.vmware.vmware_content_library_info:
47 hostname: '{{ vcenter_hostname }}'
48 username: '{{ vcenter_username }}'
49 password: '{{ vcenter_password }}'
50 library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'
51 delegate_to: localhost
52 '''
53
54 RETURN = r'''
55 content_lib_details:
56 description: list of content library metadata
57 returned: on success
58 type: list
59 sample: [
60 {
61 "library_creation_time": "2019-07-02T11:50:52.242000",
62 "library_description": "new description",
63 "library_id": "13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8",
64 "library_name": "demo-local-lib",
65 "library_publish_info": {
66 "authentication_method": "NONE",
67 "persist_json_enabled": false,
68 "publish_url": null,
69 "published": false,
70 "user_name": null
71 },
72 "library_server_guid": "0fd5813b-aac7-4b92-9fb7-f18f16565613",
73 "library_type": "LOCAL",
74 "library_version": "3"
75 }
76 ]
77 content_libs:
78 description: list of content libraries
79 returned: on success
80 type: list
81 sample: [
82 "ded9c4d5-0dcd-4837-b1d8-af7398511e33",
83 "36b72549-14ed-4b5f-94cb-6213fecacc02"
84 ]
85 '''
86
87 from ansible.module_utils.basic import AnsibleModule
88 from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
89
90
91 class VmwareContentLibInfo(VmwareRestClient):
92 def __init__(self, module):
93 """Constructor."""
94 super(VmwareContentLibInfo, self).__init__(module)
95 self.content_service = self.api_client
96 self.library_info = []
97
98 def get_all_content_libs(self):
99 """Method to retrieve List of content libraries."""
100 self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())
101
102 def get_content_lib_details(self, library_id):
103 """Method to retrieve Details of contentlib with library_id"""
104 try:
105 lib_details = self.content_service.content.LocalLibrary.get(library_id)
106 except Exception as e:
107 self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
108 lib_publish_info = dict(
109 persist_json_enabled=lib_details.publish_info.persist_json_enabled,
110 authentication_method=lib_details.publish_info.authentication_method,
111 publish_url=lib_details.publish_info.publish_url,
112 published=lib_details.publish_info.published,
113 user_name=lib_details.publish_info.user_name
114 )
115 self.library_info.append(
116 dict(
117 library_name=lib_details.name,
118 library_description=lib_details.description,
119 library_id=lib_details.id,
120 library_type=lib_details.type,
121 library_creation_time=lib_details.creation_time,
122 library_server_guid=lib_details.server_guid,
123 library_version=lib_details.version,
124 library_publish_info=lib_publish_info
125 )
126 )
127
128 self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)
129
130
131 def main():
132 argument_spec = VmwareRestClient.vmware_client_argument_spec()
133 argument_spec.update(
134 library_id=dict(type='str', required=False),
135 )
136 module = AnsibleModule(argument_spec=argument_spec,
137 supports_check_mode=True)
138
139 vmware_contentlib_info = VmwareContentLibInfo(module)
140 if module.params.get('library_id'):
141 vmware_contentlib_info.get_content_lib_details(module.params['library_id'])
142 else:
143 vmware_contentlib_info.get_all_content_libs()
144
145
146 if __name__ == '__main__':
147 main()
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/vmware_content_library_info.py b/plugins/modules/vmware_content_library_info.py
--- a/plugins/modules/vmware_content_library_info.py
+++ b/plugins/modules/vmware_content_library_info.py
@@ -93,25 +93,46 @@
"""Constructor."""
super(VmwareContentLibInfo, self).__init__(module)
self.content_service = self.api_client
+ self.local_content_libraries = self.content_service.content.LocalLibrary.list()
+ if self.local_content_libraries is None:
+ self.local_content_libraries = []
+
+ self.subscribed_content_libraries = self.content_service.content.SubscribedLibrary.list()
+ if self.subscribed_content_libraries is None:
+ self.subscribed_content_libraries = []
+
self.library_info = []
def get_all_content_libs(self):
"""Method to retrieve List of content libraries."""
- self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())
+ content_libraries = self.local_content_libraries + self.subscribed_content_libraries
+
+ self.module.exit_json(changed=False, content_libs=content_libraries)
def get_content_lib_details(self, library_id):
"""Method to retrieve Details of contentlib with library_id"""
- try:
- lib_details = self.content_service.content.LocalLibrary.get(library_id)
- except Exception as e:
- self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
- lib_publish_info = dict(
- persist_json_enabled=lib_details.publish_info.persist_json_enabled,
- authentication_method=lib_details.publish_info.authentication_method,
- publish_url=lib_details.publish_info.publish_url,
- published=lib_details.publish_info.published,
- user_name=lib_details.publish_info.user_name
- )
+ lib_publish_info = None
+
+ if library_id in self.local_content_libraries:
+ try:
+ lib_details = self.content_service.content.LocalLibrary.get(library_id)
+ lib_publish_info = dict(
+ persist_json_enabled=lib_details.publish_info.persist_json_enabled,
+ authentication_method=lib_details.publish_info.authentication_method,
+ publish_url=lib_details.publish_info.publish_url,
+ published=lib_details.publish_info.published,
+ user_name=lib_details.publish_info.user_name
+ )
+ except Exception as e:
+ self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
+ elif library_id in self.subscribed_content_libraries:
+ try:
+ lib_details = self.content_service.content.SubscribedLibrary.get(library_id)
+ except Exception as e:
+ self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
+ else:
+ self.module.fail_json(exists=False, msg="Library %s not found." % library_id)
+
self.library_info.append(
dict(
library_name=lib_details.name,
| {"golden_diff": "diff --git a/plugins/modules/vmware_content_library_info.py b/plugins/modules/vmware_content_library_info.py\n--- a/plugins/modules/vmware_content_library_info.py\n+++ b/plugins/modules/vmware_content_library_info.py\n@@ -93,25 +93,46 @@\n \"\"\"Constructor.\"\"\"\n super(VmwareContentLibInfo, self).__init__(module)\n self.content_service = self.api_client\n+ self.local_content_libraries = self.content_service.content.LocalLibrary.list()\n+ if self.local_content_libraries is None:\n+ self.local_content_libraries = []\n+\n+ self.subscribed_content_libraries = self.content_service.content.SubscribedLibrary.list()\n+ if self.subscribed_content_libraries is None:\n+ self.subscribed_content_libraries = []\n+\n self.library_info = []\n \n def get_all_content_libs(self):\n \"\"\"Method to retrieve List of content libraries.\"\"\"\n- self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())\n+ content_libraries = self.local_content_libraries + self.subscribed_content_libraries\n+\n+ self.module.exit_json(changed=False, content_libs=content_libraries)\n \n def get_content_lib_details(self, library_id):\n \"\"\"Method to retrieve Details of contentlib with library_id\"\"\"\n- try:\n- lib_details = self.content_service.content.LocalLibrary.get(library_id)\n- except Exception as e:\n- self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n- lib_publish_info = dict(\n- persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n- authentication_method=lib_details.publish_info.authentication_method,\n- publish_url=lib_details.publish_info.publish_url,\n- published=lib_details.publish_info.published,\n- user_name=lib_details.publish_info.user_name\n- )\n+ lib_publish_info = None\n+\n+ if library_id in self.local_content_libraries:\n+ try:\n+ lib_details = self.content_service.content.LocalLibrary.get(library_id)\n+ lib_publish_info = dict(\n+ persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n+ authentication_method=lib_details.publish_info.authentication_method,\n+ publish_url=lib_details.publish_info.publish_url,\n+ published=lib_details.publish_info.published,\n+ user_name=lib_details.publish_info.user_name\n+ )\n+ except Exception as e:\n+ self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n+ elif library_id in self.subscribed_content_libraries:\n+ try:\n+ lib_details = self.content_service.content.SubscribedLibrary.get(library_id)\n+ except Exception as e:\n+ self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n+ else:\n+ self.module.fail_json(exists=False, msg=\"Library %s not found.\" % library_id)\n+\n self.library_info.append(\n dict(\n library_name=lib_details.name,\n", "issue": "vmware_content_library_info: Only lists Content Libraries with the type of \"Local\", does not include \"Subscribed\" type\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\nWhen using the vmware_content_library_info task type to query our Content Libraries, only the Libraries with the type of \"Local\" are reported back to the ansible task. 
We used shared or \"Subscribed\" Content Libraries in our environment, to share a consistent Library of VM Templates between all of our vCenters.\r\n\r\nHow can we get this functionality added?\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nvmware_content_library_info\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```paste below\r\nansible 2.10.9\r\n config file = /home/<redacted>/.ansible.cfg\r\n configured module search path = ['/home/<redacted>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/<redacted>/.local/lib/python3.8/site-packages/ansible\r\n executable location = /home/<redacted>/.local/bin/ansible\r\n python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0]\r\n```\r\n\r\n##### COLLECTION VERSION\r\n<!--- Paste verbatim output from \"ansible-galaxy collection list <namespace>.<collection>\" between the quotes\r\nfor example: ansible-galaxy collection list community.general\r\n-->\r\n```paste below\r\n\r\n# /usr/local/lib/python3.8/dist-packages/ansible_collections\r\nCollection Version\r\n---------------- -------\r\ncommunity.vmware 1.10.0\r\n\r\n# /home/<redacted>/.local/lib/python3.8/site-packages/ansible_collections\r\nCollection Version\r\n---------------- -------\r\ncommunity.vmware 1.10.0\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- Paste verbatim output from \"ansible-config dump --only-changed\" between quotes -->\r\n```paste below\r\nDEFAULT_HOST_LIST(/home/<redacted>/.ansible.cfg) = ['/home/<redacted>/inventory']\r\nDEFAULT_LOG_PATH(/home/<redacted>/.ansible.cfg) = /home/<redacted>/.ansible/logs/log.txt\r\nDEFAULT_TIMEOUT(/home/<redacted>/.ansible.cfg) = 120\r\nDEFAULT_VAULT_PASSWORD_FILE(/home/<redacted>/.ansible.cfg) = /home/<redacted>/playbooks/secret.yaml\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nvCenter Version - 7.0.3 build 20150588\r\nClient - vSphere Client version 7.0.3.00700\r\nHosts - VMware ESXi, 7.0.3, 20036589\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n - name: \"Collect list of Content Libraries from specified vCenter\"\r\n community.vmware.vmware_content_library_info:\r\n hostname: \"{{ hostname }}\"\r\n username: \"{{ username }}\"\r\n password: \"{{ password }}\"\r\n validate_certs: no\r\n register: libraries\r\n\r\n - name: \"Display list of found Content Libraries\"\r\n debug:\r\n var: libraries\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\n```yaml\r\nTASK [Display list of found Content Libraries] ****************************************************************************************************************************\r\nok: [localhost] => {\r\n \"libraries\": {\r\n \"changed\": false,\r\n \"content_libs\": [\r\n \"6b5e0c60-3173-4a75-8101-33335f3bb7dd\",\r\n \"7bd40369-84d6-4fd5-9cf9-7c33377f3931\"\r\n ],\r\n \"failed\": false\r\n }\r\n}\r\n```\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```yaml\r\nTASK [Display list of found Content Libraries] ****************************************************************************************************************************\r\nok: [localhost] => {\r\n \"libraries\": {\r\n \"changed\": false,\r\n \"content_libs\": [\r\n \"6b5e0c60-3173-4a75-8101-33335f3bb7dd\"\r\n ],\r\n \"failed\": false\r\n }\r\n}\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, Pavan Bidkar <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_content_library_info\nshort_description: Gather information about VMWare Content Library\ndescription:\n- Module to list the content libraries.\n- Module to get information about specific content library.\n- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.\n- All variables and VMware object names are case sensitive.\nauthor:\n- Pavan Bidkar (@pgbidkar)\nrequirements:\n- vSphere Automation SDK\noptions:\n library_id:\n description:\n - content library id for which details needs to be fetched.\n type: str\n required: False\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Get List of Content Libraries\n community.vmware.vmware_content_library_info:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n delegate_to: localhost\n\n- name: Get information about content library\n community.vmware.vmware_content_library_info:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'\n delegate_to: localhost\n'''\n\nRETURN = r'''\ncontent_lib_details:\n description: list of content library metadata\n returned: on success\n type: list\n sample: [\n {\n \"library_creation_time\": \"2019-07-02T11:50:52.242000\",\n \"library_description\": \"new description\",\n \"library_id\": \"13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8\",\n \"library_name\": \"demo-local-lib\",\n \"library_publish_info\": {\n \"authentication_method\": \"NONE\",\n \"persist_json_enabled\": false,\n \"publish_url\": null,\n \"published\": false,\n \"user_name\": null\n },\n \"library_server_guid\": \"0fd5813b-aac7-4b92-9fb7-f18f16565613\",\n \"library_type\": \"LOCAL\",\n \"library_version\": \"3\"\n }\n ]\ncontent_libs:\n description: list of content libraries\n returned: on success\n type: list\n sample: [\n \"ded9c4d5-0dcd-4837-b1d8-af7398511e33\",\n \"36b72549-14ed-4b5f-94cb-6213fecacc02\"\n ]\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\n\n\nclass VmwareContentLibInfo(VmwareRestClient):\n def __init__(self, module):\n \"\"\"Constructor.\"\"\"\n super(VmwareContentLibInfo, self).__init__(module)\n self.content_service = self.api_client\n self.library_info = []\n\n def get_all_content_libs(self):\n \"\"\"Method to retrieve List of content libraries.\"\"\"\n 
self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())\n\n def get_content_lib_details(self, library_id):\n \"\"\"Method to retrieve Details of contentlib with library_id\"\"\"\n try:\n lib_details = self.content_service.content.LocalLibrary.get(library_id)\n except Exception as e:\n self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n lib_publish_info = dict(\n persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n authentication_method=lib_details.publish_info.authentication_method,\n publish_url=lib_details.publish_info.publish_url,\n published=lib_details.publish_info.published,\n user_name=lib_details.publish_info.user_name\n )\n self.library_info.append(\n dict(\n library_name=lib_details.name,\n library_description=lib_details.description,\n library_id=lib_details.id,\n library_type=lib_details.type,\n library_creation_time=lib_details.creation_time,\n library_server_guid=lib_details.server_guid,\n library_version=lib_details.version,\n library_publish_info=lib_publish_info\n )\n )\n\n self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n library_id=dict(type='str', required=False),\n )\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n vmware_contentlib_info = VmwareContentLibInfo(module)\n if module.params.get('library_id'):\n vmware_contentlib_info.get_content_lib_details(module.params['library_id'])\n else:\n vmware_contentlib_info.get_all_content_libs()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_content_library_info.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, Pavan Bidkar <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_content_library_info\nshort_description: Gather information about VMWare Content Library\ndescription:\n- Module to list the content libraries.\n- Module to get information about specific content library.\n- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.\n- All variables and VMware object names are case sensitive.\nauthor:\n- Pavan Bidkar (@pgbidkar)\nrequirements:\n- vSphere Automation SDK\noptions:\n library_id:\n description:\n - content library id for which details needs to be fetched.\n type: str\n required: False\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Get List of Content Libraries\n community.vmware.vmware_content_library_info:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n delegate_to: localhost\n\n- name: Get information about content library\n community.vmware.vmware_content_library_info:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'\n delegate_to: localhost\n'''\n\nRETURN = r'''\ncontent_lib_details:\n description: list of content library metadata\n 
returned: on success\n type: list\n sample: [\n {\n \"library_creation_time\": \"2019-07-02T11:50:52.242000\",\n \"library_description\": \"new description\",\n \"library_id\": \"13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8\",\n \"library_name\": \"demo-local-lib\",\n \"library_publish_info\": {\n \"authentication_method\": \"NONE\",\n \"persist_json_enabled\": false,\n \"publish_url\": null,\n \"published\": false,\n \"user_name\": null\n },\n \"library_server_guid\": \"0fd5813b-aac7-4b92-9fb7-f18f16565613\",\n \"library_type\": \"LOCAL\",\n \"library_version\": \"3\"\n }\n ]\ncontent_libs:\n description: list of content libraries\n returned: on success\n type: list\n sample: [\n \"ded9c4d5-0dcd-4837-b1d8-af7398511e33\",\n \"36b72549-14ed-4b5f-94cb-6213fecacc02\"\n ]\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\n\n\nclass VmwareContentLibInfo(VmwareRestClient):\n def __init__(self, module):\n \"\"\"Constructor.\"\"\"\n super(VmwareContentLibInfo, self).__init__(module)\n self.content_service = self.api_client\n self.local_content_libraries = self.content_service.content.LocalLibrary.list()\n if self.local_content_libraries is None:\n self.local_content_libraries = []\n\n self.subscribed_content_libraries = self.content_service.content.SubscribedLibrary.list()\n if self.subscribed_content_libraries is None:\n self.subscribed_content_libraries = []\n\n self.library_info = []\n\n def get_all_content_libs(self):\n \"\"\"Method to retrieve List of content libraries.\"\"\"\n content_libraries = self.local_content_libraries + self.subscribed_content_libraries\n\n self.module.exit_json(changed=False, content_libs=content_libraries)\n\n def get_content_lib_details(self, library_id):\n \"\"\"Method to retrieve Details of contentlib with library_id\"\"\"\n lib_publish_info = None\n\n if library_id in self.local_content_libraries:\n try:\n lib_details = self.content_service.content.LocalLibrary.get(library_id)\n lib_publish_info = dict(\n persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n authentication_method=lib_details.publish_info.authentication_method,\n publish_url=lib_details.publish_info.publish_url,\n published=lib_details.publish_info.published,\n user_name=lib_details.publish_info.user_name\n )\n except Exception as e:\n self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n elif library_id in self.subscribed_content_libraries:\n try:\n lib_details = self.content_service.content.SubscribedLibrary.get(library_id)\n except Exception as e:\n self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n else:\n self.module.fail_json(exists=False, msg=\"Library %s not found.\" % library_id)\n\n self.library_info.append(\n dict(\n library_name=lib_details.name,\n library_description=lib_details.description,\n library_id=lib_details.id,\n library_type=lib_details.type,\n library_creation_time=lib_details.creation_time,\n library_server_guid=lib_details.server_guid,\n library_version=lib_details.version,\n library_publish_info=lib_publish_info\n )\n )\n\n self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n library_id=dict(type='str', required=False),\n )\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n vmware_contentlib_info = 
VmwareContentLibInfo(module)\n if module.params.get('library_id'):\n vmware_contentlib_info.get_content_lib_details(module.params['library_id'])\n else:\n vmware_contentlib_info.get_all_content_libs()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_content_library_info.py"}]} | 2,916 | 640 |
gh_patches_debug_17744 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ElasticAPM crash because of a missing context since version 6.3.0
**Describe the bug**:
Since ElasticAPM release 6.3.0, our application has started to see a ton of crashes that we believe are caused by a recent change in ElasticAPM. The elasticsearch instrumentation package tries to assign the DB type "elasticsearch" to a None context, which causes it to crash.
Here's a snippet of a stack trace from our app:
```
...
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/utils.py", line 168, in _wrapped
return func(*args, params=params, headers=headers, **kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/__init__.py", line 1026, in get
"GET", _make_path(index, doc_type, id), params=params, headers=headers
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py", line 113, in call
result_data = wrapped(*args, **kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/transport.py", line 388, in perform_request
timeout=timeout,
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py", line 56, in call
self._update_context_by_request_data(span.context, instance, args, kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py", line 72, in _update_context_by_request_data
context["db"] = {"type": "elasticsearch"}
TypeError: 'NoneType' object does not support item assignment
```
I believe this crash is related to a recent change in this commit: https://github.com/elastic/apm-agent-python/commit/ee75cb8f238303b79d0f697a7f2eca547a1dfe8c#diff-c8fb731f92134757656c157f5c3175bcb62e131c1fed1aec5041367603c204d0L62
You can see here that the context was previously assigned its DB type in a way that would still work even if the context was None, but now the code assumes the context is a dictionary. I'm not creating a PR to fix this because I'm not 100% sure if the old way was changed for a reason.
**Possible fix**
I have a very limited understanding of what that context should be before it reaches this function, but possible fixes include (a fuller sketch of option 2 follows the list):
1. Revert to the old way of assigning the DB type.
2. Test for a None context before assigning a type, for example:
```
if context is None:
context = {}
```
3. Make sure the default value of span.context is an empty dict instead of None.
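For illustration, a self-contained sketch of option 2 above, guarding against a missing context before tagging it. The helper name is hypothetical rather than the agent's real method, and it only shows the guard this issue proposes, not necessarily the fix that was eventually merged:
```python
def annotate_db_context(context):
    # Option 2 sketch: tolerate a missing (None) context, e.g. when the span
    # was dropped, instead of assuming span.context is always a dict.
    if context is None:
        context = {}
    context["db"] = {"type": "elasticsearch"}
    return context


if __name__ == "__main__":
    print(annotate_db_context(None))          # {'db': {'type': 'elasticsearch'}}
    print(annotate_db_context({"http": {}}))  # existing keys are preserved
```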
**To Reproduce**
I have no easy way to reproduce this crash because it does not happen all the time.
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.7.11
- Framework and version [e.g. Django 2.1]: Flask 2.0.1
- APM Server version: 7.12
- Agent version: 6.3.0+
**Additional Information**
Our app is launched in Gunicorn using gevent workers.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/elasticsearch.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 import re
34
35 import elasticapm
36 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
37 from elasticapm.traces import execution_context
38 from elasticapm.utils.logging import get_logger
39
40 logger = get_logger("elasticapm.instrument")
41
42 should_capture_body_re = re.compile("/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)")
43
44
45 class ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):
46 name = "elasticsearch_connection"
47
48 instrument_list = [
49 ("elasticsearch.connection.http_urllib3", "Urllib3HttpConnection.perform_request"),
50 ("elasticsearch.connection.http_requests", "RequestsHttpConnection.perform_request"),
51 ]
52
53 def call(self, module, method, wrapped, instance, args, kwargs):
54 span = execution_context.get_span()
55
56 self._update_context_by_request_data(span.context, instance, args, kwargs)
57
58 status_code, headers, raw_data = wrapped(*args, **kwargs)
59
60 span.context["http"] = {"status_code": status_code}
61
62 return status_code, headers, raw_data
63
64 def _update_context_by_request_data(self, context, instance, args, kwargs):
65 args_len = len(args)
66 url = args[1] if args_len > 1 else kwargs.get("url")
67 params = args[2] if args_len > 2 else kwargs.get("params")
68 body_serialized = args[3] if args_len > 3 else kwargs.get("body")
69
70 should_capture_body = bool(should_capture_body_re.search(url))
71
72 context["db"] = {"type": "elasticsearch"}
73 if should_capture_body:
74 query = []
75 # using both q AND body is allowed in some API endpoints / ES versions,
76 # but not in others. We simply capture both if they are there so the
77 # user can see it.
78 if params and "q" in params:
79 # 'q' is already encoded to a byte string at this point
80 # we assume utf8, which is the default
81 query.append("q=" + params["q"].decode("utf-8", errors="replace"))
82 if body_serialized:
83 if isinstance(body_serialized, bytes):
84 query.append(body_serialized.decode("utf-8", errors="replace"))
85 else:
86 query.append(body_serialized)
87 if query:
88 context["db"]["statement"] = "\n\n".join(query)
89
90 context["destination"] = {
91 "address": instance.host,
92 "service": {"name": "elasticsearch", "resource": "elasticsearch", "type": "db"},
93 }
94
95
96 class ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):
97 name = "elasticsearch_connection"
98
99 instrument_list = [
100 ("elasticsearch.transport", "Transport.perform_request"),
101 ]
102
103 def call(self, module, method, wrapped, instance, args, kwargs):
104 with elasticapm.capture_span(
105 self._get_signature(args, kwargs),
106 span_type="db",
107 span_subtype="elasticsearch",
108 span_action="query",
109 extra={},
110 skip_frames=2,
111 leaf=True,
112 ) as span:
113 result_data = wrapped(*args, **kwargs)
114
115 try:
116 span.context["db"]["rows_affected"] = result_data["hits"]["total"]["value"]
117 except (KeyError, TypeError):
118 pass
119
120 return result_data
121
122 def _get_signature(self, args, kwargs):
123 args_len = len(args)
124 http_method = args[0] if args_len else kwargs.get("method")
125 http_path = args[1] if args_len > 1 else kwargs.get("url")
126
127 return "ES %s %s" % (http_method, http_path)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py
--- a/elasticapm/instrumentation/packages/elasticsearch.py
+++ b/elasticapm/instrumentation/packages/elasticsearch.py
@@ -34,7 +34,7 @@
import elasticapm
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
-from elasticapm.traces import execution_context
+from elasticapm.traces import DroppedSpan, execution_context
from elasticapm.utils.logging import get_logger
logger = get_logger("elasticapm.instrument")
@@ -52,6 +52,8 @@
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
+ if isinstance(span, DroppedSpan):
+ return wrapped(*args, **kwargs)
self._update_context_by_request_data(span.context, instance, args, kwargs)
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/elasticsearch.py\n@@ -34,7 +34,7 @@\n \n import elasticapm\n from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\n-from elasticapm.traces import execution_context\n+from elasticapm.traces import DroppedSpan, execution_context\n from elasticapm.utils.logging import get_logger\n \n logger = get_logger(\"elasticapm.instrument\")\n@@ -52,6 +52,8 @@\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n+ if isinstance(span, DroppedSpan):\n+ return wrapped(*args, **kwargs)\n \n self._update_context_by_request_data(span.context, instance, args, kwargs)\n", "issue": "ElasticAPM crash because of a missing context since version 6.3.0\n**Describe the bug**: \r\n\r\nSince ElasticAPM release 6.3.0 our application has started to see a ton of crashes that we believed are cause by a recent change in ElasticAPM. The elasticsearch instrumentation package is trying to assign the type of DB to elasticsearch into an None context which causes it to crash. \r\n\r\nHere's a snippet of a strack trace from our app:\r\n```\r\n...\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/utils.py\", line 168, in _wrapped\r\n return func(*args, params=params, headers=headers, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/__init__.py\", line 1026, in get\r\n \"GET\", _make_path(index, doc_type, id), params=params, headers=headers\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 113, in call\r\n result_data = wrapped(*args, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/transport.py\", line 388, in perform_request\r\n timeout=timeout,\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 56, in call\r\n self._update_context_by_request_data(span.context, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 72, in _update_context_by_request_data\r\n context[\"db\"] = {\"type\": \"elasticsearch\"}\r\nTypeError: 'NoneType' object does not support item assignment\r\n```\r\n\r\nI believe this crash is related to a recent change in this commit: https://github.com/elastic/apm-agent-python/commit/ee75cb8f238303b79d0f697a7f2eca547a1dfe8c#diff-c8fb731f92134757656c157f5c3175bcb62e131c1fed1aec5041367603c204d0L62\r\n\r\nYou can see here, the context was previously assigned it's DB type in a way where even if the context was None it would still work but now it assumes the context is a dictionary. I'm not creating a PR to fix this because I'm not 100% sure if the old way was changed for a reason. 
\r\n\r\n**Possible fix**\r\nI have very limited understanding on what that context should be before reaching this function but possible fixes include: \r\n\r\n1. Revert to the old way of assigning the DB type.\r\n\r\n2. To test for None context before assigning a type.\r\n```\r\nif context is None:\r\n context = {}\r\n```\r\n\r\n3. or make sure span.context default value is an empty dict instead of None\r\n\r\n**To Reproduce**\r\n\r\nI have no easy way to reproduce this crash because it does not happen all the time. \r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.7.11\r\n- Framework and version [e.g. Django 2.1]: Flask 2.0.1\r\n- APM Server version: 7.12\r\n- Agent version: 6.3.0+\r\n\r\n**Aditional Information**\r\n\r\nOur app in launched in Gunicorn using gevent workers.\nElasticAPM crash because of a missing context since version 6.3.0\n**Describe the bug**: \r\n\r\nSince ElasticAPM release 6.3.0 our application has started to see a ton of crashes that we believed are cause by a recent change in ElasticAPM. The elasticsearch instrumentation package is trying to assign the type of DB to elasticsearch into an None context which causes it to crash. \r\n\r\nHere's a snippet of a strack trace from our app:\r\n```\r\n...\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/utils.py\", line 168, in _wrapped\r\n return func(*args, params=params, headers=headers, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/__init__.py\", line 1026, in get\r\n \"GET\", _make_path(index, doc_type, id), params=params, headers=headers\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 113, in call\r\n result_data = wrapped(*args, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/transport.py\", line 388, in perform_request\r\n timeout=timeout,\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 56, in call\r\n self._update_context_by_request_data(span.context, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 72, in _update_context_by_request_data\r\n context[\"db\"] = {\"type\": \"elasticsearch\"}\r\nTypeError: 'NoneType' object does not support item assignment\r\n```\r\n\r\nI believe this crash is related to a recent change in this commit: https://github.com/elastic/apm-agent-python/commit/ee75cb8f238303b79d0f697a7f2eca547a1dfe8c#diff-c8fb731f92134757656c157f5c3175bcb62e131c1fed1aec5041367603c204d0L62\r\n\r\nYou can see here, the context was previously assigned it's DB type in a way where even if the context was None it would still work but now it assumes the context is a dictionary. I'm not creating a PR to fix this because I'm not 100% sure if the old way was changed for a reason. 
\r\n\r\n**Possible fix**\r\nI have very limited understanding on what that context should be before reaching this function but possible fixes include: \r\n\r\n1. Revert to the old way of assigning the DB type.\r\n\r\n2. To test for None context before assigning a type.\r\n```\r\nif context is None:\r\n context = {}\r\n```\r\n\r\n3. or make sure span.context default value is an empty dict instead of None\r\n\r\n**To Reproduce**\r\n\r\nI have no easy way to reproduce this crash because it does not happen all the time. \r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.7.11\r\n- Framework and version [e.g. Django 2.1]: Flask 2.0.1\r\n- APM Server version: 7.12\r\n- Agent version: 6.3.0+\r\n\r\n**Aditional Information**\r\n\r\nOur app in launched in Gunicorn using gevent workers.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport re\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nshould_capture_body_re = re.compile(\"/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)\")\n\n\nclass ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n status_code, headers, raw_data = wrapped(*args, **kwargs)\n\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return status_code, headers, raw_data\n\n def _update_context_by_request_data(self, context, instance, args, kwargs):\n args_len = len(args)\n url = args[1] if args_len > 1 else kwargs.get(\"url\")\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n\n should_capture_body = bool(should_capture_body_re.search(url))\n\n context[\"db\"] = {\"type\": \"elasticsearch\"}\n if should_capture_body:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' is already encoded to a byte string at this point\n # we assume utf8, which is the default\n query.append(\"q=\" + params[\"q\"].decode(\"utf-8\", errors=\"replace\"))\n if body_serialized:\n if isinstance(body_serialized, bytes):\n query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n else:\n query.append(body_serialized)\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n\n context[\"destination\"] = {\n \"address\": instance.host,\n \"service\": {\"name\": \"elasticsearch\", \"resource\": \"elasticsearch\", \"type\": \"db\"},\n }\n\n\nclass ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.transport\", \"Transport.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n with elasticapm.capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = wrapped(*args, **kwargs)\n\n try:\n span.context[\"db\"][\"rows_affected\"] = result_data[\"hits\"][\"total\"][\"value\"]\n except (KeyError, TypeError):\n pass\n\n return result_data\n\n def _get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n\n return \"ES %s %s\" % (http_method, http_path)\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport re\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, execution_context\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nshould_capture_body_re = re.compile(\"/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)\")\n\n\nclass ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if isinstance(span, DroppedSpan):\n return wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n status_code, headers, raw_data = wrapped(*args, **kwargs)\n\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return status_code, headers, raw_data\n\n def _update_context_by_request_data(self, context, instance, args, kwargs):\n args_len = len(args)\n url = args[1] if args_len > 1 else kwargs.get(\"url\")\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n\n should_capture_body = bool(should_capture_body_re.search(url))\n\n context[\"db\"] = {\"type\": \"elasticsearch\"}\n if should_capture_body:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' is already encoded to a byte string at this point\n # we assume utf8, which is the default\n query.append(\"q=\" + params[\"q\"].decode(\"utf-8\", errors=\"replace\"))\n if body_serialized:\n if isinstance(body_serialized, bytes):\n query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n else:\n query.append(body_serialized)\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n\n context[\"destination\"] = {\n \"address\": instance.host,\n \"service\": {\"name\": \"elasticsearch\", \"resource\": \"elasticsearch\", \"type\": \"db\"},\n }\n\n\nclass ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.transport\", \"Transport.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n with elasticapm.capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = wrapped(*args, **kwargs)\n\n try:\n span.context[\"db\"][\"rows_affected\"] = result_data[\"hits\"][\"total\"][\"value\"]\n except (KeyError, TypeError):\n pass\n\n return result_data\n\n def _get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n\n return \"ES %s %s\" % (http_method, http_path)\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}]} | 3,529 | 210 |
gh_patches_debug_24065 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Regression with new database connection handling/transaction isolation
### Background
Digital Earth Australia provides a Python Environment on the NCI which includes ODC. It's currently running the very old version 1.8.8 under Python 3.8.13 and we want to upgrade to the latest version. We've created a new environment using ODC core version 1.8.17 running under Python 3.10.8, but in testing we've been getting errors which weren't occurring before.
This issue was reported and discussed internally in [Jira Issue DSNS-304](https://gajira.atlassian.net/browse/DSNS-304), and included code to reproduce the error.
### Error
The problem was reported as an unhandled PostgreSQL exception being thrown in a production script, which used to work cleanly.
<details>
<summary>Original code and stack trace from error</summary>
When executing the following script with the new `module load dea/20231204`
```python
#!/usr/bin/env python3
import datacube
message = """Will this work? Will this cause the new dea module to error out?"""
def main(product="ga_ls8c_ard_3"):
dc = datacube.Datacube(app="gen-list")
for l1_dataset in dc.index.datasets.search(product=product):
with open('io.txt', 'a') as file:
file.write(message + '\n')
if __name__ == "__main__":
main(product="ga_s2am_ard_3")
```
**Stack Trace**
```
Traceback (most recent call last):
File "/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py", line 18, in <module>
main(product="ga_s2am_ard_3")
File "/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py", line 14, in main
for l1_dataset in dc.index.datasets.search(product=product):
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py", line 548, in search
for product, datasets in self._do_search_by_product(query,
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py", line 691, in _do_search_by_product
with self._db_connection() as connection:
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py", line 142, in __exit__
next(self.gen)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_transaction.py", line 60, in _db_connection
with self._index._active_connection(transaction=transaction) as conn:
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py", line 142, in __exit__
next(self.gen)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/index.py", line 157, in _active_connection
with self._db._connect() as conn:
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py", line 142, in __exit__
next(self.gen)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/drivers/postgres/_connections.py", line 231, in _connect
connection.close()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1251, in close
conn.close()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 1166, in close
self._checkin()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 1008, in _checkin
_finalize_fairy(
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 800, in _finalize_fairy
connection_record.checkin()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 542, in checkin
finalizer(connection)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py", line 677, in _reset_characteristics
characteristic.reset_characteristic(self, dbapi_connection)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py", line 50, in reset_characteristic
dialect.reset_isolation_level(dbapi_conn)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py", line 748, in reset_isolation_level
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 851, in set_isolation_level
connection.set_isolation_level(level)
psycopg2.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
```
</details>
I believe @Ariana-B did some investigation resulting in https://github.com/opendatacube/datacube-core/pull/1525, especially in relation to the `connection.set_isolation_level()` changes, but it hasn't yet been resolved.
### New Findings
I've run some more tests today, and have more of an idea of what the issue is.
- The network configuration at the NCI drops idle TCP connections after 5 minutes. This is extremely short; the normal default is 2 hours.
- The above code first loads 4Gb of database results into memory (about 3 minutes), then slowly writes lines to disk IO while iterating over DB results (more than 10 minutes for the ~500k results).
- Under ODC 1.8.8, the script then exits cleanly.
- Under ODC 1.8.17, something attempts to use the db connection to change the _isolation level_. That fails because the network connection was idle for more than 5 minutes.
I'm not sure whether the change is ODC or SQLAlchemy or psycopg2, but I suspect it's ODC.
I'm about to write up a proposed improvement in a new issue, to allow passing arbitrary connection parameters to `libpq`, which will allow working around the TCP idle timeout issue here.
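As a rough illustration of that idea (not something in the original report): the keepalive names below are standard libpq/psycopg2 connection parameters, while the ODC-side hook to carry them through is exactly the missing piece, so the engine call here is only a hand-rolled sketch.

```python
from sqlalchemy import create_engine

# Standard libpq TCP keepalive parameters, passed straight through to psycopg2.
# With probes flowing every couple of minutes, a firewall that drops sessions
# idle for 5 minutes should no longer see this connection as idle.
engine = create_engine(
    "postgresql+psycopg2://user@dbhost/datacube",  # placeholder DSN
    connect_args={
        "keepalives": 1,            # enable TCP keepalives
        "keepalives_idle": 120,     # first probe after 2 minutes idle
        "keepalives_interval": 30,  # re-probe every 30 seconds
        "keepalives_count": 3,      # give up after 3 failed probes
    },
)
```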
Deprecation Warning on `pkg_resources` in 1.9 branch
When running from the `develop-1.9` branch with Python 3.10 I'm getting unwanted warning messages.
```
/home/547/dra547/.dea-sandbox/dea/20231204/local/lib/python3.10/site-packages/datacube/drivers/driver_cache.py:54: DeprecationWarning:
pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html
from pkg_resources import iter_entry_points
```
This was fixed a while ago in #1464 in the `develop` branch and subsequent releases, but has snuck back into 1.9.
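For reference, the usual replacement is the stdlib `importlib.metadata` API — a minimal sketch, assuming Python 3.10+ where `entry_points()` accepts a `group=` keyword (the `importlib_metadata` backport offers the same signature on older interpreters):

```python
from importlib.metadata import entry_points

def resolve_all(group: str):
    # Same loop shape as driver_cache.py below, but without the deprecated
    # pkg_resources import, so no DeprecationWarning is raised at import time.
    for ep in entry_points(group=group):
        driver = safe_load(ep)  # safe_load as defined in driver_cache.py
        if driver is not None:
            yield (ep.name, driver)
```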
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/drivers/driver_cache.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2024 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 import logging
6 from typing import Dict, Any, Tuple, Iterable
7
8 _LOG = logging.getLogger(__name__)
9
10
11 def load_drivers(group: str) -> Dict[str, Any]:
12 """
13 Load available drivers for a given group name.
14
15 Gracefully handles:
16
17 - Driver module not able to be imported
18 - Driver init function throwing an exception or returning None
19
20 By having driver entry_points pointing to a function, we defer loading the driver
21 module or running any code until required.
22
23 :param group: Name of the entry point group e.g. "datacube.plugins.io.read"
24
25 :returns: Dictionary String -> Driver Object
26 """
27
28 def safe_load(ep):
29 from pkg_resources import DistributionNotFound
30 # pylint: disable=broad-except,bare-except
31 try:
32 driver_init = ep.load()
33 except DistributionNotFound:
34 # This happens when entry points were marked with extra features,
35 # but extra feature were not requested for installation
36 return None
37 except Exception as e:
38 _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
39 _LOG.warning('Error was: %s', repr(e))
40 return None
41
42 try:
43 driver = driver_init()
44 except Exception:
45 _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)
46 return None
47
48 if driver is None:
49 _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)
50
51 return driver
52
53 def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
54 from pkg_resources import iter_entry_points
55 for ep in iter_entry_points(group=group, name=None):
56 driver = safe_load(ep)
57 if driver is not None:
58 yield (ep.name, driver)
59
60 return dict((name, driver) for name, driver in resolve_all(group))
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py
--- a/datacube/drivers/driver_cache.py
+++ b/datacube/drivers/driver_cache.py
@@ -26,14 +26,9 @@
"""
def safe_load(ep):
- from pkg_resources import DistributionNotFound
# pylint: disable=broad-except,bare-except
try:
driver_init = ep.load()
- except DistributionNotFound:
- # This happens when entry points were marked with extra features,
- # but extra feature were not requested for installation
- return None
except Exception as e:
_LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
_LOG.warning('Error was: %s', repr(e))
@@ -51,8 +46,8 @@
return driver
def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
- from pkg_resources import iter_entry_points
- for ep in iter_entry_points(group=group, name=None):
+ from importlib_metadata import entry_points
+ for ep in entry_points(group=group):
driver = safe_load(ep)
if driver is not None:
yield (ep.name, driver)
| {"golden_diff": "diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py\n--- a/datacube/drivers/driver_cache.py\n+++ b/datacube/drivers/driver_cache.py\n@@ -26,14 +26,9 @@\n \"\"\"\n \n def safe_load(ep):\n- from pkg_resources import DistributionNotFound\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n- except DistributionNotFound:\n- # This happens when entry points were marked with extra features,\n- # but extra feature were not requested for installation\n- return None\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n@@ -51,8 +46,8 @@\n return driver\n \n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n- from pkg_resources import iter_entry_points\n- for ep in iter_entry_points(group=group, name=None):\n+ from importlib_metadata import entry_points\n+ for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n", "issue": "Regression with new database connection handling/transaction isolation\n### Background \r\nDigital Earth Australia provides a Python Environment on the NCI which includes ODC. It's currently running the very old version 1.8.8 under Python 3.8.13 and we want to upgrade to the latest version. We've created a new environment using ODC core version 1.8.17 running under Python 3.10.8, but in testing we've been getting errors which weren't occuring before.\r\n\r\nThis issue was reported and discussed internally in [Jira Issue DSNS-304](https://gajira.atlassian.net/browse/DSNS-304), and included code to reproduce the error.\r\n\r\n### Error\r\n\r\nThe problem was reported as an unhandled PostgreSQL exception being thrown in a production script, which used to work cleanly.\r\n\r\n<details>\r\n<summary>Original code and stack trace from error</summary>\r\n\r\nWhen executing the following script with the new `module load dea/20231204`\r\n\r\n```python\r\n#!/usr/bin/env python3\r\n\r\nimport datacube\r\n\r\nmessage = \"\"\"Will this work? 
Will this cause the new dea module to error out?\"\"\"\r\n\r\ndef main(product=\"ga_ls8c_ard_3\"):\r\n dc = datacube.Datacube(app=\"gen-list\")\r\n\r\n for l1_dataset in dc.index.datasets.search(product=product):\r\n with open('io.txt', 'a') as file:\r\n file.write(message + '\\n')\r\n\r\nif __name__ == \"__main__\":\r\n main(product=\"ga_s2am_ard_3\")\r\n```\r\n**Stack Trace**\r\n```\r\nTraceback (most recent call last):\r\n File \"/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py\", line 18, in <module>\r\n main(product=\"ga_s2am_ard_3\")\r\n File \"/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py\", line 14, in main\r\n for l1_dataset in dc.index.datasets.search(product=product):\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py\", line 548, in search\r\n for product, datasets in self._do_search_by_product(query,\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py\", line 691, in _do_search_by_product\r\n with self._db_connection() as connection:\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py\", line 142, in __exit__\r\n next(self.gen)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_transaction.py\", line 60, in _db_connection\r\n with self._index._active_connection(transaction=transaction) as conn:\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py\", line 142, in __exit__\r\n next(self.gen)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/index.py\", line 157, in _active_connection\r\n with self._db._connect() as conn:\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py\", line 142, in __exit__\r\n next(self.gen)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/drivers/postgres/_connections.py\", line 231, in _connect\r\n connection.close()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/base.py\", line 1251, in close\r\n conn.close()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 1166, in close\r\n self._checkin()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 1008, in _checkin\r\n _finalize_fairy(\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 800, in _finalize_fairy\r\n connection_record.checkin()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 542, in checkin\r\n finalizer(connection)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py\", line 677, in _reset_characteristics\r\n characteristic.reset_characteristic(self, dbapi_connection)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py\", line 50, in reset_characteristic\r\n dialect.reset_isolation_level(dbapi_conn)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py\", line 748, in reset_isolation_level\r\n self.set_isolation_level(dbapi_conn, self.default_isolation_level)\r\n File 
\"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py\", line 851, in set_isolation_level\r\n connection.set_isolation_level(level)\r\npsycopg2.OperationalError: server closed the connection unexpectedly\r\n This probably means the server terminated abnormally\r\n before or while processing the request.\r\n```\r\n\r\n</details>\r\n\r\n\r\nI believe @Ariana-B did some investigation resulting in https://github.com/opendatacube/datacube-core/pull/1525 , especially in relation to the `connection.set_isolation_level()` changes, but it hasn't yet been resolved.\r\n\r\n### New Findings\r\n\r\nI've run some more tests today, and have more of an idea of what the issue is.\r\n\r\n- The network configuration at the NCI drops idle TCP connections after 5 minutes. This is extremely short, the normal default is 2 hours.\r\n- The above code first loads 4Gb of database results into memory (about 3 minutes), then slowly writes lines to disk IO while iterating over DB results (more than 10 minutes for the ~500k results).\r\n- Under ODC 1.8.8, the script then exits cleanly.\r\n- Under ODC 1.8.17, something attempts to use the db connection to change the _isolation level_. That fails because the network connection was idle for more than 5 minutes.\r\n\r\nI'm not sure whether the change is ODC or SQLAlchemy or psycopg2, but I suspect it's ODC.\r\n\r\nI'm about to write up a proposed improvement in a new issue, to allow passing arbitrary connection parameters to `libpq`, which will allow working around the TCP idle timeout issue here.\nDeprecation Warning on `pkg_resources` in 1.9 branch\nWhen running from the `develop-1.9` branch with Python 3.10 I'm getting unwanted warning messages.\r\n\r\n```\r\n/home/547/dra547/.dea-sandbox/dea/20231204/local/lib/python3.10/site-packages/datacube/drivers/driver_cache.py:54: DeprecationWarning: \r\npkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html\r\n from pkg_resources import iter_entry_points\r\n```\r\n\r\nThis was fixed a while ago in #1464 in the `develop` branch and subsequent releases, but has snuck back into 1.9.\r\n\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2024 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. 
\"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n from pkg_resources import DistributionNotFound\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except DistributionNotFound:\n # This happens when entry points were marked with extra features,\n # but extra feature were not requested for installation\n return None\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from pkg_resources import iter_entry_points\n for ep in iter_entry_points(group=group, name=None):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2024 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. \"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from importlib_metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}]} | 2,723 | 272 |
gh_patches_debug_17838 | rasdani/github-patches | git_diff | voxel51__fiftyone-1283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FR] update opencv-python-headless
### Proposal Summary
Currently this repo requires opencv-python-headless<=4.4.0.46. To cut a long story short, there are no wheels available for python3.9 and I am unable to install fiftyone (I am using docker `image: jupyter/scipy-notebook:latest`). However, version `4.5.3.56` is available for install without issue, and I propose updating the requirement for this dependency.
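A sketch of what the loosened requirement could look like in `setup.py` — the exact lower bound is a suggestion rather than something specified in this request:

```python
install_requires = [
    # ...existing dependencies...
    # Drop the old <=4.4.0.46 cap; per the report above, 4.5.3.56 installs
    # cleanly, so any reasonably recent release with cp39 wheels will do.
    "opencv-python-headless>=4.5.3",
]
```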
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Installs FiftyOne.
4
5 | Copyright 2017-2021, Voxel51, Inc.
6 | `voxel51.com <https://voxel51.com/>`_
7 |
8 """
9 import os
10 from setuptools import setup, find_packages
11 from wheel.bdist_wheel import bdist_wheel
12
13
14 class BdistWheelCustom(bdist_wheel):
15 def finalize_options(self):
16 bdist_wheel.finalize_options(self)
17 # make just the wheel require these packages, since they aren't needed
18 # for a development installation
19 self.distribution.install_requires += [
20 "fiftyone-brain>=0.7,<0.8",
21 "fiftyone-db>=0.3,<0.4",
22 ]
23
24
25 VERSION = "0.13.2"
26
27
28 def get_version():
29 if "RELEASE_VERSION" in os.environ:
30 version = os.environ["RELEASE_VERSION"]
31 if not version.startswith(VERSION):
32 raise ValueError(
33 "Release version does not match version: %s and %s"
34 % (version, VERSION)
35 )
36 return version
37
38 return VERSION
39
40
41 EXTRAS_REQUIREMENTS = {"desktop": ["fiftyone-desktop>=0.16,<0.17"]}
42
43
44 with open("README.md", "r") as fh:
45 long_description = fh.read()
46
47
48 setup(
49 name="fiftyone",
50 version=get_version(),
51 description=(
52 "FiftyOne: the open-source tool for building high-quality datasets "
53 "and computer vision models"
54 ),
55 author="Voxel51, Inc.",
56 author_email="[email protected]",
57 url="https://github.com/voxel51/fiftyone",
58 extras_require=EXTRAS_REQUIREMENTS,
59 license="Apache",
60 long_description=long_description,
61 long_description_content_type="text/markdown",
62 packages=find_packages() + ["fiftyone.recipes", "fiftyone.tutorials"],
63 package_dir={
64 "fiftyone.recipes": "docs/source/recipes",
65 "fiftyone.tutorials": "docs/source/tutorials",
66 },
67 include_package_data=True,
68 install_requires=[
69 # third-party packages
70 "argcomplete",
71 "boto3",
72 "Deprecated",
73 "eventlet",
74 "future",
75 "Jinja2",
76 "kaleido",
77 "matplotlib",
78 "mongoengine==0.20.0",
79 "motor>=2.3,<3",
80 "numpy",
81 "packaging",
82 "pandas",
83 "Pillow>=6.2",
84 "plotly>=4.14,<5",
85 "pprintpp",
86 "psutil",
87 "pymongo>=3.11,<4",
88 "PyYAML",
89 "retrying",
90 "scikit-learn",
91 "scikit-image",
92 "setuptools",
93 "tabulate",
94 "tornado>=5.1.1,<7",
95 "xmltodict",
96 "universal-analytics-python3>=1.0.1,<2",
97 # internal packages
98 "voxel51-eta>=0.5.2,<0.6",
99 # ETA dependency - restricted to a maximum version known to provide
100 # wheels here because it tends to publish sdists several hours before
101 # wheels. When users install FiftyOne in this window, they will need to
102 # compile OpenCV from source, leading to either errors or a
103 # time-consuming installation.
104 "opencv-python-headless<=4.4.0.46",
105 ],
106 classifiers=[
107 "Development Status :: 4 - Beta",
108 "Intended Audience :: Developers",
109 "Intended Audience :: Science/Research",
110 "License :: OSI Approved :: Apache Software License",
111 "Topic :: Scientific/Engineering :: Artificial Intelligence",
112 "Topic :: Scientific/Engineering :: Image Processing",
113 "Topic :: Scientific/Engineering :: Image Recognition",
114 "Topic :: Scientific/Engineering :: Information Analysis",
115 "Topic :: Scientific/Engineering :: Visualization",
116 "Operating System :: MacOS :: MacOS X",
117 "Operating System :: POSIX :: Linux",
118 "Operating System :: Microsoft :: Windows",
119 "Programming Language :: Python :: 3",
120 "Programming Language :: Python :: 3.6",
121 "Programming Language :: Python :: 3.7",
122 "Programming Language :: Python :: 3.8",
123 "Programming Language :: Python :: 3.9",
124 ],
125 entry_points={"console_scripts": ["fiftyone=fiftyone.core.cli:main"]},
126 python_requires=">=3.6",
127 cmdclass={"bdist_wheel": BdistWheelCustom},
128 )
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -78,6 +78,7 @@
"mongoengine==0.20.0",
"motor>=2.3,<3",
"numpy",
+ "opencv-python-headless",
"packaging",
"pandas",
"Pillow>=6.2",
@@ -96,12 +97,6 @@
"universal-analytics-python3>=1.0.1,<2",
# internal packages
"voxel51-eta>=0.5.2,<0.6",
- # ETA dependency - restricted to a maximum version known to provide
- # wheels here because it tends to publish sdists several hours before
- # wheels. When users install FiftyOne in this window, they will need to
- # compile OpenCV from source, leading to either errors or a
- # time-consuming installation.
- "opencv-python-headless<=4.4.0.46",
],
classifiers=[
"Development Status :: 4 - Beta",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,6 +78,7 @@\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n+ \"opencv-python-headless\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n@@ -96,12 +97,6 @@\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n- # ETA dependency - restricted to a maximum version known to provide\n- # wheels here because it tends to publish sdists several hours before\n- # wheels. When users install FiftyOne in this window, they will need to\n- # compile OpenCV from source, leading to either errors or a\n- # time-consuming installation.\n- \"opencv-python-headless<=4.4.0.46\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n", "issue": "[FR] update opencv-python-headless\n### Proposal Summary\r\nCurrently this repo requires opencv-python-headless<=4.4.0.46. To cut a long story short there are no wheels available for python3.9 and I am unable to install fiftyone (I am using docker `image: jupyter/scipy-notebook:latest`). However version `4.5.3.56` is available for install without issue, and I propose updating the requirement for this dependency.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nfrom setuptools import setup, find_packages\nfrom wheel.bdist_wheel import bdist_wheel\n\n\nclass BdistWheelCustom(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n # make just the wheel require these packages, since they aren't needed\n # for a development installation\n self.distribution.install_requires += [\n \"fiftyone-brain>=0.7,<0.8\",\n \"fiftyone-db>=0.3,<0.4\",\n ]\n\n\nVERSION = \"0.13.2\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.16,<0.17\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages() + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n include_package_data=True,\n install_requires=[\n # third-party packages\n \"argcomplete\",\n \"boto3\",\n \"Deprecated\",\n \"eventlet\",\n \"future\",\n \"Jinja2\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14,<5\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.11,<4\",\n \"PyYAML\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"tabulate\",\n \"tornado>=5.1.1,<7\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n # 
ETA dependency - restricted to a maximum version known to provide\n # wheels here because it tends to publish sdists several hours before\n # wheels. When users install FiftyOne in this window, they will need to\n # compile OpenCV from source, leading to either errors or a\n # time-consuming installation.\n \"opencv-python-headless<=4.4.0.46\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.6\",\n cmdclass={\"bdist_wheel\": BdistWheelCustom},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nfrom setuptools import setup, find_packages\nfrom wheel.bdist_wheel import bdist_wheel\n\n\nclass BdistWheelCustom(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n # make just the wheel require these packages, since they aren't needed\n # for a development installation\n self.distribution.install_requires += [\n \"fiftyone-brain>=0.7,<0.8\",\n \"fiftyone-db>=0.3,<0.4\",\n ]\n\n\nVERSION = \"0.13.2\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.16,<0.17\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages() + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n include_package_data=True,\n install_requires=[\n # third-party packages\n \"argcomplete\",\n \"boto3\",\n \"Deprecated\",\n \"eventlet\",\n \"future\",\n \"Jinja2\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n \"opencv-python-headless\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14,<5\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.11,<4\",\n \"PyYAML\",\n \"retrying\",\n 
\"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"tabulate\",\n \"tornado>=5.1.1,<7\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.6\",\n cmdclass={\"bdist_wheel\": BdistWheelCustom},\n)\n", "path": "setup.py"}]} | 1,643 | 242 |
gh_patches_debug_29558 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3795 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
[BUG]: booster API docstring is misaligned
### 🐛 Describe the bug
The example should follow markdown format to be correctly rendered on the documentation website.
### Environment
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/booster/booster.py`
Content:
```
1 import warnings
2 from contextlib import contextmanager
3 from typing import Callable, Iterator, List, Optional, Tuple, Union
4
5 import torch
6 import torch.nn as nn
7 from torch.optim import Optimizer
8 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
9 from torch.utils.data import DataLoader
10
11 from colossalai.checkpoint_io import GeneralCheckpointIO
12
13 from .accelerator import Accelerator
14 from .mixed_precision import MixedPrecision, mixed_precision_factory
15 from .plugin import Plugin
16
17 __all__ = ['Booster']
18
19
20 class Booster:
21 """
22 Booster is a high-level API for training neural networks. It provides a unified interface for
23 training with different precision, accelerator, and plugin.
24
25 Examples:
26 >>> colossalai.launch(...)
27 >>> plugin = GeminiPlugin(stage=3, ...)
28 >>> booster = Booster(precision='fp16', plugin=plugin)
29 >>>
30 >>> model = GPT2()
31 >>> optimizer = Adam(model.parameters())
32 >>> dataloader = Dataloader(Dataset)
33 >>> lr_scheduler = LinearWarmupScheduler()
34 >>> criterion = GPTLMLoss()
35 >>>
36 >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
37 >>>
38 >>> for epoch in range(max_epochs):
39 >>> for input_ids, attention_mask in dataloader:
40 >>> outputs = model(input_ids, attention_mask)
41 >>> loss = criterion(outputs.logits, input_ids)
42 >>> booster.backward(loss, optimizer)
43 >>> optimizer.step()
44 >>> lr_scheduler.step()
45 >>> optimizer.zero_grad()
46
47
48 Args:
49 device (str or torch.device): The device to run the training. Default: 'cuda'.
50 mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.
51 If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.
52 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.
53 plugin (Plugin): The plugin to run the training. Default: None.
54 """
55
56 def __init__(self,
57 device: str = 'cuda',
58 mixed_precision: Union[MixedPrecision, str] = None,
59 plugin: Optional[Plugin] = None) -> None:
60 if plugin is not None:
61 assert isinstance(
62 plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'
63 self.plugin = plugin
64
65 # set accelerator
66 if self.plugin and self.plugin.control_device():
67 self.accelerator = None
68 warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')
69 else:
70 self.accelerator = Accelerator(device)
71
72 # set precision
73 if self.plugin and self.plugin.control_precision():
74 warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')
75 self.mixed_precision = None
76 elif mixed_precision is None:
77 self.mixed_precision = None
78 else:
79 # validate and set precision
80 if isinstance(mixed_precision, str):
81 # the user will take the default arguments for amp training
82 self.mixed_precision = mixed_precision_factory(mixed_precision)
83 elif isinstance(mixed_precision, MixedPrecision):
84 # the user can customize the arguments by passing the precision object
85 self.mixed_precision = mixed_precision
86 else:
87 raise ValueError(
88 f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'
89 )
90
91 if self.plugin is not None and self.plugin.control_checkpoint_io():
92 self.checkpoint_io = self.plugin.get_checkpoint_io()
93 else:
94 self.checkpoint_io = GeneralCheckpointIO()
95
96 def boost(
97 self,
98 model: nn.Module,
99 optimizer: Optimizer,
100 criterion: Callable = None,
101 dataloader: DataLoader = None,
102 lr_scheduler: LRScheduler = None,
103 ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:
104 """
105 Boost the model, optimizer, criterion, lr_scheduler, and dataloader.
106
107 Args:
108 model (nn.Module): The model to be boosted.
109 optimizer (Optimizer): The optimizer to be boosted.
110 criterion (Callable): The criterion to be boosted.
111 dataloader (DataLoader): The dataloader to be boosted.
112 lr_scheduler (LRScheduler): The lr_scheduler to be boosted.
113 """
114 # TODO(FrankLeeeee): consider multi-model and multi-optimizer case
115 # TODO(FrankLeeeee): consider multi-dataloader case
116 # transform model for mixed precision
117 if self.plugin:
118 model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(
119 model, optimizer, criterion, dataloader, lr_scheduler)
120
121 if self.plugin and not self.plugin.control_device():
122 # transform model for accelerator
123 model = self.accelerator.configure(model)
124
125 if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):
126 # transform model for mixed precision
127 # when mixed_precision is specified and the plugin is not given or does not control the precision
128 model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)
129
130 return model, optimizer, criterion, dataloader, lr_scheduler
131
132 def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:
133 """Backward pass.
134
135 Args:
136 loss (torch.Tensor): The loss to be backpropagated.
137 optimizer (Optimizer): The optimizer to be updated.
138 """
139 # TODO: implement this method with plugin
140 optimizer.backward(loss)
141
142 def execute_pipeline(self,
143 data_iter: Iterator,
144 model: nn.Module,
145 criterion: Callable[[torch.Tensor], torch.Tensor],
146 optimizer: Optimizer,
147 return_loss: bool = True,
148 return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:
149 # TODO: implement this method
150 # run pipeline forward backward pass
151 # return loss or outputs if needed
152 pass
153
154 def no_sync(self, model: nn.Module) -> contextmanager:
155 """Context manager to disable gradient synchronization across DP process groups.
156
157 Args:
158 model (nn.Module): The model to be disabled gradient synchronization.
159
160 Returns:
161 contextmanager: Context to disable gradient synchronization.
162 """
163 assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'
164 assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'
165 return self.plugin.no_sync(model)
166
167 def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):
168 """Load model from checkpoint.
169
170 Args:
171 model (nn.Module): A model boosted by Booster.
172 checkpoint (str): Path to the checkpoint. It must be a local path.
173 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
174 strict (bool, optional): whether to strictly enforce that the keys
175 in :attr:`state_dict` match the keys returned by this module's
176 :meth:`~torch.nn.Module.state_dict` function. Defaults to True.
177 """
178 self.checkpoint_io.load_model(model, checkpoint, strict)
179
180 def save_model(self,
181 model: nn.Module,
182 checkpoint: str,
183 prefix: str = None,
184 shard: bool = False,
185 size_per_shard: int = 1024):
186 """Save model to checkpoint.
187
188 Args:
189 model (nn.Module): A model boosted by Booster.
190 checkpoint (str): Path to the checkpoint. It must be a local path.
191 It is a file path if ``shard=False``. Otherwise, it is a directory path.
192 prefix (str, optional): A prefix added to parameter and buffer
193 names to compose the keys in state_dict. Defaults to None.
194 shard (bool, optional): Whether to save checkpoint a sharded way.
195 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
196 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
197 """
198 self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)
199
200 def load_optimizer(self, optimizer: Optimizer, checkpoint: str):
201 """Load optimizer from checkpoint.
202
203 Args:
204 optimizer (Optimizer): An optimizer boosted by Booster.
205 checkpoint (str): Path to the checkpoint. It must be a local path.
206 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
207 """
208 self.checkpoint_io.load_optimizer(optimizer, checkpoint)
209
210 def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):
211 """Save optimizer to checkpoint.
212 Warning: Saving sharded optimizer checkpoint is not supported yet.
213
214 Args:
215 optimizer (Optimizer): An optimizer boosted by Booster.
216 checkpoint (str): Path to the checkpoint. It must be a local path.
217 It is a file path if ``shard=False``. Otherwise, it is a directory path.
218 shard (bool, optional): Whether to save checkpoint a sharded way.
219 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
220 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
221 """
222 self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)
223
224 def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
225 """Save lr scheduler to checkpoint.
226
227 Args:
228 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
229 checkpoint (str): Path to the checkpoint. It must be a local file path.
230 """
231 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)
232
233 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
234 """Load lr scheduler from checkpoint.
235
236 Args:
237 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
238 checkpoint (str): Path to the checkpoint. It must be a local file path.
239 """
240 self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py
--- a/colossalai/booster/booster.py
+++ b/colossalai/booster/booster.py
@@ -23,27 +23,28 @@
training with different precision, accelerator, and plugin.
Examples:
- >>> colossalai.launch(...)
- >>> plugin = GeminiPlugin(stage=3, ...)
- >>> booster = Booster(precision='fp16', plugin=plugin)
- >>>
- >>> model = GPT2()
- >>> optimizer = Adam(model.parameters())
- >>> dataloader = Dataloader(Dataset)
- >>> lr_scheduler = LinearWarmupScheduler()
- >>> criterion = GPTLMLoss()
- >>>
- >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
- >>>
- >>> for epoch in range(max_epochs):
- >>> for input_ids, attention_mask in dataloader:
- >>> outputs = model(input_ids, attention_mask)
- >>> loss = criterion(outputs.logits, input_ids)
- >>> booster.backward(loss, optimizer)
- >>> optimizer.step()
- >>> lr_scheduler.step()
- >>> optimizer.zero_grad()
-
+ ```python
+ colossalai.launch(...)
+ plugin = GeminiPlugin(stage=3, ...)
+ booster = Booster(precision='fp16', plugin=plugin)
+
+ model = GPT2()
+ optimizer = Adam(model.parameters())
+ dataloader = Dataloader(Dataset)
+ lr_scheduler = LinearWarmupScheduler()
+ criterion = GPTLMLoss()
+
+ model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
+
+ for epoch in range(max_epochs):
+ for input_ids, attention_mask in dataloader:
+ outputs = model(input_ids, attention_mask)
+ loss = criterion(outputs.logits, input_ids)
+ booster.backward(loss, optimizer)
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+ ```
Args:
device (str or torch.device): The device to run the training. Default: 'cuda'.
| {"golden_diff": "diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py\n--- a/colossalai/booster/booster.py\n+++ b/colossalai/booster/booster.py\n@@ -23,27 +23,28 @@\n training with different precision, accelerator, and plugin.\n \n Examples:\n- >>> colossalai.launch(...)\n- >>> plugin = GeminiPlugin(stage=3, ...)\n- >>> booster = Booster(precision='fp16', plugin=plugin)\n- >>>\n- >>> model = GPT2()\n- >>> optimizer = Adam(model.parameters())\n- >>> dataloader = Dataloader(Dataset)\n- >>> lr_scheduler = LinearWarmupScheduler()\n- >>> criterion = GPTLMLoss()\n- >>>\n- >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n- >>>\n- >>> for epoch in range(max_epochs):\n- >>> for input_ids, attention_mask in dataloader:\n- >>> outputs = model(input_ids, attention_mask)\n- >>> loss = criterion(outputs.logits, input_ids)\n- >>> booster.backward(loss, optimizer)\n- >>> optimizer.step()\n- >>> lr_scheduler.step()\n- >>> optimizer.zero_grad()\n-\n+ ```python\n+ colossalai.launch(...)\n+ plugin = GeminiPlugin(stage=3, ...)\n+ booster = Booster(precision='fp16', plugin=plugin)\n+\n+ model = GPT2()\n+ optimizer = Adam(model.parameters())\n+ dataloader = Dataloader(Dataset)\n+ lr_scheduler = LinearWarmupScheduler()\n+ criterion = GPTLMLoss()\n+\n+ model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n+\n+ for epoch in range(max_epochs):\n+ for input_ids, attention_mask in dataloader:\n+ outputs = model(input_ids, attention_mask)\n+ loss = criterion(outputs.logits, input_ids)\n+ booster.backward(loss, optimizer)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+ ```\n \n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: booster API docstring is misaligned\n### \ud83d\udc1b Describe the bug\n\nThe example should follow markdown format to be correctly rendered on the documentation website.\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. 
It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n >>> colossalai.launch(...)\n >>> plugin = GeminiPlugin(stage=3, ...)\n >>> booster = Booster(precision='fp16', plugin=plugin)\n >>>\n >>> model = GPT2()\n >>> optimizer = Adam(model.parameters())\n >>> dataloader = Dataloader(Dataset)\n >>> lr_scheduler = LinearWarmupScheduler()\n >>> criterion = GPTLMLoss()\n >>>\n >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n >>>\n >>> for epoch in range(max_epochs):\n >>> for input_ids, attention_mask in dataloader:\n >>> outputs = model(input_ids, attention_mask)\n >>> loss = criterion(outputs.logits, input_ids)\n >>> booster.backward(loss, optimizer)\n >>> optimizer.step()\n >>> lr_scheduler.step()\n >>> optimizer.zero_grad()\n\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform 
model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}], "after_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. 
It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n ```python\n colossalai.launch(...)\n plugin = GeminiPlugin(stage=3, ...)\n booster = Booster(precision='fp16', plugin=plugin)\n\n model = GPT2()\n optimizer = Adam(model.parameters())\n dataloader = Dataloader(Dataset)\n lr_scheduler = LinearWarmupScheduler()\n criterion = GPTLMLoss()\n\n model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n\n for epoch in range(max_epochs):\n for input_ids, attention_mask in dataloader:\n outputs = model(input_ids, attention_mask)\n loss = criterion(outputs.logits, input_ids)\n booster.backward(loss, optimizer)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n ```\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform model for mixed precision\n if self.plugin:\n model, optimizer, 
criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}]} | 3,232 | 509 |
gh_patches_debug_35712 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-655 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Python 3.9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import nox
16
17 TEST_DEPENDENCIES = [
18 "flask",
19 "freezegun",
20 "mock",
21 "oauth2client",
22 "pyopenssl",
23 "pytest",
24 "pytest-cov",
25 "pytest-localserver",
26 "requests",
27 "urllib3",
28 "cryptography",
29 "responses",
30 "grpcio",
31 ]
32
33 ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"]
34
35 BLACK_VERSION = "black==19.3b0"
36 BLACK_PATHS = [
37 "google",
38 "tests",
39 "tests_async",
40 "noxfile.py",
41 "setup.py",
42 "docs/conf.py",
43 ]
44
45
46 @nox.session(python="3.7")
47 def lint(session):
48 session.install("flake8", "flake8-import-order", "docutils", BLACK_VERSION)
49 session.install(".")
50 session.run("black", "--check", *BLACK_PATHS)
51 session.run(
52 "flake8",
53 "--import-order-style=google",
54 "--application-import-names=google,tests,system_tests",
55 "google",
56 "tests",
57 "tests_async",
58 )
59 session.run(
60 "python", "setup.py", "check", "--metadata", "--restructuredtext", "--strict"
61 )
62
63
64 @nox.session(python="3.6")
65 def blacken(session):
66 """Run black.
67
68 Format code to uniform standard.
69
70 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
71 That run uses an image that doesn't have 3.6 installed. Before updating this
72 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
73 """
74 session.install(BLACK_VERSION)
75 session.run("black", *BLACK_PATHS)
76
77
78 @nox.session(python=["3.6", "3.7", "3.8"])
79 def unit(session):
80 session.install(*TEST_DEPENDENCIES)
81 session.install(*(ASYNC_DEPENDENCIES))
82 session.install(".")
83 session.run(
84 "pytest",
85 "--cov=google.auth",
86 "--cov=google.oauth2",
87 "--cov=tests",
88 "tests",
89 "tests_async",
90 )
91
92
93 @nox.session(python=["2.7", "3.5"])
94 def unit_prev_versions(session):
95 session.install(*TEST_DEPENDENCIES)
96 session.install(".")
97 session.run(
98 "pytest", "--cov=google.auth", "--cov=google.oauth2", "--cov=tests", "tests"
99 )
100
101
102 @nox.session(python="3.7")
103 def cover(session):
104 session.install(*TEST_DEPENDENCIES)
105 session.install(*(ASYNC_DEPENDENCIES))
106 session.install(".")
107 session.run(
108 "pytest",
109 "--cov=google.auth",
110 "--cov=google.oauth2",
111 "--cov=tests",
112 "--cov=tests_async",
113 "--cov-report=",
114 "tests",
115 "tests_async",
116 )
117 session.run("coverage", "report", "--show-missing", "--fail-under=100")
118
119
120 @nox.session(python="3.7")
121 def docgen(session):
122 session.env["SPHINX_APIDOC_OPTIONS"] = "members,inherited-members,show-inheritance"
123 session.install(*TEST_DEPENDENCIES)
124 session.install("sphinx")
125 session.install(".")
126 session.run("rm", "-r", "docs/reference")
127 session.run(
128 "sphinx-apidoc",
129 "--output-dir",
130 "docs/reference",
131 "--separate",
132 "--module-first",
133 "google",
134 )
135
136
137 @nox.session(python="3.7")
138 def docs(session):
139 session.install("sphinx", "-r", "docs/requirements-docs.txt")
140 session.install(".")
141 session.run("make", "-C", "docs", "html")
142
143
144 @nox.session(python="pypy")
145 def pypy(session):
146 session.install(*TEST_DEPENDENCIES)
147 session.install(*ASYNC_DEPENDENCIES)
148 session.install(".")
149 session.run(
150 "pytest",
151 "--cov=google.auth",
152 "--cov=google.oauth2",
153 "--cov=tests",
154 "tests",
155 "tests_async",
156 )
157
```
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 DEPENDENCIES = (
22 "cachetools>=2.0.0,<5.0",
23 "pyasn1-modules>=0.2.1",
24 # rsa==4.5 is the last version to support 2.7
25 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
26 'rsa<4.6; python_version < "3.5"',
27 'rsa>=3.1.4,<5; python_version >= "3.5"',
28 "setuptools>=40.3.0",
29 "six>=1.9.0",
30 )
31
32 extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"}
33
34 with io.open("README.rst", "r") as fh:
35 long_description = fh.read()
36
37 version = "1.23.0"
38
39 setup(
40 name="google-auth",
41 version=version,
42 author="Google Cloud Platform",
43 author_email="[email protected]",
44 description="Google Authentication Library",
45 long_description=long_description,
46 url="https://github.com/googleapis/google-auth-library-python",
47 packages=find_packages(exclude=("tests*", "system_tests*")),
48 namespace_packages=("google",),
49 install_requires=DEPENDENCIES,
50 extras_require=extras,
51 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
52 license="Apache 2.0",
53 keywords="google auth oauth client",
54 classifiers=[
55 "Programming Language :: Python :: 2",
56 "Programming Language :: Python :: 2.7",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.5",
59 "Programming Language :: Python :: 3.6",
60 "Programming Language :: Python :: 3.7",
61 "Programming Language :: Python :: 3.8",
62 "Development Status :: 5 - Production/Stable",
63 "Intended Audience :: Developers",
64 "License :: OSI Approved :: Apache Software License",
65 "Operating System :: POSIX",
66 "Operating System :: Microsoft :: Windows",
67 "Operating System :: MacOS :: MacOS X",
68 "Operating System :: OS Independent",
69 "Topic :: Internet :: WWW/HTTP",
70 ],
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -75,7 +75,7 @@
session.run("black", *BLACK_PATHS)
[email protected](python=["3.6", "3.7", "3.8"])
[email protected](python=["3.6", "3.7", "3.8", "3.9"])
def unit(session):
session.install(*TEST_DEPENDENCIES)
session.install(*(ASYNC_DEPENDENCIES))
@@ -90,7 +90,7 @@
)
[email protected](python=["2.7", "3.5"])
[email protected](python=["2.7"])
def unit_prev_versions(session):
session.install(*TEST_DEPENDENCIES)
session.install(".")
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,8 +23,8 @@
"pyasn1-modules>=0.2.1",
# rsa==4.5 is the last version to support 2.7
# https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
- 'rsa<4.6; python_version < "3.5"',
- 'rsa>=3.1.4,<5; python_version >= "3.5"',
+ 'rsa<4.6; python_version < "3.6"',
+ 'rsa>=3.1.4,<5; python_version >= "3.6"',
"setuptools>=40.3.0",
"six>=1.9.0",
)
@@ -48,17 +48,17 @@
namespace_packages=("google",),
install_requires=DEPENDENCIES,
extras_require=extras,
- python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
+ python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
license="Apache 2.0",
keywords="google auth oauth client",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -75,7 +75,7 @@\n session.run(\"black\", *BLACK_PATHS)\n \n \[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\n def unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n@@ -90,7 +90,7 @@\n )\n \n \[email protected](python=[\"2.7\", \"3.5\"])\[email protected](python=[\"2.7\"])\n def unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,8 +23,8 @@\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n- 'rsa<4.6; python_version < \"3.5\"',\n- 'rsa>=3.1.4,<5; python_version >= \"3.5\"',\n+ 'rsa<4.6; python_version < \"3.6\"',\n+ 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n )\n@@ -48,17 +48,17 @@\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n- python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n+ python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n", "issue": "Add support for Python 3.9\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n 
)\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.5\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.5\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n 
long_description = fh.read()\n\nversion = \"1.23.0\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n )\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nversion = \"1.23.0\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n 
long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 2,477 | 627 |
gh_patches_debug_10654 | rasdani/github-patches | git_diff | quantumlib__Cirq-3404 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Read the docs has goofy copyright
**Description of the issue**
Copyright shows up on cirq tutorials on readthedocs.
**How to reproduce the issue**
Go to https://cirq.readthedocs.io/en/latest/docs/gates.html or other pages. Top part says "Copyright 2020 The Cirq Developers" with apache license.
**Cirq version**
0.9.1. It probably broke when we adjusted all the copyright notices.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rtd_docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # coverage: ignore
3
4 # The content for all documentation lives in ../docs. That folder is
5 # following the structure for the Google Quantum site configured for the
6 # internal CMS, devsite. The readthedocs layer is a secondary, which generates
7 # the content using sphinx to readthedocs.io until we go live with the devsite.
8 #
9 # This is the configuration file for the Sphinx documentation builder.
10 # See http://www.sphinx-doc.org/en/master/config for help
11
12 # -- Path setup --------------------------------------------------------------
13
14 # If extensions (or modules to document with autodoc) are in another directory,
15 # add these directories to sys.path here. If the directory is relative to the
16 # documentation root, use os.path.abspath to make it absolute, like shown here.
17 #
18 import inspect
19 import re
20 from typing import List, Any
21
22 import os
23 import sys
24 import shutil
25
26 import pypandoc
27
28 cirq_root_path = os.path.dirname(os.path.dirname(__file__))
29 sys.path.insert(0, cirq_root_path)
30 from cirq import _doc
31
32
33 def setup(app):
34 # just in case it exists (locally) remove the copied docs folder
35 shutil.rmtree("./docs", ignore_errors=True)
36 # copy recursively the actual content from the devsite folder
37 # to rtd_docs/docs
38 shutil.copytree(src="../docs", dst="./docs")
39 app.add_config_value('pandoc_use_parser', 'markdown', True)
40 app.connect('autodoc-process-docstring', autodoc_process)
41 app.connect('autodoc-skip-member', autodoc_skip_member)
42 app.connect('source-read', source_read)
43
44
45 def convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:
46 if all('$$' not in line for line in lines):
47 return lines
48
49 data = '\n'.join(lines)
50 sections = data.split('$$')
51 if len(sections) % 2 != 1:
52 raise ValueError('Mismatched number of "$$" latex tokens.')
53
54 result = []
55 for i, s in enumerate(sections):
56 if i % 2:
57 # Avoid getting split across divs.
58 s = ' '.join(s.split('\n'))
59 # Avoid intermediate layers turning our newlines into slashes.
60 s = s.replace('\\\\', r'\newline')
61 # Turn latex like "|x\rangle" into "|x \rangle".
62 # The extra space seems to be necessary to survive a later pass.
63 s = re.sub(r'([a-zA-Z0-9])\\', r'\1 \\', s)
64 # Keep the $$ so MathJax can find it.
65 result.append('$${}$$'.format(s))
66 else:
67 # Work around bad table detection in pandoc by concatenating
68 # lines from the same paragraph.
69 s = '\n\n'.join(e.replace('\n', ' ') for e in s.split('\n\n'))
70
71 # Convert markdown to rst.
72 out = pypandoc.convert(s, to='rst', format='markdown_github')
73
74 # Not sure why pandoc is escaping these...
75 out = out.replace(r'\|', '|')
76
77 result.extend(out.split('\n'))
78
79 return result
80
81
82 def autodoc_skip_member(
83 app,
84 what: str,
85 name: str,
86 obj: Any,
87 skip: bool,
88 options,
89 ) -> bool:
90 """Public members already kept. Also include members marked as documented.
91 """
92 # Never skip if explicitly whitelisted.
93 if id(obj) in _doc.RECORDED_CONST_DOCS:
94 return False
95 # Skip all private methods.
96 if name.startswith('_'):
97 return True
98 # Fallback to default.
99 return skip
100
101
102 def autodoc_process(app, what: str, name: str, obj: Any, options,
103 lines: List[str]) -> None:
104 # Try to lookup in documented dictionary.
105 doc_string = _doc.RECORDED_CONST_DOCS.get(id(obj))
106 if name.startswith('cirq') and doc_string is not None:
107 # Override docstring if requested.
108 if doc_string is not None:
109 new_doc_string = inspect.cleandoc(doc_string)
110 lines[:] = new_doc_string.split('\n')
111 elif not (getattr(obj, '__module__', 'cirq') or '').startswith('cirq'):
112 # Don't convert objects from other modules.
113 return
114
115 # Don't convert output from Napoleon extension, which is already rst.
116 i = 0
117 while i < len(lines) and not lines[i].startswith(':'):
118 i += 1
119 if not i:
120 return
121
122 converted_lines = convert_markdown_mathjax_for_rst(lines[:i])
123 kept_lines = lines[i:]
124
125 data = pypandoc.convert(
126 '\n'.join(converted_lines),
127 to='rst',
128 format='markdown_github',
129 )
130
131 lines[:] = data.split('\n') + kept_lines
132
133
134 def source_read(app, docname, source):
135 source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"',
136 r'"**\1**"', source[0])
137 source[0] = re.sub(r'"<table.*tfo-notebook-buttons.*"</table>"',
138 r'""',
139 source[0],
140 flags=re.S)
141
142
143 # -- Project information -----------------------------------------------------
144
145 project = 'Cirq'
146 copyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin
147 author = 'The Cirq Developers'
148
149 # The full version, including alpha/beta/rc tags
150 __version__ = ''
151 exec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())
152 release = __version__
153
154 # The short X.Y version
155 version = release # '.'.join(release.split('.')[:2])
156
157 # -- General configuration ---------------------------------------------------
158
159 # If your documentation needs a minimal Sphinx version, state it here.
160 # needs_sphinx = '1.0'
161
162 # Add any Sphinx extension module names here, as strings. They can be
163 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
164 # ones.
165 extensions = [
166 'myst_parser',
167 'nbsphinx',
168 'sphinx.ext.autodoc',
169 'sphinx.ext.autosummary',
170 'sphinx.ext.doctest',
171 'sphinx.ext.mathjax',
172 'sphinx.ext.napoleon',
173 'sphinx.ext.viewcode',
174 'sphinx_markdown_tables',
175 ]
176
177 # Add any paths that contain templates here, relative to this directory.
178 templates_path = ['_templates']
179
180 # Allow markdown includes.
181 # http://www.sphinx-doc.org/en/master/markdown.html
182 # The suffix(es) of source filenames.
183 # You can specify multiple suffix as a list of string:
184 #
185 source_suffix = {
186 '.rst': 'restructuredtext',
187 '.md': 'markdown',
188 }
189
190 # The master toctree document.
191 master_doc = 'index'
192
193 # The language for content autogenerated by Sphinx. Refer to documentation
194 # for a list of supported languages.
195 #
196 # This is also used if you do content translation via gettext catalogs.
197 # Usually you set "language" from the command line for these cases.
198 language = None
199
200 # List of patterns, relative to source directory, that match files and
201 # directories to ignore when looking for source files.
202 # This pattern also affects html_static_path and html_extra_path .
203 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
204
205 # The name of the Pygments (syntax highlighting) style to use.
206 pygments_style = 'sphinx'
207
208 # -- Options for HTML output ---------------------------------------------
209
210 html_theme = 'sphinx_rtd_theme'
211 html_favicon = 'favicon.ico'
212 # html_theme_options = {}
213
214 # Add any paths that contain custom static files (such as style sheets) here,
215 # relative to this directory. They are copied after the builtin static files,
216 # so a file named "default.css" will overwrite the builtin "default.css".
217 html_static_path = ['_static']
218
219 # Custom sidebar templates, must be a dictionary that maps document names
220 # to template names.
221 #
222 # The default sidebars (for documents that don't match any pattern) are
223 # defined by theme itself. Builtin themes are using these templates by
224 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
225 # 'searchbox.html']``.
226 #
227 # html_sidebars = {}
228
229 html_logo = 'docs/images/Cirq_logo_notext.png'
230 html_css_files = ['tweak-style.css']
231
232 # -- Options for HTMLHelp output -----------------------------------------
233
234 # Output file base name for HTML help builder.
235 htmlhelp_basename = 'Cirqdoc'
236
237 # -- Options for LaTeX output --------------------------------------------
238
239 latex_elements = {
240 # The paper size ('letterpaper' or 'a4paper').
241 'papersize': 'letterpaper',
242
243 # The font size ('10pt', '11pt' or '12pt').
244 # 'pointsize': '10pt',
245
246 # Additional stuff for the LaTeX preamble.
247 # 'preamble': '',
248
249 # Latex figure (float) alignment
250 # 'figure_align': 'htbp',
251 }
252
253 # Grouping the document tree into LaTeX files. List of tuples
254 # (source start file, target name, title,
255 # author, documentclass [howto, manual, or own class]).
256 latex_documents = [
257 (master_doc, 'Cirq.tex', 'Cirq Documentation', 'The Cirq Developers',
258 'manual'),
259 ]
260
261 # -- Options for manual page output --------------------------------------
262
263 # One entry per manual page. List of tuples
264 # (source start file, name, description, authors, manual section).
265 man_pages = [(master_doc, 'cirq', 'Cirq Documentation', [author], 1)]
266
267 # -- Options for Texinfo output ------------------------------------------
268
269 # Grouping the document tree into Texinfo files. List of tuples
270 # (source start file, target name, title, author,
271 # dir menu entry, description, category)
272 texinfo_documents = [
273 (master_doc, 'Cirq', 'Cirq Documentation', author, 'Cirq',
274 'A python library for NISQ circuits.', 'Miscellaneous'),
275 ]
276
277 # -- Extension configuration -------------------------------------------------
278
279 # Generate subpages for reference docs automatically.
280 # http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically
281 autosummary_generate = True
282
283 # to resolve name clashes between the generated files
284 autosummary_filename_map = {"cirq.QFT": "cirq.QFT_deprecated"}
285
286 myst_update_mathjax = False
287
288 # To allow for google.colab temporarily in notebooks
289 # TODO: after https://github.com/quantumlib/Cirq/issues/3368 turn this back off
290 nbsphinx_allow_errors = True
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rtd_docs/conf.py b/rtd_docs/conf.py
--- a/rtd_docs/conf.py
+++ b/rtd_docs/conf.py
@@ -132,8 +132,14 @@
def source_read(app, docname, source):
- source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"',
- r'"**\1**"', source[0])
+ source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"', r'""',
+ source[0])
+ source[0] = re.sub(
+ r'(\{\s*?"cell_type": "code".*?"#@title.*License.".*?\},)',
+ r'',
+ source[0],
+ flags=re.S)
+
source[0] = re.sub(r'"<table.*tfo-notebook-buttons.*"</table>"',
r'""',
source[0],
| {"golden_diff": "diff --git a/rtd_docs/conf.py b/rtd_docs/conf.py\n--- a/rtd_docs/conf.py\n+++ b/rtd_docs/conf.py\n@@ -132,8 +132,14 @@\n \n \n def source_read(app, docname, source):\n- source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"',\n- r'\"**\\1**\"', source[0])\n+ source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"', r'\"\"',\n+ source[0])\n+ source[0] = re.sub(\n+ r'(\\{\\s*?\"cell_type\": \"code\".*?\"#@title.*License.\".*?\\},)',\n+ r'',\n+ source[0],\n+ flags=re.S)\n+\n source[0] = re.sub(r'\"<table.*tfo-notebook-buttons.*\"</table>\"',\n r'\"\"',\n source[0],\n", "issue": "Read the docs has goofy copyright\n**Description of the issue**\r\nCopyright shows up on cirq tutorials on readthedocs.\r\n\r\n**How to reproduce the issue**\r\nGo to https://cirq.readthedocs.io/en/latest/docs/gates.html or other pages. Top part says \"Copyright 2020 The Cirq Developers\" with apache license.\r\n\r\n**Cirq version**\r\n0.9.1. It probably broke when we adjusted all the copyright notices.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# coverage: ignore\n\n# The content for all documentation lives in ../docs. That folder is\n# following the structure for the Google Quantum site configured for the\n# internal CMS, devsite. The readthedocs layer is a secondary, which generates\n# the content using sphinx to readthedocs.io until we go live with the devsite.\n#\n# This is the configuration file for the Sphinx documentation builder.\n# See http://www.sphinx-doc.org/en/master/config for help\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport inspect\nimport re\nfrom typing import List, Any\n\nimport os\nimport sys\nimport shutil\n\nimport pypandoc\n\ncirq_root_path = os.path.dirname(os.path.dirname(__file__))\nsys.path.insert(0, cirq_root_path)\nfrom cirq import _doc\n\n\ndef setup(app):\n # just in case it exists (locally) remove the copied docs folder\n shutil.rmtree(\"./docs\", ignore_errors=True)\n # copy recursively the actual content from the devsite folder\n # to rtd_docs/docs\n shutil.copytree(src=\"../docs\", dst=\"./docs\")\n app.add_config_value('pandoc_use_parser', 'markdown', True)\n app.connect('autodoc-process-docstring', autodoc_process)\n app.connect('autodoc-skip-member', autodoc_skip_member)\n app.connect('source-read', source_read)\n\n\ndef convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:\n if all('$$' not in line for line in lines):\n return lines\n\n data = '\\n'.join(lines)\n sections = data.split('$$')\n if len(sections) % 2 != 1:\n raise ValueError('Mismatched number of \"$$\" latex tokens.')\n\n result = []\n for i, s in enumerate(sections):\n if i % 2:\n # Avoid getting split across divs.\n s = ' '.join(s.split('\\n'))\n # Avoid intermediate layers turning our newlines into slashes.\n s = s.replace('\\\\\\\\', r'\\newline')\n # Turn latex like \"|x\\rangle\" into \"|x \\rangle\".\n # The extra space seems to be necessary to survive a later pass.\n s = re.sub(r'([a-zA-Z0-9])\\\\', r'\\1 \\\\', s)\n # Keep the $$ so MathJax can find it.\n result.append('$${}$$'.format(s))\n else:\n # Work around bad table detection in pandoc by concatenating\n # lines from the same paragraph.\n s = '\\n\\n'.join(e.replace('\\n', ' ') for e 
in s.split('\\n\\n'))\n\n # Convert markdown to rst.\n out = pypandoc.convert(s, to='rst', format='markdown_github')\n\n # Not sure why pandoc is escaping these...\n out = out.replace(r'\\|', '|')\n\n result.extend(out.split('\\n'))\n\n return result\n\n\ndef autodoc_skip_member(\n app,\n what: str,\n name: str,\n obj: Any,\n skip: bool,\n options,\n) -> bool:\n \"\"\"Public members already kept. Also include members marked as documented.\n \"\"\"\n # Never skip if explicitly whitelisted.\n if id(obj) in _doc.RECORDED_CONST_DOCS:\n return False\n # Skip all private methods.\n if name.startswith('_'):\n return True\n # Fallback to default.\n return skip\n\n\ndef autodoc_process(app, what: str, name: str, obj: Any, options,\n lines: List[str]) -> None:\n # Try to lookup in documented dictionary.\n doc_string = _doc.RECORDED_CONST_DOCS.get(id(obj))\n if name.startswith('cirq') and doc_string is not None:\n # Override docstring if requested.\n if doc_string is not None:\n new_doc_string = inspect.cleandoc(doc_string)\n lines[:] = new_doc_string.split('\\n')\n elif not (getattr(obj, '__module__', 'cirq') or '').startswith('cirq'):\n # Don't convert objects from other modules.\n return\n\n # Don't convert output from Napoleon extension, which is already rst.\n i = 0\n while i < len(lines) and not lines[i].startswith(':'):\n i += 1\n if not i:\n return\n\n converted_lines = convert_markdown_mathjax_for_rst(lines[:i])\n kept_lines = lines[i:]\n\n data = pypandoc.convert(\n '\\n'.join(converted_lines),\n to='rst',\n format='markdown_github',\n )\n\n lines[:] = data.split('\\n') + kept_lines\n\n\ndef source_read(app, docname, source):\n source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"',\n r'\"**\\1**\"', source[0])\n source[0] = re.sub(r'\"<table.*tfo-notebook-buttons.*\"</table>\"',\n r'\"\"',\n source[0],\n flags=re.S)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Cirq'\ncopyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin\nauthor = 'The Cirq Developers'\n\n# The full version, including alpha/beta/rc tags\n__version__ = ''\nexec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())\nrelease = __version__\n\n# The short X.Y version\nversion = release # '.'.join(release.split('.')[:2])\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'myst_parser',\n 'nbsphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx_markdown_tables',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# Allow markdown includes.\n# http://www.sphinx-doc.org/en/master/markdown.html\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.md': 'markdown',\n}\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output ---------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_favicon = 'favicon.ico'\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_logo = 'docs/images/Cirq_logo_notext.png'\nhtml_css_files = ['tweak-style.css']\n\n# -- Options for HTMLHelp output -----------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Cirqdoc'\n\n# -- Options for LaTeX output --------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Cirq.tex', 'Cirq Documentation', 'The Cirq Developers',\n 'manual'),\n]\n\n# -- Options for manual page output --------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'cirq', 'Cirq Documentation', [author], 1)]\n\n# -- Options for Texinfo output ------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Cirq', 'Cirq Documentation', author, 'Cirq',\n 'A python library for NISQ circuits.', 'Miscellaneous'),\n]\n\n# -- Extension configuration -------------------------------------------------\n\n# Generate subpages for reference docs automatically.\n# http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically\nautosummary_generate = True\n\n# to resolve name clashes between the generated files\nautosummary_filename_map = {\"cirq.QFT\": \"cirq.QFT_deprecated\"}\n\nmyst_update_mathjax = False\n\n# To allow for google.colab temporarily in notebooks\n# TODO: after https://github.com/quantumlib/Cirq/issues/3368 turn this back off\nnbsphinx_allow_errors = True\n", "path": "rtd_docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# coverage: ignore\n\n# The content for all documentation lives in ../docs. That folder is\n# following the structure for the Google Quantum site configured for the\n# internal CMS, devsite. The readthedocs layer is a secondary, which generates\n# the content using sphinx to readthedocs.io until we go live with the devsite.\n#\n# This is the configuration file for the Sphinx documentation builder.\n# See http://www.sphinx-doc.org/en/master/config for help\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport inspect\nimport re\nfrom typing import List, Any\n\nimport os\nimport sys\nimport shutil\n\nimport pypandoc\n\ncirq_root_path = os.path.dirname(os.path.dirname(__file__))\nsys.path.insert(0, cirq_root_path)\nfrom cirq import _doc\n\n\ndef setup(app):\n # just in case it exists (locally) remove the copied docs folder\n shutil.rmtree(\"./docs\", ignore_errors=True)\n # copy recursively the actual content from the devsite folder\n # to rtd_docs/docs\n shutil.copytree(src=\"../docs\", dst=\"./docs\")\n app.add_config_value('pandoc_use_parser', 'markdown', True)\n app.connect('autodoc-process-docstring', autodoc_process)\n app.connect('autodoc-skip-member', autodoc_skip_member)\n app.connect('source-read', source_read)\n\n\ndef convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:\n if all('$$' not in line for line in lines):\n return lines\n\n data = '\\n'.join(lines)\n sections = data.split('$$')\n if len(sections) % 2 != 1:\n raise ValueError('Mismatched number of \"$$\" latex tokens.')\n\n result = []\n for i, s in enumerate(sections):\n if i % 2:\n # Avoid getting split across divs.\n s = ' '.join(s.split('\\n'))\n # Avoid intermediate layers turning our newlines into slashes.\n s = s.replace('\\\\\\\\', r'\\newline')\n # Turn latex like \"|x\\rangle\" into \"|x \\rangle\".\n # The extra space seems to be necessary to survive a later pass.\n s = re.sub(r'([a-zA-Z0-9])\\\\', r'\\1 \\\\', s)\n # Keep the $$ so MathJax can find it.\n result.append('$${}$$'.format(s))\n else:\n # Work around bad table detection in pandoc by concatenating\n # lines from the same paragraph.\n s = '\\n\\n'.join(e.replace('\\n', ' ') for e in s.split('\\n\\n'))\n\n # Convert markdown to rst.\n out = pypandoc.convert(s, to='rst', format='markdown_github')\n\n # Not sure why pandoc is escaping these...\n out = 
out.replace(r'\\|', '|')\n\n result.extend(out.split('\\n'))\n\n return result\n\n\ndef autodoc_skip_member(\n app,\n what: str,\n name: str,\n obj: Any,\n skip: bool,\n options,\n) -> bool:\n \"\"\"Public members already kept. Also include members marked as documented.\n \"\"\"\n # Never skip if explicitly whitelisted.\n if id(obj) in _doc.RECORDED_CONST_DOCS:\n return False\n # Skip all private methods.\n if name.startswith('_'):\n return True\n # Fallback to default.\n return skip\n\n\ndef autodoc_process(app, what: str, name: str, obj: Any, options,\n lines: List[str]) -> None:\n # Try to lookup in documented dictionary.\n doc_string = _doc.RECORDED_CONST_DOCS.get(id(obj))\n if name.startswith('cirq') and doc_string is not None:\n # Override docstring if requested.\n if doc_string is not None:\n new_doc_string = inspect.cleandoc(doc_string)\n lines[:] = new_doc_string.split('\\n')\n elif not (getattr(obj, '__module__', 'cirq') or '').startswith('cirq'):\n # Don't convert objects from other modules.\n return\n\n # Don't convert output from Napoleon extension, which is already rst.\n i = 0\n while i < len(lines) and not lines[i].startswith(':'):\n i += 1\n if not i:\n return\n\n converted_lines = convert_markdown_mathjax_for_rst(lines[:i])\n kept_lines = lines[i:]\n\n data = pypandoc.convert(\n '\\n'.join(converted_lines),\n to='rst',\n format='markdown_github',\n )\n\n lines[:] = data.split('\\n') + kept_lines\n\n\ndef source_read(app, docname, source):\n source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"', r'\"\"',\n source[0])\n source[0] = re.sub(\n r'(\\{\\s*?\"cell_type\": \"code\".*?\"#@title.*License.\".*?\\},)',\n r'',\n source[0],\n flags=re.S)\n\n source[0] = re.sub(r'\"<table.*tfo-notebook-buttons.*\"</table>\"',\n r'\"\"',\n source[0],\n flags=re.S)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Cirq'\ncopyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin\nauthor = 'The Cirq Developers'\n\n# The full version, including alpha/beta/rc tags\n__version__ = ''\nexec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())\nrelease = __version__\n\n# The short X.Y version\nversion = release # '.'.join(release.split('.')[:2])\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'myst_parser',\n 'nbsphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx_markdown_tables',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# Allow markdown includes.\n# http://www.sphinx-doc.org/en/master/markdown.html\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.md': 'markdown',\n}\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output ---------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_favicon = 'favicon.ico'\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_logo = 'docs/images/Cirq_logo_notext.png'\nhtml_css_files = ['tweak-style.css']\n\n# -- Options for HTMLHelp output -----------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Cirqdoc'\n\n# -- Options for LaTeX output --------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Cirq.tex', 'Cirq Documentation', 'The Cirq Developers',\n 'manual'),\n]\n\n# -- Options for manual page output --------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'cirq', 'Cirq Documentation', [author], 1)]\n\n# -- Options for Texinfo output ------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Cirq', 'Cirq Documentation', author, 'Cirq',\n 'A python library for NISQ circuits.', 'Miscellaneous'),\n]\n\n# -- Extension configuration -------------------------------------------------\n\n# Generate subpages for reference docs automatically.\n# http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically\nautosummary_generate = True\n\n# to resolve name clashes between the generated files\nautosummary_filename_map = {\"cirq.QFT\": \"cirq.QFT_deprecated\"}\n\nmyst_update_mathjax = False\n\n# To allow for google.colab temporarily in notebooks\n# TODO: after https://github.com/quantumlib/Cirq/issues/3368 turn this back off\nnbsphinx_allow_errors = True\n", "path": "rtd_docs/conf.py"}]} | 3,479 | 218 |
gh_patches_debug_16584 | rasdani/github-patches | git_diff | fossasia__open-event-server-6510 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cron job for marking event invoices due is wrong
PR https://github.com/fossasia/open-event-server/pull/6166 is wrong. The query it introduced fails in every case.
https://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L164-L174
This line is not valid SQLAlchemy syntax:
https://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L169
`Neither 'InstrumentedAttribute' object nor 'Comparator' object associated with EventInvoice.event has an attribute 'ends_at'`
It should be `db.session.query(EventInvoice).join(Event).filter(EventInvoice.status == 'upcoming', Event.ends_at >= datetime.datetime.now()).all()`
Each run of this task is failing.
Fix it and write tests to ensure it works.
--- END ISSUE ---
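For reference, a minimal sketch of the join-based query quoted in the issue, assuming the models imported by the file below; the helper name is hypothetical and the call would run inside an app context like the other scheduled jobs:
```python
# Sketch only: the corrected query suggested in the issue, joining Event
# explicitly so that Event.ends_at can be filtered.
import datetime

from app.models import db
from app.models.event import Event
from app.models.event_invoice import EventInvoice


def upcoming_invoices_for_running_events():
    # EventInvoice is joined to Event via its event_id foreign key.
    return (
        db.session.query(EventInvoice)
        .join(Event)
        .filter(
            EventInvoice.status == 'upcoming',
            Event.ends_at >= datetime.datetime.now(),
        )
        .all()
    )
```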
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/scheduled_jobs.py`
Content:
```
1 import datetime
2
3 import pytz
4 from dateutil.relativedelta import relativedelta
5 from flask import render_template
6
7 from app.api.helpers.db import safe_query, save_to_db
8 from app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \
9 send_followup_email_for_monthly_fee_payment
10 from app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \
11 send_notif_after_event
12 from app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name
13 from app.api.helpers.utilities import monthdelta
14 from app.api.helpers.files import create_save_pdf
15 from app.api.helpers.storage import UPLOAD_PATHS
16 from app.models import db
17 from app.models.event import Event
18 from app.models.event_invoice import EventInvoice
19 from app.models.order import Order
20 from app.models.speaker import Speaker
21 from app.models.session import Session
22 from app.models.ticket import Ticket
23 from app.models.ticket_fee import TicketFees, get_fee
24
25 from app.settings import get_settings
26
27
28 def send_after_event_mail():
29 from app import current_app as app
30 with app.app_context():
31 events = Event.query.filter_by(state='published', deleted_at=None).all()
32 upcoming_events = get_upcoming_events()
33 upcoming_event_links = "<ul>"
34 for upcoming_event in upcoming_events:
35 frontend_url = get_settings()['frontend_url']
36 upcoming_event_links += "<li><a href='{}/events/{}'>{}</a></li>" \
37 .format(frontend_url, upcoming_event.id, upcoming_event.name)
38 upcoming_event_links += "</ul>"
39 for event in events:
40 organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
41 speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
42 owner = get_user_event_roles_by_role_name(event.id, 'owner').first()
43 current_time = datetime.datetime.now(pytz.timezone(event.timezone))
44 time_difference = current_time - event.ends_at
45 time_difference_minutes = (time_difference.days * 24 * 60) + \
46 (time_difference.seconds / 60)
47 if current_time > event.ends_at and time_difference_minutes < 1440:
48 for speaker in speakers:
49 if not speaker.is_email_overridden:
50 send_email_after_event(speaker.user.email, event.name, upcoming_event_links)
51 send_notif_after_event(speaker.user, event.name)
52 for organizer in organizers:
53 send_email_after_event(organizer.user.email, event.name, upcoming_event_links)
54 send_notif_after_event(organizer.user, event.name)
55 if owner:
56 send_email_after_event(owner.user.email, event.name, upcoming_event_links)
57 send_notif_after_event(owner.user, event.name)
58
59
60 def change_session_state_on_event_completion():
61 from app import current_app as app
62 with app.app_context():
63 sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\
64 .filter(Event.ends_at < datetime.datetime.now())
65 for session in sessions_to_be_changed:
66 session.state = 'rejected'
67 save_to_db(session, 'Changed {} session state to rejected'.format(session.title))
68
69
70 def send_event_fee_notification():
71 from app import current_app as app
72 with app.app_context():
73 events = Event.query.filter_by(deleted_at=None, state='published').all()
74 for event in events:
75 latest_invoice = EventInvoice.query.filter_by(
76 event_id=event.id).order_by(EventInvoice.created_at.desc()).first()
77
78 if latest_invoice:
79 orders = Order.query \
80 .filter_by(event_id=event.id) \
81 .filter_by(status='completed') \
82 .filter(Order.completed_at > latest_invoice.created_at).all()
83 else:
84 orders = Order.query.filter_by(
85 event_id=event.id).filter_by(status='completed').all()
86
87 fee_total = 0
88 for order in orders:
89 for order_ticket in order.tickets:
90 ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')
91 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
92 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)
93 fee_total += fee
94
95 if fee_total > 0:
96 owner = get_user_event_roles_by_role_name(event.id, 'owner').first()
97 new_invoice = EventInvoice(
98 amount=fee_total, event_id=event.id, user_id=owner.user.id)
99
100 if event.discount_code_id and event.discount_code:
101 r = relativedelta(datetime.utcnow(), event.created_at)
102 if r <= event.discount_code.valid_till:
103 new_invoice.amount = fee_total - \
104 (fee_total * (event.discount_code.value / 100.0))
105 new_invoice.discount_code_id = event.discount_code_id
106
107 save_to_db(new_invoice)
108 prev_month = monthdelta(new_invoice.created_at, 1).strftime(
109 "%b %Y") # Displayed as Aug 2016
110 app_name = get_settings()['app_name']
111 frontend_url = get_settings()['frontend_url']
112 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)
113 send_email_for_monthly_fee_payment(new_invoice.user.email,
114 event.name,
115 prev_month,
116 new_invoice.amount,
117 app_name,
118 link)
119 send_notif_monthly_fee_payment(new_invoice.user,
120 event.name,
121 prev_month,
122 new_invoice.amount,
123 app_name,
124 link,
125 new_invoice.event_id)
126
127
128 def send_event_fee_notification_followup():
129 from app import current_app as app
130 with app.app_context():
131 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()
132 for incomplete_invoice in incomplete_invoices:
133 if incomplete_invoice.amount > 0:
134 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(
135 "%b %Y") # Displayed as Aug 2016
136 app_name = get_settings()['app_name']
137 frontend_url = get_settings()['frontend_url']
138 link = '{}/event-invoice/{}/review'.format(frontend_url,
139 incomplete_invoice.identifier)
140 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,
141 incomplete_invoice.event.name,
142 prev_month,
143 incomplete_invoice.amount,
144 app_name,
145 link)
146 send_followup_notif_monthly_fee_payment(incomplete_invoice.user,
147 incomplete_invoice.event.name,
148 prev_month,
149 incomplete_invoice.amount,
150 app_name,
151 link,
152 incomplete_invoice.event.id)
153
154
155 def expire_pending_tickets():
156 from app import current_app as app
157 with app.app_context():
158 db.session.query(Order).filter(Order.status == 'pending',
159 (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\
160 update({'status': 'expired'})
161 db.session.commit()
162
163
164 def event_invoices_mark_due():
165 from app import current_app as app
166 with app.app_context():
167 db.session.query(EventInvoice).\
168 filter(EventInvoice.status == 'upcoming',
169 EventInvoice.event.ends_at >= datetime.datetime.now(),
170 (EventInvoice.created_at + datetime.timedelta(days=30) <=
171 datetime.datetime.now())).\
172 update({'status': 'due'})
173
174 db.session.commit()
175
176
177 def send_monthly_event_invoice():
178 from app import current_app as app
179 with app.app_context():
180 events = Event.query.filter_by(deleted_at=None, state='published').all()
181 for event in events:
182 # calculate net & gross revenues
183 user = event.owner
184 admin_info = get_settings()
185 currency = event.payment_currency
186 ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()
187 ticket_fee_percentage = ticket_fee_object.service_fee
188 ticket_fee_maximum = ticket_fee_object.maximum_fee
189 orders = Order.query.filter_by(event=event).all()
190 gross_revenue = event.calc_monthly_revenue()
191 ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)
192 if ticket_fees > ticket_fee_maximum:
193 ticket_fees = ticket_fee_maximum
194 net_revenue = gross_revenue - ticket_fees
195 payment_details = {
196 'tickets_sold': event.tickets_sold,
197 'gross_revenue': gross_revenue,
198 'net_revenue': net_revenue,
199 'amount_payable': ticket_fees
200 }
201 # save invoice as pdf
202 pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,
203 admin_info=admin_info, currency=currency, event=event,
204 ticket_fee_object=ticket_fee_object, payment_details=payment_details,
205 net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],
206 dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)
207 # save event_invoice info to DB
208
209 event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)
210 save_to_db(event_invoice)
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py
--- a/app/api/helpers/scheduled_jobs.py
+++ b/app/api/helpers/scheduled_jobs.py
@@ -164,14 +164,12 @@
def event_invoices_mark_due():
from app import current_app as app
with app.app_context():
- db.session.query(EventInvoice).\
- filter(EventInvoice.status == 'upcoming',
- EventInvoice.event.ends_at >= datetime.datetime.now(),
- (EventInvoice.created_at + datetime.timedelta(days=30) <=
- datetime.datetime.now())).\
- update({'status': 'due'})
-
- db.session.commit()
+ db.session.query(EventInvoice).filter(
+ EventInvoice.status == 'upcoming',
+ Event.id == EventInvoice.event_id,
+ Event.ends_at >= datetime.datetime.now(),
+ (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())
+ ).update({EventInvoice.status: 'due'}, synchronize_session=False)
def send_monthly_event_invoice():
| {"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -164,14 +164,12 @@\n def event_invoices_mark_due():\n from app import current_app as app\n with app.app_context():\n- db.session.query(EventInvoice).\\\n- filter(EventInvoice.status == 'upcoming',\n- EventInvoice.event.ends_at >= datetime.datetime.now(),\n- (EventInvoice.created_at + datetime.timedelta(days=30) <=\n- datetime.datetime.now())).\\\n- update({'status': 'due'})\n-\n- db.session.commit()\n+ db.session.query(EventInvoice).filter(\n+ EventInvoice.status == 'upcoming',\n+ Event.id == EventInvoice.event_id,\n+ Event.ends_at >= datetime.datetime.now(),\n+ (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())\n+ ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n \n \n def send_monthly_event_invoice():\n", "issue": "Cron job for marking event invoices due is wrong\nPR https://github.com/fossasia/open-event-server/pull/6166 is wrong. The query fails in every case\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L164-L174\r\n\r\nThis line is not valid SQLAlchemy syntax:\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L169\r\n\r\n`Neither 'InstrumentedAttribute' object nor 'Comparator' object associated with EventInvoice.event has an attribute 'ends_at'`\r\n\r\nIt should be `db.session.query(EventInvoice).join(Event).filter(EventInvoice.status == 'upcoming', Event.ends_at >= datetime.datetime.now()).all()`\r\n\r\nEach run of this task is failing\r\n\r\nFix it and write tests to ensure it is working\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom flask import render_template\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import TicketFees, get_fee\n\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = 
Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, upcoming_event_links)\n send_notif_after_event(owner.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()\n for incomplete_invoice in incomplete_invoices:\n 
if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n\n\ndef event_invoices_mark_due():\n from app import current_app as app\n with app.app_context():\n db.session.query(EventInvoice).\\\n filter(EventInvoice.status == 'upcoming',\n EventInvoice.event.ends_at >= datetime.datetime.now(),\n (EventInvoice.created_at + datetime.timedelta(days=30) <=\n datetime.datetime.now())).\\\n update({'status': 'due'})\n\n db.session.commit()\n\n\ndef send_monthly_event_invoice():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)\n if ticket_fees > ticket_fee_maximum:\n ticket_fees = ticket_fee_maximum\n net_revenue = gross_revenue - ticket_fees\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': ticket_fees\n }\n # save invoice as pdf\n pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,\n admin_info=admin_info, currency=currency, event=event,\n ticket_fee_object=ticket_fee_object, payment_details=payment_details,\n net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)\n save_to_db(event_invoice)\n", "path": "app/api/helpers/scheduled_jobs.py"}], "after_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom flask import render_template\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom 
app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import TicketFees, get_fee\n\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, upcoming_event_links)\n send_notif_after_event(owner.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id)\n\n if event.discount_code_id and 
event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n\n\ndef event_invoices_mark_due():\n from app import current_app as app\n with app.app_context():\n db.session.query(EventInvoice).filter(\n EventInvoice.status == 'upcoming',\n Event.id == EventInvoice.event_id,\n Event.ends_at >= datetime.datetime.now(),\n (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())\n ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n\n\ndef send_monthly_event_invoice():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)\n if ticket_fees > ticket_fee_maximum:\n ticket_fees = ticket_fee_maximum\n net_revenue = gross_revenue - ticket_fees\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': ticket_fees\n }\n # save invoice as pdf\n pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,\n 
admin_info=admin_info, currency=currency, event=event,\n ticket_fee_object=ticket_fee_object, payment_details=payment_details,\n net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)\n save_to_db(event_invoice)\n", "path": "app/api/helpers/scheduled_jobs.py"}]} | 2,982 | 234 |
gh_patches_debug_4799 | rasdani/github-patches | git_diff | quantumlib__Cirq-4319 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JSON backwards compatibility has been flushed
https://github.com/quantumlib/Cirq/pull/4099 cleared some deprecated classes. Serialized files have a much longer lifespan than code. It was always the intent to keep much longer-term backwards compatibility for data files, especially experimental results, which likely contain serialized `cirq.TrialResult` objects.
1) The old TrialResult.json should be kept around like the "inward" files.
2) The resolver should redirect "TrialResult" keys to `cirq.Result`. In some sense, this should have been done along with the initial rename.
--- END ISSUE ---
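A minimal sketch of the key redirection described in point 2, assuming legacy "TrialResult" payloads deserialize with the same fields as `cirq.Result`; the dictionary and helper below are illustrative, not Cirq's actual resolver:
```python
# Sketch only: alias the removed "TrialResult" key to cirq.Result so that
# previously serialized files keep resolving after the rename.
import cirq

LEGACY_ALIASES = {
    'TrialResult': cirq.Result,  # pre-rename key redirected to the renamed class
}


def resolver_with_legacy_keys(base_resolvers: dict) -> dict:
    """Return a copy of a class-resolver dictionary extended with legacy keys."""
    merged = dict(base_resolvers)
    merged.update(LEGACY_ALIASES)
    return merged
```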
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/json_resolver_cache.py`
Content:
```
1 # Copyright 2020 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import functools
16 from typing import Dict, TYPE_CHECKING
17
18 from cirq.protocols.json_serialization import ObjectFactory
19
20 if TYPE_CHECKING:
21 import cirq.ops.pauli_gates
22 import cirq.devices.unconstrained_device
23
24
25 @functools.lru_cache(maxsize=1)
26 def _class_resolver_dictionary() -> Dict[str, ObjectFactory]:
27 import cirq
28 from cirq.ops import raw_types
29 import pandas as pd
30 import numpy as np
31 from cirq.devices.noise_model import _NoNoiseModel
32 from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer
33 from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata
34
35 def _identity_operation_from_dict(qubits, **kwargs):
36 return cirq.identity_each(*qubits)
37
38 def single_qubit_matrix_gate(matrix):
39 if not isinstance(matrix, np.ndarray):
40 matrix = np.array(matrix, dtype=np.complex128)
41 return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))
42
43 def two_qubit_matrix_gate(matrix):
44 if not isinstance(matrix, np.ndarray):
45 matrix = np.array(matrix, dtype=np.complex128)
46 return cirq.MatrixGate(matrix, qid_shape=(2, 2))
47
48 import sympy
49
50 return {
51 'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,
52 'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,
53 'BitFlipChannel': cirq.BitFlipChannel,
54 'BitstringAccumulator': cirq.work.BitstringAccumulator,
55 'ProductState': cirq.ProductState,
56 'CCNotPowGate': cirq.CCNotPowGate,
57 'CCXPowGate': cirq.CCXPowGate,
58 'CCZPowGate': cirq.CCZPowGate,
59 'CNotPowGate': cirq.CNotPowGate,
60 'ControlledGate': cirq.ControlledGate,
61 'ControlledOperation': cirq.ControlledOperation,
62 'CSwapGate': cirq.CSwapGate,
63 'CXPowGate': cirq.CXPowGate,
64 'CZPowGate': cirq.CZPowGate,
65 'CrossEntropyResult': CrossEntropyResult,
66 'CrossEntropyResultDict': CrossEntropyResultDict,
67 'Circuit': cirq.Circuit,
68 'CircuitOperation': cirq.CircuitOperation,
69 'CliffordState': cirq.CliffordState,
70 'CliffordTableau': cirq.CliffordTableau,
71 'DepolarizingChannel': cirq.DepolarizingChannel,
72 'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,
73 'Duration': cirq.Duration,
74 'FrozenCircuit': cirq.FrozenCircuit,
75 'FSimGate': cirq.FSimGate,
76 'DensePauliString': cirq.DensePauliString,
77 'MutableDensePauliString': cirq.MutableDensePauliString,
78 'MutablePauliString': cirq.MutablePauliString,
79 'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,
80 'GateOperation': cirq.GateOperation,
81 'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,
82 'GlobalPhaseOperation': cirq.GlobalPhaseOperation,
83 'GridInteractionLayer': GridInteractionLayer,
84 'GridParallelXEBMetadata': GridParallelXEBMetadata,
85 'GridQid': cirq.GridQid,
86 'GridQubit': cirq.GridQubit,
87 'HPowGate': cirq.HPowGate,
88 'ISwapPowGate': cirq.ISwapPowGate,
89 'IdentityGate': cirq.IdentityGate,
90 'IdentityOperation': _identity_operation_from_dict,
91 'InitObsSetting': cirq.work.InitObsSetting,
92 'LinearDict': cirq.LinearDict,
93 'LineQubit': cirq.LineQubit,
94 'LineQid': cirq.LineQid,
95 'MatrixGate': cirq.MatrixGate,
96 'MeasurementKey': cirq.MeasurementKey,
97 'MeasurementGate': cirq.MeasurementGate,
98 '_MeasurementSpec': cirq.work._MeasurementSpec,
99 'Moment': cirq.Moment,
100 '_XEigenState': cirq.value.product_state._XEigenState, # type: ignore
101 '_YEigenState': cirq.value.product_state._YEigenState, # type: ignore
102 '_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore
103 '_NoNoiseModel': _NoNoiseModel,
104 'NamedQubit': cirq.NamedQubit,
105 'NamedQid': cirq.NamedQid,
106 'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,
107 '_PauliX': cirq.ops.pauli_gates._PauliX,
108 '_PauliY': cirq.ops.pauli_gates._PauliY,
109 '_PauliZ': cirq.ops.pauli_gates._PauliZ,
110 'ParamResolver': cirq.ParamResolver,
111 'ParallelGateOperation': cirq.ParallelGateOperation,
112 'PauliString': cirq.PauliString,
113 'PhaseDampingChannel': cirq.PhaseDampingChannel,
114 'PhaseFlipChannel': cirq.PhaseFlipChannel,
115 'PhaseGradientGate': cirq.PhaseGradientGate,
116 'PhasedFSimGate': cirq.PhasedFSimGate,
117 'PhasedISwapPowGate': cirq.PhasedISwapPowGate,
118 'PhasedXPowGate': cirq.PhasedXPowGate,
119 'PhasedXZGate': cirq.PhasedXZGate,
120 'RandomGateChannel': cirq.RandomGateChannel,
121 'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,
122 'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,
123 'ResetChannel': cirq.ResetChannel,
124 'SingleQubitMatrixGate': single_qubit_matrix_gate,
125 'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,
126 'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,
127 'StabilizerStateChForm': cirq.StabilizerStateChForm,
128 'SwapPowGate': cirq.SwapPowGate,
129 'SymmetricalQidPair': cirq.SymmetricalQidPair,
130 'TaggedOperation': cirq.TaggedOperation,
131 'Result': cirq.Result,
132 'Rx': cirq.Rx,
133 'Ry': cirq.Ry,
134 'Rz': cirq.Rz,
135 'TwoQubitMatrixGate': two_qubit_matrix_gate,
136 '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,
137 'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,
138 'VirtualTag': cirq.VirtualTag,
139 'WaitGate': cirq.WaitGate,
140 '_QubitAsQid': raw_types._QubitAsQid,
141 # The formatter keeps putting this back
142 # pylint: disable=line-too-long
143 'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,
144 # pylint: enable=line-too-long
145 'XPowGate': cirq.XPowGate,
146 'XXPowGate': cirq.XXPowGate,
147 'YPowGate': cirq.YPowGate,
148 'YYPowGate': cirq.YYPowGate,
149 'ZPowGate': cirq.ZPowGate,
150 'ZZPowGate': cirq.ZZPowGate,
151 # not a cirq class, but treated as one:
152 'pandas.DataFrame': pd.DataFrame,
153 'pandas.Index': pd.Index,
154 'pandas.MultiIndex': pd.MultiIndex.from_tuples,
155 'sympy.Symbol': sympy.Symbol,
156 'sympy.Add': lambda args: sympy.Add(*args),
157 'sympy.Mul': lambda args: sympy.Mul(*args),
158 'sympy.Pow': lambda args: sympy.Pow(*args),
159 'sympy.Float': lambda approx: sympy.Float(approx),
160 'sympy.Integer': sympy.Integer,
161 'sympy.Rational': sympy.Rational,
162 'sympy.pi': lambda: sympy.pi,
163 'sympy.E': lambda: sympy.E,
164 'sympy.EulerGamma': lambda: sympy.EulerGamma,
165 'complex': complex,
166 }
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/json_resolver_cache.py b/cirq-core/cirq/json_resolver_cache.py
--- a/cirq-core/cirq/json_resolver_cache.py
+++ b/cirq-core/cirq/json_resolver_cache.py
@@ -128,6 +128,7 @@
'SwapPowGate': cirq.SwapPowGate,
'SymmetricalQidPair': cirq.SymmetricalQidPair,
'TaggedOperation': cirq.TaggedOperation,
+ 'TrialResult': cirq.Result, # keep support for Cirq < 0.11.
'Result': cirq.Result,
'Rx': cirq.Rx,
'Ry': cirq.Ry,
| {"golden_diff": "diff --git a/cirq-core/cirq/json_resolver_cache.py b/cirq-core/cirq/json_resolver_cache.py\n--- a/cirq-core/cirq/json_resolver_cache.py\n+++ b/cirq-core/cirq/json_resolver_cache.py\n@@ -128,6 +128,7 @@\n 'SwapPowGate': cirq.SwapPowGate,\n 'SymmetricalQidPair': cirq.SymmetricalQidPair,\n 'TaggedOperation': cirq.TaggedOperation,\n+ 'TrialResult': cirq.Result, # keep support for Cirq < 0.11.\n 'Result': cirq.Result,\n 'Rx': cirq.Rx,\n 'Ry': cirq.Ry,\n", "issue": "JSON backwards compatibility has been flushed\nhttps://github.com/quantumlib/Cirq/pull/4099 cleared some deprecated classes. Serialized files have a much longer lifespan than code. It was always the intent to keep much longer-term backwards compatibility for data files, especially experimental results which likely have `cirq.TrialResult` classes.\r\n\r\n1) the old TrialResult.json should be kept around like the \"inward\" files \r\n2) The resolver should re-direct \"TrialResult\" keys to `cirq.Result`. In some sense, this should have been done along with the initial rename. \n", "before_files": [{"content": "# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom typing import Dict, TYPE_CHECKING\n\nfrom cirq.protocols.json_serialization import ObjectFactory\n\nif TYPE_CHECKING:\n import cirq.ops.pauli_gates\n import cirq.devices.unconstrained_device\n\n\[email protected]_cache(maxsize=1)\ndef _class_resolver_dictionary() -> Dict[str, ObjectFactory]:\n import cirq\n from cirq.ops import raw_types\n import pandas as pd\n import numpy as np\n from cirq.devices.noise_model import _NoNoiseModel\n from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer\n from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata\n\n def _identity_operation_from_dict(qubits, **kwargs):\n return cirq.identity_each(*qubits)\n\n def single_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))\n\n def two_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(2, 2))\n\n import sympy\n\n return {\n 'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,\n 'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,\n 'BitFlipChannel': cirq.BitFlipChannel,\n 'BitstringAccumulator': cirq.work.BitstringAccumulator,\n 'ProductState': cirq.ProductState,\n 'CCNotPowGate': cirq.CCNotPowGate,\n 'CCXPowGate': cirq.CCXPowGate,\n 'CCZPowGate': cirq.CCZPowGate,\n 'CNotPowGate': cirq.CNotPowGate,\n 'ControlledGate': cirq.ControlledGate,\n 'ControlledOperation': cirq.ControlledOperation,\n 'CSwapGate': cirq.CSwapGate,\n 'CXPowGate': cirq.CXPowGate,\n 'CZPowGate': cirq.CZPowGate,\n 'CrossEntropyResult': CrossEntropyResult,\n 'CrossEntropyResultDict': CrossEntropyResultDict,\n 'Circuit': cirq.Circuit,\n 
'CircuitOperation': cirq.CircuitOperation,\n 'CliffordState': cirq.CliffordState,\n 'CliffordTableau': cirq.CliffordTableau,\n 'DepolarizingChannel': cirq.DepolarizingChannel,\n 'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,\n 'Duration': cirq.Duration,\n 'FrozenCircuit': cirq.FrozenCircuit,\n 'FSimGate': cirq.FSimGate,\n 'DensePauliString': cirq.DensePauliString,\n 'MutableDensePauliString': cirq.MutableDensePauliString,\n 'MutablePauliString': cirq.MutablePauliString,\n 'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,\n 'GateOperation': cirq.GateOperation,\n 'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,\n 'GlobalPhaseOperation': cirq.GlobalPhaseOperation,\n 'GridInteractionLayer': GridInteractionLayer,\n 'GridParallelXEBMetadata': GridParallelXEBMetadata,\n 'GridQid': cirq.GridQid,\n 'GridQubit': cirq.GridQubit,\n 'HPowGate': cirq.HPowGate,\n 'ISwapPowGate': cirq.ISwapPowGate,\n 'IdentityGate': cirq.IdentityGate,\n 'IdentityOperation': _identity_operation_from_dict,\n 'InitObsSetting': cirq.work.InitObsSetting,\n 'LinearDict': cirq.LinearDict,\n 'LineQubit': cirq.LineQubit,\n 'LineQid': cirq.LineQid,\n 'MatrixGate': cirq.MatrixGate,\n 'MeasurementKey': cirq.MeasurementKey,\n 'MeasurementGate': cirq.MeasurementGate,\n '_MeasurementSpec': cirq.work._MeasurementSpec,\n 'Moment': cirq.Moment,\n '_XEigenState': cirq.value.product_state._XEigenState, # type: ignore\n '_YEigenState': cirq.value.product_state._YEigenState, # type: ignore\n '_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore\n '_NoNoiseModel': _NoNoiseModel,\n 'NamedQubit': cirq.NamedQubit,\n 'NamedQid': cirq.NamedQid,\n 'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,\n '_PauliX': cirq.ops.pauli_gates._PauliX,\n '_PauliY': cirq.ops.pauli_gates._PauliY,\n '_PauliZ': cirq.ops.pauli_gates._PauliZ,\n 'ParamResolver': cirq.ParamResolver,\n 'ParallelGateOperation': cirq.ParallelGateOperation,\n 'PauliString': cirq.PauliString,\n 'PhaseDampingChannel': cirq.PhaseDampingChannel,\n 'PhaseFlipChannel': cirq.PhaseFlipChannel,\n 'PhaseGradientGate': cirq.PhaseGradientGate,\n 'PhasedFSimGate': cirq.PhasedFSimGate,\n 'PhasedISwapPowGate': cirq.PhasedISwapPowGate,\n 'PhasedXPowGate': cirq.PhasedXPowGate,\n 'PhasedXZGate': cirq.PhasedXZGate,\n 'RandomGateChannel': cirq.RandomGateChannel,\n 'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,\n 'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,\n 'ResetChannel': cirq.ResetChannel,\n 'SingleQubitMatrixGate': single_qubit_matrix_gate,\n 'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,\n 'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,\n 'StabilizerStateChForm': cirq.StabilizerStateChForm,\n 'SwapPowGate': cirq.SwapPowGate,\n 'SymmetricalQidPair': cirq.SymmetricalQidPair,\n 'TaggedOperation': cirq.TaggedOperation,\n 'Result': cirq.Result,\n 'Rx': cirq.Rx,\n 'Ry': cirq.Ry,\n 'Rz': cirq.Rz,\n 'TwoQubitMatrixGate': two_qubit_matrix_gate,\n '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,\n 'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,\n 'VirtualTag': cirq.VirtualTag,\n 'WaitGate': cirq.WaitGate,\n '_QubitAsQid': raw_types._QubitAsQid,\n # The formatter keeps putting this back\n # pylint: disable=line-too-long\n 'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,\n # pylint: enable=line-too-long\n 'XPowGate': cirq.XPowGate,\n 
'XXPowGate': cirq.XXPowGate,\n 'YPowGate': cirq.YPowGate,\n 'YYPowGate': cirq.YYPowGate,\n 'ZPowGate': cirq.ZPowGate,\n 'ZZPowGate': cirq.ZZPowGate,\n # not a cirq class, but treated as one:\n 'pandas.DataFrame': pd.DataFrame,\n 'pandas.Index': pd.Index,\n 'pandas.MultiIndex': pd.MultiIndex.from_tuples,\n 'sympy.Symbol': sympy.Symbol,\n 'sympy.Add': lambda args: sympy.Add(*args),\n 'sympy.Mul': lambda args: sympy.Mul(*args),\n 'sympy.Pow': lambda args: sympy.Pow(*args),\n 'sympy.Float': lambda approx: sympy.Float(approx),\n 'sympy.Integer': sympy.Integer,\n 'sympy.Rational': sympy.Rational,\n 'sympy.pi': lambda: sympy.pi,\n 'sympy.E': lambda: sympy.E,\n 'sympy.EulerGamma': lambda: sympy.EulerGamma,\n 'complex': complex,\n }\n", "path": "cirq-core/cirq/json_resolver_cache.py"}], "after_files": [{"content": "# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom typing import Dict, TYPE_CHECKING\n\nfrom cirq.protocols.json_serialization import ObjectFactory\n\nif TYPE_CHECKING:\n import cirq.ops.pauli_gates\n import cirq.devices.unconstrained_device\n\n\[email protected]_cache(maxsize=1)\ndef _class_resolver_dictionary() -> Dict[str, ObjectFactory]:\n import cirq\n from cirq.ops import raw_types\n import pandas as pd\n import numpy as np\n from cirq.devices.noise_model import _NoNoiseModel\n from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer\n from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata\n\n def _identity_operation_from_dict(qubits, **kwargs):\n return cirq.identity_each(*qubits)\n\n def single_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))\n\n def two_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(2, 2))\n\n import sympy\n\n return {\n 'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,\n 'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,\n 'BitFlipChannel': cirq.BitFlipChannel,\n 'BitstringAccumulator': cirq.work.BitstringAccumulator,\n 'ProductState': cirq.ProductState,\n 'CCNotPowGate': cirq.CCNotPowGate,\n 'CCXPowGate': cirq.CCXPowGate,\n 'CCZPowGate': cirq.CCZPowGate,\n 'CNotPowGate': cirq.CNotPowGate,\n 'ControlledGate': cirq.ControlledGate,\n 'ControlledOperation': cirq.ControlledOperation,\n 'CSwapGate': cirq.CSwapGate,\n 'CXPowGate': cirq.CXPowGate,\n 'CZPowGate': cirq.CZPowGate,\n 'CrossEntropyResult': CrossEntropyResult,\n 'CrossEntropyResultDict': CrossEntropyResultDict,\n 'Circuit': cirq.Circuit,\n 'CircuitOperation': cirq.CircuitOperation,\n 'CliffordState': cirq.CliffordState,\n 'CliffordTableau': cirq.CliffordTableau,\n 'DepolarizingChannel': cirq.DepolarizingChannel,\n 'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,\n 'Duration': cirq.Duration,\n 'FrozenCircuit': 
cirq.FrozenCircuit,\n 'FSimGate': cirq.FSimGate,\n 'DensePauliString': cirq.DensePauliString,\n 'MutableDensePauliString': cirq.MutableDensePauliString,\n 'MutablePauliString': cirq.MutablePauliString,\n 'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,\n 'GateOperation': cirq.GateOperation,\n 'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,\n 'GlobalPhaseOperation': cirq.GlobalPhaseOperation,\n 'GridInteractionLayer': GridInteractionLayer,\n 'GridParallelXEBMetadata': GridParallelXEBMetadata,\n 'GridQid': cirq.GridQid,\n 'GridQubit': cirq.GridQubit,\n 'HPowGate': cirq.HPowGate,\n 'ISwapPowGate': cirq.ISwapPowGate,\n 'IdentityGate': cirq.IdentityGate,\n 'IdentityOperation': _identity_operation_from_dict,\n 'InitObsSetting': cirq.work.InitObsSetting,\n 'LinearDict': cirq.LinearDict,\n 'LineQubit': cirq.LineQubit,\n 'LineQid': cirq.LineQid,\n 'MatrixGate': cirq.MatrixGate,\n 'MeasurementKey': cirq.MeasurementKey,\n 'MeasurementGate': cirq.MeasurementGate,\n '_MeasurementSpec': cirq.work._MeasurementSpec,\n 'Moment': cirq.Moment,\n '_XEigenState': cirq.value.product_state._XEigenState, # type: ignore\n '_YEigenState': cirq.value.product_state._YEigenState, # type: ignore\n '_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore\n '_NoNoiseModel': _NoNoiseModel,\n 'NamedQubit': cirq.NamedQubit,\n 'NamedQid': cirq.NamedQid,\n 'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,\n '_PauliX': cirq.ops.pauli_gates._PauliX,\n '_PauliY': cirq.ops.pauli_gates._PauliY,\n '_PauliZ': cirq.ops.pauli_gates._PauliZ,\n 'ParamResolver': cirq.ParamResolver,\n 'ParallelGateOperation': cirq.ParallelGateOperation,\n 'PauliString': cirq.PauliString,\n 'PhaseDampingChannel': cirq.PhaseDampingChannel,\n 'PhaseFlipChannel': cirq.PhaseFlipChannel,\n 'PhaseGradientGate': cirq.PhaseGradientGate,\n 'PhasedFSimGate': cirq.PhasedFSimGate,\n 'PhasedISwapPowGate': cirq.PhasedISwapPowGate,\n 'PhasedXPowGate': cirq.PhasedXPowGate,\n 'PhasedXZGate': cirq.PhasedXZGate,\n 'RandomGateChannel': cirq.RandomGateChannel,\n 'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,\n 'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,\n 'ResetChannel': cirq.ResetChannel,\n 'SingleQubitMatrixGate': single_qubit_matrix_gate,\n 'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,\n 'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,\n 'StabilizerStateChForm': cirq.StabilizerStateChForm,\n 'SwapPowGate': cirq.SwapPowGate,\n 'SymmetricalQidPair': cirq.SymmetricalQidPair,\n 'TaggedOperation': cirq.TaggedOperation,\n 'TrialResult': cirq.Result, # keep support for Cirq < 0.11.\n 'Result': cirq.Result,\n 'Rx': cirq.Rx,\n 'Ry': cirq.Ry,\n 'Rz': cirq.Rz,\n 'TwoQubitMatrixGate': two_qubit_matrix_gate,\n '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,\n 'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,\n 'VirtualTag': cirq.VirtualTag,\n 'WaitGate': cirq.WaitGate,\n '_QubitAsQid': raw_types._QubitAsQid,\n # The formatter keeps putting this back\n # pylint: disable=line-too-long\n 'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,\n # pylint: enable=line-too-long\n 'XPowGate': cirq.XPowGate,\n 'XXPowGate': cirq.XXPowGate,\n 'YPowGate': cirq.YPowGate,\n 'YYPowGate': cirq.YYPowGate,\n 'ZPowGate': cirq.ZPowGate,\n 'ZZPowGate': cirq.ZZPowGate,\n # not a cirq class, but treated as one:\n 'pandas.DataFrame': 
pd.DataFrame,\n 'pandas.Index': pd.Index,\n 'pandas.MultiIndex': pd.MultiIndex.from_tuples,\n 'sympy.Symbol': sympy.Symbol,\n 'sympy.Add': lambda args: sympy.Add(*args),\n 'sympy.Mul': lambda args: sympy.Mul(*args),\n 'sympy.Pow': lambda args: sympy.Pow(*args),\n 'sympy.Float': lambda approx: sympy.Float(approx),\n 'sympy.Integer': sympy.Integer,\n 'sympy.Rational': sympy.Rational,\n 'sympy.pi': lambda: sympy.pi,\n 'sympy.E': lambda: sympy.E,\n 'sympy.EulerGamma': lambda: sympy.EulerGamma,\n 'complex': complex,\n }\n", "path": "cirq-core/cirq/json_resolver_cache.py"}]} | 2,780 | 155 |
gh_patches_debug_4528 | rasdani/github-patches | git_diff | ansible-collections__community.general-2239 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Examples of nomad_job_info not working
### Summary
The examples for nomad_job_info are using the module nomad_job instead of nomad_job_info.
### Issue Type
Documentation Report
### Component Name
nomad_job_info.py
### Ansible Version
```console (paste below)
$ ansible --version
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
_No response_
### Additional Information
Example:

```yaml
- name: List Nomad jobs
  community.general.nomad_job:
    host: localhost
  register: result
```

Instead of:

```yaml
- name: List Nomad jobs
  community.general.nomad_job_info:
    host: localhost
  register: result
```
### Code of Conduct
I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/clustering/nomad/nomad_job_info.py`
Content:
```
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2020, FERREIRA Christophe <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10 DOCUMENTATION = '''
11 ---
12 module: nomad_job_info
13 author: FERREIRA Christophe (@chris93111)
14 version_added: "1.3.0"
15 short_description: Get Nomad Jobs info
16 description:
17 - Get info for one Nomad job.
18 - List Nomad jobs.
19 requirements:
20 - python-nomad
21 extends_documentation_fragment:
22 - community.general.nomad
23 options:
24 name:
25 description:
26 - Name of job for Get info.
27 - If not specified, lists all jobs.
28 type: str
29 notes:
30 - C(check_mode) is supported.
31 seealso:
32 - name: Nomad jobs documentation
33 description: Complete documentation for Nomad API jobs.
34 link: https://www.nomadproject.io/api-docs/jobs/
35 '''
36
37 EXAMPLES = '''
38 - name: Get info for job awx
39 community.general.nomad_job:
40 host: localhost
41 name: awx
42 register: result
43
44 - name: List Nomad jobs
45 community.general.nomad_job:
46 host: localhost
47 register: result
48
49 '''
50
51 RETURN = '''
52 result:
53 description: List with dictionary contains jobs info
54 returned: success
55 type: list
56 sample: [
57 {
58 "Affinities": null,
59 "AllAtOnce": false,
60 "Constraints": null,
61 "ConsulToken": "",
62 "CreateIndex": 13,
63 "Datacenters": [
64 "dc1"
65 ],
66 "Dispatched": false,
67 "ID": "example",
68 "JobModifyIndex": 13,
69 "Meta": null,
70 "ModifyIndex": 13,
71 "Multiregion": null,
72 "Name": "example",
73 "Namespace": "default",
74 "NomadTokenID": "",
75 "ParameterizedJob": null,
76 "ParentID": "",
77 "Payload": null,
78 "Periodic": null,
79 "Priority": 50,
80 "Region": "global",
81 "Spreads": null,
82 "Stable": false,
83 "Status": "pending",
84 "StatusDescription": "",
85 "Stop": false,
86 "SubmitTime": 1602244370615307000,
87 "TaskGroups": [
88 {
89 "Affinities": null,
90 "Constraints": null,
91 "Count": 1,
92 "EphemeralDisk": {
93 "Migrate": false,
94 "SizeMB": 300,
95 "Sticky": false
96 },
97 "Meta": null,
98 "Migrate": {
99 "HealthCheck": "checks",
100 "HealthyDeadline": 300000000000,
101 "MaxParallel": 1,
102 "MinHealthyTime": 10000000000
103 },
104 "Name": "cache",
105 "Networks": null,
106 "ReschedulePolicy": {
107 "Attempts": 0,
108 "Delay": 30000000000,
109 "DelayFunction": "exponential",
110 "Interval": 0,
111 "MaxDelay": 3600000000000,
112 "Unlimited": true
113 },
114 "RestartPolicy": {
115 "Attempts": 3,
116 "Delay": 15000000000,
117 "Interval": 1800000000000,
118 "Mode": "fail"
119 },
120 "Scaling": null,
121 "Services": null,
122 "ShutdownDelay": null,
123 "Spreads": null,
124 "StopAfterClientDisconnect": null,
125 "Tasks": [
126 {
127 "Affinities": null,
128 "Artifacts": null,
129 "CSIPluginConfig": null,
130 "Config": {
131 "image": "redis:3.2",
132 "port_map": [
133 {
134 "db": 6379.0
135 }
136 ]
137 },
138 "Constraints": null,
139 "DispatchPayload": null,
140 "Driver": "docker",
141 "Env": null,
142 "KillSignal": "",
143 "KillTimeout": 5000000000,
144 "Kind": "",
145 "Leader": false,
146 "Lifecycle": null,
147 "LogConfig": {
148 "MaxFileSizeMB": 10,
149 "MaxFiles": 10
150 },
151 "Meta": null,
152 "Name": "redis",
153 "Resources": {
154 "CPU": 500,
155 "Devices": null,
156 "DiskMB": 0,
157 "IOPS": 0,
158 "MemoryMB": 256,
159 "Networks": [
160 {
161 "CIDR": "",
162 "DNS": null,
163 "Device": "",
164 "DynamicPorts": [
165 {
166 "HostNetwork": "default",
167 "Label": "db",
168 "To": 0,
169 "Value": 0
170 }
171 ],
172 "IP": "",
173 "MBits": 10,
174 "Mode": "",
175 "ReservedPorts": null
176 }
177 ]
178 },
179 "RestartPolicy": {
180 "Attempts": 3,
181 "Delay": 15000000000,
182 "Interval": 1800000000000,
183 "Mode": "fail"
184 },
185 "Services": [
186 {
187 "AddressMode": "auto",
188 "CanaryMeta": null,
189 "CanaryTags": null,
190 "Checks": [
191 {
192 "AddressMode": "",
193 "Args": null,
194 "CheckRestart": null,
195 "Command": "",
196 "Expose": false,
197 "FailuresBeforeCritical": 0,
198 "GRPCService": "",
199 "GRPCUseTLS": false,
200 "Header": null,
201 "InitialStatus": "",
202 "Interval": 10000000000,
203 "Method": "",
204 "Name": "alive",
205 "Path": "",
206 "PortLabel": "",
207 "Protocol": "",
208 "SuccessBeforePassing": 0,
209 "TLSSkipVerify": false,
210 "TaskName": "",
211 "Timeout": 2000000000,
212 "Type": "tcp"
213 }
214 ],
215 "Connect": null,
216 "EnableTagOverride": false,
217 "Meta": null,
218 "Name": "redis-cache",
219 "PortLabel": "db",
220 "Tags": [
221 "global",
222 "cache"
223 ],
224 "TaskName": ""
225 }
226 ],
227 "ShutdownDelay": 0,
228 "Templates": null,
229 "User": "",
230 "Vault": null,
231 "VolumeMounts": null
232 }
233 ],
234 "Update": {
235 "AutoPromote": false,
236 "AutoRevert": false,
237 "Canary": 0,
238 "HealthCheck": "checks",
239 "HealthyDeadline": 180000000000,
240 "MaxParallel": 1,
241 "MinHealthyTime": 10000000000,
242 "ProgressDeadline": 600000000000,
243 "Stagger": 30000000000
244 },
245 "Volumes": null
246 }
247 ],
248 "Type": "service",
249 "Update": {
250 "AutoPromote": false,
251 "AutoRevert": false,
252 "Canary": 0,
253 "HealthCheck": "",
254 "HealthyDeadline": 0,
255 "MaxParallel": 1,
256 "MinHealthyTime": 0,
257 "ProgressDeadline": 0,
258 "Stagger": 30000000000
259 },
260 "VaultNamespace": "",
261 "VaultToken": "",
262 "Version": 0
263 }
264 ]
265
266 '''
267
268
269 import os
270 import json
271
272 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
273 from ansible.module_utils._text import to_native
274
275 import_nomad = None
276 try:
277 import nomad
278 import_nomad = True
279 except ImportError:
280 import_nomad = False
281
282
283 def run():
284 module = AnsibleModule(
285 argument_spec=dict(
286 host=dict(required=True, type='str'),
287 use_ssl=dict(type='bool', default=True),
288 timeout=dict(type='int', default=5),
289 validate_certs=dict(type='bool', default=True),
290 client_cert=dict(type='path', default=None),
291 client_key=dict(type='path', default=None),
292 namespace=dict(type='str', default=None),
293 name=dict(type='str', default=None),
294 token=dict(type='str', default=None, no_log=True)
295 ),
296 supports_check_mode=True
297 )
298
299 if not import_nomad:
300 module.fail_json(msg=missing_required_lib("python-nomad"))
301
302 certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
303
304 nomad_client = nomad.Nomad(
305 host=module.params.get('host'),
306 secure=module.params.get('use_ssl'),
307 timeout=module.params.get('timeout'),
308 verify=module.params.get('validate_certs'),
309 cert=certificate_ssl,
310 namespace=module.params.get('namespace'),
311 token=module.params.get('token')
312 )
313
314 changed = False
315 result = list()
316 try:
317 job_list = nomad_client.jobs.get_jobs()
318 for job in job_list:
319 result.append(nomad_client.job.get_job(job.get('ID')))
320 except Exception as e:
321 module.fail_json(msg=to_native(e))
322
323 if module.params.get('name'):
324 filter = list()
325 try:
326 for job in result:
327 if job.get('ID') == module.params.get('name'):
328 filter.append(job)
329 result = filter
330 if not filter:
331 module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
332 except Exception as e:
333 module.fail_json(msg=to_native(e))
334
335 module.exit_json(changed=changed, result=result)
336
337
338 def main():
339
340 run()
341
342
343 if __name__ == "__main__":
344 main()
345
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py
--- a/plugins/modules/clustering/nomad/nomad_job_info.py
+++ b/plugins/modules/clustering/nomad/nomad_job_info.py
@@ -36,13 +36,13 @@
EXAMPLES = '''
- name: Get info for job awx
- community.general.nomad_job:
+ community.general.nomad_job_info:
host: localhost
name: awx
register: result
- name: List Nomad jobs
- community.general.nomad_job:
+ community.general.nomad_job_info:
host: localhost
register: result
| {"golden_diff": "diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py\n--- a/plugins/modules/clustering/nomad/nomad_job_info.py\n+++ b/plugins/modules/clustering/nomad/nomad_job_info.py\n@@ -36,13 +36,13 @@\n \n EXAMPLES = '''\n - name: Get info for job awx\n- community.general.nomad_job:\n+ community.general.nomad_job_info:\n host: localhost\n name: awx\n register: result\n \n - name: List Nomad jobs\n- community.general.nomad_job:\n+ community.general.nomad_job_info:\n host: localhost\n register: result\n", "issue": "Examples of nomad_job_info not working\n### Summary\n\nThe examples for nomad_job_info are using the module nomad_job instead of nomad_job_info.\n\n### Issue Type\n\nDocumentation Report\n\n### Component Name\n\nnomad_job_info.py\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\n\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\n_No response_\n\n### Additional Information\n\nExample:\r\n- name: List Nomad jobs\r\n community.general.nomad_job:\r\n host: localhost\r\n register: result\r\n\r\nInstead of:\r\n- name: List Nomad jobs\r\n community.general.nomad_job_info:\r\n host: localhost\r\n register: result\r\n\n\n### Code of Conduct\n\nI agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2020, FERREIRA Christophe <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: nomad_job_info\nauthor: FERREIRA Christophe (@chris93111)\nversion_added: \"1.3.0\"\nshort_description: Get Nomad Jobs info\ndescription:\n - Get info for one Nomad job.\n - List Nomad jobs.\nrequirements:\n - python-nomad\nextends_documentation_fragment:\n - community.general.nomad\noptions:\n name:\n description:\n - Name of job for Get info.\n - If not specified, lists all jobs.\n type: str\nnotes:\n - C(check_mode) is supported.\nseealso:\n - name: Nomad jobs documentation\n description: Complete documentation for Nomad API jobs.\n link: https://www.nomadproject.io/api-docs/jobs/\n'''\n\nEXAMPLES = '''\n- name: Get info for job awx\n community.general.nomad_job:\n host: localhost\n name: awx\n register: result\n\n- name: List Nomad jobs\n community.general.nomad_job:\n host: localhost\n register: result\n\n'''\n\nRETURN = '''\nresult:\n description: List with dictionary contains jobs info\n returned: success\n type: list\n sample: [\n {\n \"Affinities\": null,\n \"AllAtOnce\": false,\n \"Constraints\": null,\n \"ConsulToken\": \"\",\n \"CreateIndex\": 13,\n \"Datacenters\": [\n \"dc1\"\n ],\n \"Dispatched\": false,\n \"ID\": \"example\",\n \"JobModifyIndex\": 13,\n \"Meta\": null,\n \"ModifyIndex\": 13,\n \"Multiregion\": null,\n \"Name\": \"example\",\n \"Namespace\": \"default\",\n \"NomadTokenID\": \"\",\n \"ParameterizedJob\": null,\n \"ParentID\": \"\",\n \"Payload\": null,\n \"Periodic\": null,\n \"Priority\": 50,\n \"Region\": \"global\",\n \"Spreads\": null,\n \"Stable\": false,\n \"Status\": \"pending\",\n \"StatusDescription\": \"\",\n \"Stop\": false,\n \"SubmitTime\": 1602244370615307000,\n \"TaskGroups\": [\n {\n \"Affinities\": null,\n \"Constraints\": null,\n \"Count\": 1,\n \"EphemeralDisk\": {\n \"Migrate\": false,\n \"SizeMB\": 300,\n \"Sticky\": false\n 
},\n \"Meta\": null,\n \"Migrate\": {\n \"HealthCheck\": \"checks\",\n \"HealthyDeadline\": 300000000000,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 10000000000\n },\n \"Name\": \"cache\",\n \"Networks\": null,\n \"ReschedulePolicy\": {\n \"Attempts\": 0,\n \"Delay\": 30000000000,\n \"DelayFunction\": \"exponential\",\n \"Interval\": 0,\n \"MaxDelay\": 3600000000000,\n \"Unlimited\": true\n },\n \"RestartPolicy\": {\n \"Attempts\": 3,\n \"Delay\": 15000000000,\n \"Interval\": 1800000000000,\n \"Mode\": \"fail\"\n },\n \"Scaling\": null,\n \"Services\": null,\n \"ShutdownDelay\": null,\n \"Spreads\": null,\n \"StopAfterClientDisconnect\": null,\n \"Tasks\": [\n {\n \"Affinities\": null,\n \"Artifacts\": null,\n \"CSIPluginConfig\": null,\n \"Config\": {\n \"image\": \"redis:3.2\",\n \"port_map\": [\n {\n \"db\": 6379.0\n }\n ]\n },\n \"Constraints\": null,\n \"DispatchPayload\": null,\n \"Driver\": \"docker\",\n \"Env\": null,\n \"KillSignal\": \"\",\n \"KillTimeout\": 5000000000,\n \"Kind\": \"\",\n \"Leader\": false,\n \"Lifecycle\": null,\n \"LogConfig\": {\n \"MaxFileSizeMB\": 10,\n \"MaxFiles\": 10\n },\n \"Meta\": null,\n \"Name\": \"redis\",\n \"Resources\": {\n \"CPU\": 500,\n \"Devices\": null,\n \"DiskMB\": 0,\n \"IOPS\": 0,\n \"MemoryMB\": 256,\n \"Networks\": [\n {\n \"CIDR\": \"\",\n \"DNS\": null,\n \"Device\": \"\",\n \"DynamicPorts\": [\n {\n \"HostNetwork\": \"default\",\n \"Label\": \"db\",\n \"To\": 0,\n \"Value\": 0\n }\n ],\n \"IP\": \"\",\n \"MBits\": 10,\n \"Mode\": \"\",\n \"ReservedPorts\": null\n }\n ]\n },\n \"RestartPolicy\": {\n \"Attempts\": 3,\n \"Delay\": 15000000000,\n \"Interval\": 1800000000000,\n \"Mode\": \"fail\"\n },\n \"Services\": [\n {\n \"AddressMode\": \"auto\",\n \"CanaryMeta\": null,\n \"CanaryTags\": null,\n \"Checks\": [\n {\n \"AddressMode\": \"\",\n \"Args\": null,\n \"CheckRestart\": null,\n \"Command\": \"\",\n \"Expose\": false,\n \"FailuresBeforeCritical\": 0,\n \"GRPCService\": \"\",\n \"GRPCUseTLS\": false,\n \"Header\": null,\n \"InitialStatus\": \"\",\n \"Interval\": 10000000000,\n \"Method\": \"\",\n \"Name\": \"alive\",\n \"Path\": \"\",\n \"PortLabel\": \"\",\n \"Protocol\": \"\",\n \"SuccessBeforePassing\": 0,\n \"TLSSkipVerify\": false,\n \"TaskName\": \"\",\n \"Timeout\": 2000000000,\n \"Type\": \"tcp\"\n }\n ],\n \"Connect\": null,\n \"EnableTagOverride\": false,\n \"Meta\": null,\n \"Name\": \"redis-cache\",\n \"PortLabel\": \"db\",\n \"Tags\": [\n \"global\",\n \"cache\"\n ],\n \"TaskName\": \"\"\n }\n ],\n \"ShutdownDelay\": 0,\n \"Templates\": null,\n \"User\": \"\",\n \"Vault\": null,\n \"VolumeMounts\": null\n }\n ],\n \"Update\": {\n \"AutoPromote\": false,\n \"AutoRevert\": false,\n \"Canary\": 0,\n \"HealthCheck\": \"checks\",\n \"HealthyDeadline\": 180000000000,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 10000000000,\n \"ProgressDeadline\": 600000000000,\n \"Stagger\": 30000000000\n },\n \"Volumes\": null\n }\n ],\n \"Type\": \"service\",\n \"Update\": {\n \"AutoPromote\": false,\n \"AutoRevert\": false,\n \"Canary\": 0,\n \"HealthCheck\": \"\",\n \"HealthyDeadline\": 0,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 0,\n \"ProgressDeadline\": 0,\n \"Stagger\": 30000000000\n },\n \"VaultNamespace\": \"\",\n \"VaultToken\": \"\",\n \"Version\": 0\n }\n ]\n\n'''\n\n\nimport os\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible.module_utils._text import to_native\n\nimport_nomad = None\ntry:\n import nomad\n import_nomad = True\nexcept ImportError:\n import_nomad = 
False\n\n\ndef run():\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(required=True, type='str'),\n use_ssl=dict(type='bool', default=True),\n timeout=dict(type='int', default=5),\n validate_certs=dict(type='bool', default=True),\n client_cert=dict(type='path', default=None),\n client_key=dict(type='path', default=None),\n namespace=dict(type='str', default=None),\n name=dict(type='str', default=None),\n token=dict(type='str', default=None, no_log=True)\n ),\n supports_check_mode=True\n )\n\n if not import_nomad:\n module.fail_json(msg=missing_required_lib(\"python-nomad\"))\n\n certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))\n\n nomad_client = nomad.Nomad(\n host=module.params.get('host'),\n secure=module.params.get('use_ssl'),\n timeout=module.params.get('timeout'),\n verify=module.params.get('validate_certs'),\n cert=certificate_ssl,\n namespace=module.params.get('namespace'),\n token=module.params.get('token')\n )\n\n changed = False\n result = list()\n try:\n job_list = nomad_client.jobs.get_jobs()\n for job in job_list:\n result.append(nomad_client.job.get_job(job.get('ID')))\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n if module.params.get('name'):\n filter = list()\n try:\n for job in result:\n if job.get('ID') == module.params.get('name'):\n filter.append(job)\n result = filter\n if not filter:\n module.fail_json(msg=\"Couldn't find Job with id \" + str(module.params.get('name')))\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n module.exit_json(changed=changed, result=result)\n\n\ndef main():\n\n run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/clustering/nomad/nomad_job_info.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2020, FERREIRA Christophe <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: nomad_job_info\nauthor: FERREIRA Christophe (@chris93111)\nversion_added: \"1.3.0\"\nshort_description: Get Nomad Jobs info\ndescription:\n - Get info for one Nomad job.\n - List Nomad jobs.\nrequirements:\n - python-nomad\nextends_documentation_fragment:\n - community.general.nomad\noptions:\n name:\n description:\n - Name of job for Get info.\n - If not specified, lists all jobs.\n type: str\nnotes:\n - C(check_mode) is supported.\nseealso:\n - name: Nomad jobs documentation\n description: Complete documentation for Nomad API jobs.\n link: https://www.nomadproject.io/api-docs/jobs/\n'''\n\nEXAMPLES = '''\n- name: Get info for job awx\n community.general.nomad_job_info:\n host: localhost\n name: awx\n register: result\n\n- name: List Nomad jobs\n community.general.nomad_job_info:\n host: localhost\n register: result\n\n'''\n\nRETURN = '''\nresult:\n description: List with dictionary contains jobs info\n returned: success\n type: list\n sample: [\n {\n \"Affinities\": null,\n \"AllAtOnce\": false,\n \"Constraints\": null,\n \"ConsulToken\": \"\",\n \"CreateIndex\": 13,\n \"Datacenters\": [\n \"dc1\"\n ],\n \"Dispatched\": false,\n \"ID\": \"example\",\n \"JobModifyIndex\": 13,\n \"Meta\": null,\n \"ModifyIndex\": 13,\n \"Multiregion\": null,\n \"Name\": \"example\",\n \"Namespace\": \"default\",\n \"NomadTokenID\": \"\",\n \"ParameterizedJob\": null,\n \"ParentID\": \"\",\n \"Payload\": null,\n \"Periodic\": null,\n \"Priority\": 50,\n \"Region\": 
\"global\",\n \"Spreads\": null,\n \"Stable\": false,\n \"Status\": \"pending\",\n \"StatusDescription\": \"\",\n \"Stop\": false,\n \"SubmitTime\": 1602244370615307000,\n \"TaskGroups\": [\n {\n \"Affinities\": null,\n \"Constraints\": null,\n \"Count\": 1,\n \"EphemeralDisk\": {\n \"Migrate\": false,\n \"SizeMB\": 300,\n \"Sticky\": false\n },\n \"Meta\": null,\n \"Migrate\": {\n \"HealthCheck\": \"checks\",\n \"HealthyDeadline\": 300000000000,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 10000000000\n },\n \"Name\": \"cache\",\n \"Networks\": null,\n \"ReschedulePolicy\": {\n \"Attempts\": 0,\n \"Delay\": 30000000000,\n \"DelayFunction\": \"exponential\",\n \"Interval\": 0,\n \"MaxDelay\": 3600000000000,\n \"Unlimited\": true\n },\n \"RestartPolicy\": {\n \"Attempts\": 3,\n \"Delay\": 15000000000,\n \"Interval\": 1800000000000,\n \"Mode\": \"fail\"\n },\n \"Scaling\": null,\n \"Services\": null,\n \"ShutdownDelay\": null,\n \"Spreads\": null,\n \"StopAfterClientDisconnect\": null,\n \"Tasks\": [\n {\n \"Affinities\": null,\n \"Artifacts\": null,\n \"CSIPluginConfig\": null,\n \"Config\": {\n \"image\": \"redis:3.2\",\n \"port_map\": [\n {\n \"db\": 6379.0\n }\n ]\n },\n \"Constraints\": null,\n \"DispatchPayload\": null,\n \"Driver\": \"docker\",\n \"Env\": null,\n \"KillSignal\": \"\",\n \"KillTimeout\": 5000000000,\n \"Kind\": \"\",\n \"Leader\": false,\n \"Lifecycle\": null,\n \"LogConfig\": {\n \"MaxFileSizeMB\": 10,\n \"MaxFiles\": 10\n },\n \"Meta\": null,\n \"Name\": \"redis\",\n \"Resources\": {\n \"CPU\": 500,\n \"Devices\": null,\n \"DiskMB\": 0,\n \"IOPS\": 0,\n \"MemoryMB\": 256,\n \"Networks\": [\n {\n \"CIDR\": \"\",\n \"DNS\": null,\n \"Device\": \"\",\n \"DynamicPorts\": [\n {\n \"HostNetwork\": \"default\",\n \"Label\": \"db\",\n \"To\": 0,\n \"Value\": 0\n }\n ],\n \"IP\": \"\",\n \"MBits\": 10,\n \"Mode\": \"\",\n \"ReservedPorts\": null\n }\n ]\n },\n \"RestartPolicy\": {\n \"Attempts\": 3,\n \"Delay\": 15000000000,\n \"Interval\": 1800000000000,\n \"Mode\": \"fail\"\n },\n \"Services\": [\n {\n \"AddressMode\": \"auto\",\n \"CanaryMeta\": null,\n \"CanaryTags\": null,\n \"Checks\": [\n {\n \"AddressMode\": \"\",\n \"Args\": null,\n \"CheckRestart\": null,\n \"Command\": \"\",\n \"Expose\": false,\n \"FailuresBeforeCritical\": 0,\n \"GRPCService\": \"\",\n \"GRPCUseTLS\": false,\n \"Header\": null,\n \"InitialStatus\": \"\",\n \"Interval\": 10000000000,\n \"Method\": \"\",\n \"Name\": \"alive\",\n \"Path\": \"\",\n \"PortLabel\": \"\",\n \"Protocol\": \"\",\n \"SuccessBeforePassing\": 0,\n \"TLSSkipVerify\": false,\n \"TaskName\": \"\",\n \"Timeout\": 2000000000,\n \"Type\": \"tcp\"\n }\n ],\n \"Connect\": null,\n \"EnableTagOverride\": false,\n \"Meta\": null,\n \"Name\": \"redis-cache\",\n \"PortLabel\": \"db\",\n \"Tags\": [\n \"global\",\n \"cache\"\n ],\n \"TaskName\": \"\"\n }\n ],\n \"ShutdownDelay\": 0,\n \"Templates\": null,\n \"User\": \"\",\n \"Vault\": null,\n \"VolumeMounts\": null\n }\n ],\n \"Update\": {\n \"AutoPromote\": false,\n \"AutoRevert\": false,\n \"Canary\": 0,\n \"HealthCheck\": \"checks\",\n \"HealthyDeadline\": 180000000000,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 10000000000,\n \"ProgressDeadline\": 600000000000,\n \"Stagger\": 30000000000\n },\n \"Volumes\": null\n }\n ],\n \"Type\": \"service\",\n \"Update\": {\n \"AutoPromote\": false,\n \"AutoRevert\": false,\n \"Canary\": 0,\n \"HealthCheck\": \"\",\n \"HealthyDeadline\": 0,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 0,\n \"ProgressDeadline\": 0,\n \"Stagger\": 30000000000\n },\n 
\"VaultNamespace\": \"\",\n \"VaultToken\": \"\",\n \"Version\": 0\n }\n ]\n\n'''\n\n\nimport os\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible.module_utils._text import to_native\n\nimport_nomad = None\ntry:\n import nomad\n import_nomad = True\nexcept ImportError:\n import_nomad = False\n\n\ndef run():\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(required=True, type='str'),\n use_ssl=dict(type='bool', default=True),\n timeout=dict(type='int', default=5),\n validate_certs=dict(type='bool', default=True),\n client_cert=dict(type='path', default=None),\n client_key=dict(type='path', default=None),\n namespace=dict(type='str', default=None),\n name=dict(type='str', default=None),\n token=dict(type='str', default=None, no_log=True)\n ),\n supports_check_mode=True\n )\n\n if not import_nomad:\n module.fail_json(msg=missing_required_lib(\"python-nomad\"))\n\n certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))\n\n nomad_client = nomad.Nomad(\n host=module.params.get('host'),\n secure=module.params.get('use_ssl'),\n timeout=module.params.get('timeout'),\n verify=module.params.get('validate_certs'),\n cert=certificate_ssl,\n namespace=module.params.get('namespace'),\n token=module.params.get('token')\n )\n\n changed = False\n result = list()\n try:\n job_list = nomad_client.jobs.get_jobs()\n for job in job_list:\n result.append(nomad_client.job.get_job(job.get('ID')))\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n if module.params.get('name'):\n filter = list()\n try:\n for job in result:\n if job.get('ID') == module.params.get('name'):\n filter.append(job)\n result = filter\n if not filter:\n module.fail_json(msg=\"Couldn't find Job with id \" + str(module.params.get('name')))\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n module.exit_json(changed=changed, result=result)\n\n\ndef main():\n\n run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/clustering/nomad/nomad_job_info.py"}]} | 3,695 | 161 |
gh_patches_debug_29707 | rasdani/github-patches | git_diff | e-valuation__EvaP-2040 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace `toggle_no_grades` with an idempotent alternative
Currently, there is a URL for toggling the grade status of a course (`grades:toggle_no_grades`). Instead of having a toggle, we would like an idempotent operation, that is, the endpoint should be something like `grades:set_gets_grade_documents`, where the desired boolean value is sent with the request. This way, two people who want to change the status don't revert each other's actions but instead set the same value.
--- END ISSUE ---
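A minimal sketch of the idempotent variant the issue asks for, written as a drop-in sibling of the existing `toggle_no_grades` view in `evap/grades/views.py` (it reuses that file's imports and decorators). The view name `set_no_grades` and the `status` form field sent as `"0"`/`"1"` are assumptions about the final design, and `SuspiciousOperation` would additionally need to be imported from `django.core.exceptions`.

```python
# Sketch only: the client states the desired value instead of flipping it,
# so two publishers submitting the same intent cannot undo each other.
@require_POST
@grade_publisher_required
def set_no_grades(request):
    course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, "course_id")

    try:
        status = bool(int(request.POST["status"]))  # assumed wire format: "0" or "1"
    except (KeyError, ValueError) as e:
        raise SuspiciousOperation from e

    if course.semester.grade_documents_are_deleted:
        raise PermissionDenied

    course.gets_no_grade_documents = status
    course.save()

    if course.gets_no_grade_documents:
        on_grading_process_finished(course)

    return HttpResponse()  # 200 OK
```

The URL pattern would then point at the new view instead of the toggle, and repeating the same request any number of times leaves the course in the same state.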
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/grades/urls.py`
Content:
```
1 from django.urls import path
2
3 from evap.grades import views
4
5 app_name = "grades"
6
7 urlpatterns = [
8 path("", views.IndexView.as_view(), name="index"),
9 path("download/<int:grade_document_id>", views.download_grades, name="download_grades"),
10 path("semester/<int:semester_id>", views.SemesterView.as_view(), name="semester_view"),
11 path("course/<int:course_id>", views.CourseView.as_view(), name="course_view"),
12 path("course/<int:course_id>/upload", views.upload_grades, name="upload_grades"),
13 path("grade_document/<int:grade_document_id>/edit", views.edit_grades, name="edit_grades"),
14
15 path("delete_grades", views.delete_grades, name="delete_grades"),
16 path("toggle_no_grades", views.toggle_no_grades, name="toggle_no_grades"),
17 ]
18
```
Path: `evap/grades/views.py`
Content:
```
1 from django.conf import settings
2 from django.contrib import messages
3 from django.core.exceptions import PermissionDenied
4 from django.db.models.query import QuerySet
5 from django.http import FileResponse, HttpResponse
6 from django.shortcuts import get_object_or_404, redirect, render
7 from django.utils.translation import gettext as _
8 from django.views.decorators.http import require_GET, require_POST
9 from django.views.generic import DetailView, TemplateView
10
11 from evap.evaluation.auth import (
12 grade_downloader_required,
13 grade_publisher_or_manager_required,
14 grade_publisher_required,
15 )
16 from evap.evaluation.models import Course, EmailTemplate, Evaluation, Semester
17 from evap.evaluation.tools import get_object_from_dict_pk_entry_or_logged_40x, ilen
18 from evap.grades.forms import GradeDocumentForm
19 from evap.grades.models import GradeDocument
20
21
22 @grade_publisher_required
23 class IndexView(TemplateView):
24 template_name = "grades_index.html"
25
26 def get_context_data(self, **kwargs):
27 return super().get_context_data(**kwargs) | {
28 "semesters": Semester.objects.filter(grade_documents_are_deleted=False),
29 "disable_breadcrumb_grades": True,
30 }
31
32
33 def course_grade_document_count_tuples(courses: QuerySet[Course]) -> list[tuple[Course, int, int]]:
34 courses = courses.prefetch_related("degrees", "responsibles", "evaluations", "grade_documents")
35
36 return [
37 (
38 course,
39 ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.MIDTERM_GRADES),
40 ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.FINAL_GRADES),
41 )
42 for course in courses
43 ]
44
45
46 @grade_publisher_required
47 class SemesterView(DetailView):
48 template_name = "grades_semester_view.html"
49 model = Semester
50 pk_url_kwarg = "semester_id"
51
52 object: Semester
53
54 def get_object(self, *args, **kwargs):
55 semester = super().get_object(*args, **kwargs)
56 if semester.grade_documents_are_deleted:
57 raise PermissionDenied
58 return semester
59
60 def get_context_data(self, **kwargs):
61 courses = (
62 self.object.courses.filter(evaluations__wait_for_grade_upload_before_publishing=True)
63 .exclude(evaluations__state=Evaluation.State.NEW)
64 .distinct()
65 )
66 courses = course_grade_document_count_tuples(courses)
67
68 return super().get_context_data(**kwargs) | {
69 "courses": courses,
70 "disable_breadcrumb_semester": True,
71 }
72
73
74 @grade_publisher_or_manager_required
75 class CourseView(DetailView):
76 template_name = "grades_course_view.html"
77 model = Course
78 pk_url_kwarg = "course_id"
79
80 def get_object(self, *args, **kwargs):
81 course = super().get_object(*args, **kwargs)
82 if course.semester.grade_documents_are_deleted:
83 raise PermissionDenied
84 return course
85
86 def get_context_data(self, **kwargs):
87 return super().get_context_data(**kwargs) | {
88 "semester": self.object.semester,
89 "grade_documents": self.object.grade_documents.all(),
90 "disable_breadcrumb_course": True,
91 }
92
93
94 def on_grading_process_finished(course):
95 evaluations = course.evaluations.all()
96 if all(evaluation.state == Evaluation.State.REVIEWED for evaluation in evaluations):
97 for evaluation in evaluations:
98 assert evaluation.grading_process_is_finished
99 for evaluation in evaluations:
100 evaluation.publish()
101 evaluation.save()
102
103 EmailTemplate.send_participant_publish_notifications(evaluations)
104 EmailTemplate.send_contributor_publish_notifications(evaluations)
105
106
107 @grade_publisher_required
108 def upload_grades(request, course_id):
109 course = get_object_or_404(Course, id=course_id)
110 semester = course.semester
111 if semester.grade_documents_are_deleted:
112 raise PermissionDenied
113
114 final_grades = request.GET.get("final") == "true" # if parameter is not given, assume midterm grades
115
116 grade_document = GradeDocument(course=course)
117 if final_grades:
118 grade_document.type = GradeDocument.Type.FINAL_GRADES
119 grade_document.description_en = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_EN
120 grade_document.description_de = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_DE
121 else:
122 grade_document.type = GradeDocument.Type.MIDTERM_GRADES
123 grade_document.description_en = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_EN
124 grade_document.description_de = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_DE
125
126 form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)
127
128 if form.is_valid():
129 form.save(modifying_user=request.user)
130
131 if final_grades:
132 on_grading_process_finished(course)
133
134 messages.success(request, _("Successfully uploaded grades."))
135 return redirect("grades:course_view", course.id)
136
137 template_data = {
138 "semester": semester,
139 "course": course,
140 "form": form,
141 "final_grades": final_grades,
142 "show_automated_publishing_info": final_grades,
143 }
144 return render(request, "grades_upload_form.html", template_data)
145
146
147 @require_POST
148 @grade_publisher_required
149 def toggle_no_grades(request):
150 course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, "course_id")
151 if course.semester.grade_documents_are_deleted:
152 raise PermissionDenied
153
154 course.gets_no_grade_documents = not course.gets_no_grade_documents
155 course.save()
156
157 if course.gets_no_grade_documents:
158 on_grading_process_finished(course)
159
160 return HttpResponse() # 200 OK
161
162
163 @require_GET
164 @grade_downloader_required
165 def download_grades(request, grade_document_id):
166 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
167 if grade_document.course.semester.grade_documents_are_deleted:
168 raise PermissionDenied
169
170 return FileResponse(grade_document.file.open(), filename=grade_document.filename(), as_attachment=True)
171
172
173 @grade_publisher_required
174 def edit_grades(request, grade_document_id):
175 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
176 course = grade_document.course
177 semester = course.semester
178 if semester.grade_documents_are_deleted:
179 raise PermissionDenied
180
181 form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)
182
183 final_grades = (
184 grade_document.type == GradeDocument.Type.FINAL_GRADES
185 ) # if parameter is not given, assume midterm grades
186
187 if form.is_valid():
188 form.save(modifying_user=request.user)
189 messages.success(request, _("Successfully updated grades."))
190 return redirect("grades:course_view", course.id)
191
192 template_data = {
193 "semester": semester,
194 "course": course,
195 "form": form,
196 "show_automated_publishing_info": False,
197 "final_grades": final_grades,
198 }
199 return render(request, "grades_upload_form.html", template_data)
200
201
202 @require_POST
203 @grade_publisher_required
204 def delete_grades(request):
205 grade_document = get_object_from_dict_pk_entry_or_logged_40x(GradeDocument, request.POST, "grade_document_id")
206 grade_document.delete()
207 return HttpResponse() # 200 OK
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/grades/urls.py b/evap/grades/urls.py
--- a/evap/grades/urls.py
+++ b/evap/grades/urls.py
@@ -13,5 +13,5 @@
path("grade_document/<int:grade_document_id>/edit", views.edit_grades, name="edit_grades"),
path("delete_grades", views.delete_grades, name="delete_grades"),
- path("toggle_no_grades", views.toggle_no_grades, name="toggle_no_grades"),
+ path("set_no_grades", views.set_no_grades, name="set_no_grades"),
]
diff --git a/evap/grades/views.py b/evap/grades/views.py
--- a/evap/grades/views.py
+++ b/evap/grades/views.py
@@ -1,6 +1,6 @@
from django.conf import settings
from django.contrib import messages
-from django.core.exceptions import PermissionDenied
+from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db.models.query import QuerySet
from django.http import FileResponse, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
@@ -146,12 +146,18 @@
@require_POST
@grade_publisher_required
-def toggle_no_grades(request):
+def set_no_grades(request):
course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, "course_id")
+
+ try:
+ status = bool(int(request.POST["status"]))
+ except (KeyError, TypeError, ValueError) as e:
+ raise SuspiciousOperation from e
+
if course.semester.grade_documents_are_deleted:
raise PermissionDenied
- course.gets_no_grade_documents = not course.gets_no_grade_documents
+ course.gets_no_grade_documents = status
course.save()
if course.gets_no_grade_documents:
| {"golden_diff": "diff --git a/evap/grades/urls.py b/evap/grades/urls.py\n--- a/evap/grades/urls.py\n+++ b/evap/grades/urls.py\n@@ -13,5 +13,5 @@\n path(\"grade_document/<int:grade_document_id>/edit\", views.edit_grades, name=\"edit_grades\"),\n \n path(\"delete_grades\", views.delete_grades, name=\"delete_grades\"),\n- path(\"toggle_no_grades\", views.toggle_no_grades, name=\"toggle_no_grades\"),\n+ path(\"set_no_grades\", views.set_no_grades, name=\"set_no_grades\"),\n ]\ndiff --git a/evap/grades/views.py b/evap/grades/views.py\n--- a/evap/grades/views.py\n+++ b/evap/grades/views.py\n@@ -1,6 +1,6 @@\n from django.conf import settings\n from django.contrib import messages\n-from django.core.exceptions import PermissionDenied\n+from django.core.exceptions import PermissionDenied, SuspiciousOperation\n from django.db.models.query import QuerySet\n from django.http import FileResponse, HttpResponse\n from django.shortcuts import get_object_or_404, redirect, render\n@@ -146,12 +146,18 @@\n \n @require_POST\n @grade_publisher_required\n-def toggle_no_grades(request):\n+def set_no_grades(request):\n course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, \"course_id\")\n+\n+ try:\n+ status = bool(int(request.POST[\"status\"]))\n+ except (KeyError, TypeError, ValueError) as e:\n+ raise SuspiciousOperation from e\n+\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n \n- course.gets_no_grade_documents = not course.gets_no_grade_documents\n+ course.gets_no_grade_documents = status\n course.save()\n \n if course.gets_no_grade_documents:\n", "issue": "Replace `toggle_no_grades` with an idempotent alternative\nCurrently, there is a URL for toggling grade status of a course (`grades:toggle_no_grades`). Instead of having a toggle, we would like an idempotent operation, that is, the endpoint should be something like `grades:set_gets_grade_documents` where the desired boolean value is sent with the request. 
This way, two people who want to change the status don't revert each others action but instead set the same value.\n", "before_files": [{"content": "from django.urls import path\n\nfrom evap.grades import views\n\napp_name = \"grades\"\n\nurlpatterns = [\n path(\"\", views.IndexView.as_view(), name=\"index\"),\n path(\"download/<int:grade_document_id>\", views.download_grades, name=\"download_grades\"),\n path(\"semester/<int:semester_id>\", views.SemesterView.as_view(), name=\"semester_view\"),\n path(\"course/<int:course_id>\", views.CourseView.as_view(), name=\"course_view\"),\n path(\"course/<int:course_id>/upload\", views.upload_grades, name=\"upload_grades\"),\n path(\"grade_document/<int:grade_document_id>/edit\", views.edit_grades, name=\"edit_grades\"),\n\n path(\"delete_grades\", views.delete_grades, name=\"delete_grades\"),\n path(\"toggle_no_grades\", views.toggle_no_grades, name=\"toggle_no_grades\"),\n]\n", "path": "evap/grades/urls.py"}, {"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models.query import QuerySet\nfrom django.http import FileResponse, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.generic import DetailView, TemplateView\n\nfrom evap.evaluation.auth import (\n grade_downloader_required,\n grade_publisher_or_manager_required,\n grade_publisher_required,\n)\nfrom evap.evaluation.models import Course, EmailTemplate, Evaluation, Semester\nfrom evap.evaluation.tools import get_object_from_dict_pk_entry_or_logged_40x, ilen\nfrom evap.grades.forms import GradeDocumentForm\nfrom evap.grades.models import GradeDocument\n\n\n@grade_publisher_required\nclass IndexView(TemplateView):\n template_name = \"grades_index.html\"\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(**kwargs) | {\n \"semesters\": Semester.objects.filter(grade_documents_are_deleted=False),\n \"disable_breadcrumb_grades\": True,\n }\n\n\ndef course_grade_document_count_tuples(courses: QuerySet[Course]) -> list[tuple[Course, int, int]]:\n courses = courses.prefetch_related(\"degrees\", \"responsibles\", \"evaluations\", \"grade_documents\")\n\n return [\n (\n course,\n ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.MIDTERM_GRADES),\n ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.FINAL_GRADES),\n )\n for course in courses\n ]\n\n\n@grade_publisher_required\nclass SemesterView(DetailView):\n template_name = \"grades_semester_view.html\"\n model = Semester\n pk_url_kwarg = \"semester_id\"\n\n object: Semester\n\n def get_object(self, *args, **kwargs):\n semester = super().get_object(*args, **kwargs)\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n return semester\n\n def get_context_data(self, **kwargs):\n courses = (\n self.object.courses.filter(evaluations__wait_for_grade_upload_before_publishing=True)\n .exclude(evaluations__state=Evaluation.State.NEW)\n .distinct()\n )\n courses = course_grade_document_count_tuples(courses)\n\n return super().get_context_data(**kwargs) | {\n \"courses\": courses,\n \"disable_breadcrumb_semester\": True,\n }\n\n\n@grade_publisher_or_manager_required\nclass CourseView(DetailView):\n template_name = \"grades_course_view.html\"\n model = Course\n pk_url_kwarg = \"course_id\"\n\n def 
get_object(self, *args, **kwargs):\n course = super().get_object(*args, **kwargs)\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n return course\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(**kwargs) | {\n \"semester\": self.object.semester,\n \"grade_documents\": self.object.grade_documents.all(),\n \"disable_breadcrumb_course\": True,\n }\n\n\ndef on_grading_process_finished(course):\n evaluations = course.evaluations.all()\n if all(evaluation.state == Evaluation.State.REVIEWED for evaluation in evaluations):\n for evaluation in evaluations:\n assert evaluation.grading_process_is_finished\n for evaluation in evaluations:\n evaluation.publish()\n evaluation.save()\n\n EmailTemplate.send_participant_publish_notifications(evaluations)\n EmailTemplate.send_contributor_publish_notifications(evaluations)\n\n\n@grade_publisher_required\ndef upload_grades(request, course_id):\n course = get_object_or_404(Course, id=course_id)\n semester = course.semester\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n final_grades = request.GET.get(\"final\") == \"true\" # if parameter is not given, assume midterm grades\n\n grade_document = GradeDocument(course=course)\n if final_grades:\n grade_document.type = GradeDocument.Type.FINAL_GRADES\n grade_document.description_en = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_EN\n grade_document.description_de = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_DE\n else:\n grade_document.type = GradeDocument.Type.MIDTERM_GRADES\n grade_document.description_en = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_EN\n grade_document.description_de = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_DE\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)\n\n if form.is_valid():\n form.save(modifying_user=request.user)\n\n if final_grades:\n on_grading_process_finished(course)\n\n messages.success(request, _(\"Successfully uploaded grades.\"))\n return redirect(\"grades:course_view\", course.id)\n\n template_data = {\n \"semester\": semester,\n \"course\": course,\n \"form\": form,\n \"final_grades\": final_grades,\n \"show_automated_publishing_info\": final_grades,\n }\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@require_POST\n@grade_publisher_required\ndef toggle_no_grades(request):\n course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, \"course_id\")\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n course.gets_no_grade_documents = not course.gets_no_grade_documents\n course.save()\n\n if course.gets_no_grade_documents:\n on_grading_process_finished(course)\n\n return HttpResponse() # 200 OK\n\n\n@require_GET\n@grade_downloader_required\ndef download_grades(request, grade_document_id):\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n if grade_document.course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n return FileResponse(grade_document.file.open(), filename=grade_document.filename(), as_attachment=True)\n\n\n@grade_publisher_required\ndef edit_grades(request, grade_document_id):\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n course = grade_document.course\n semester = course.semester\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)\n\n final_grades = (\n grade_document.type == 
GradeDocument.Type.FINAL_GRADES\n ) # if parameter is not given, assume midterm grades\n\n if form.is_valid():\n form.save(modifying_user=request.user)\n messages.success(request, _(\"Successfully updated grades.\"))\n return redirect(\"grades:course_view\", course.id)\n\n template_data = {\n \"semester\": semester,\n \"course\": course,\n \"form\": form,\n \"show_automated_publishing_info\": False,\n \"final_grades\": final_grades,\n }\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@require_POST\n@grade_publisher_required\ndef delete_grades(request):\n grade_document = get_object_from_dict_pk_entry_or_logged_40x(GradeDocument, request.POST, \"grade_document_id\")\n grade_document.delete()\n return HttpResponse() # 200 OK\n", "path": "evap/grades/views.py"}], "after_files": [{"content": "from django.urls import path\n\nfrom evap.grades import views\n\napp_name = \"grades\"\n\nurlpatterns = [\n path(\"\", views.IndexView.as_view(), name=\"index\"),\n path(\"download/<int:grade_document_id>\", views.download_grades, name=\"download_grades\"),\n path(\"semester/<int:semester_id>\", views.SemesterView.as_view(), name=\"semester_view\"),\n path(\"course/<int:course_id>\", views.CourseView.as_view(), name=\"course_view\"),\n path(\"course/<int:course_id>/upload\", views.upload_grades, name=\"upload_grades\"),\n path(\"grade_document/<int:grade_document_id>/edit\", views.edit_grades, name=\"edit_grades\"),\n\n path(\"delete_grades\", views.delete_grades, name=\"delete_grades\"),\n path(\"set_no_grades\", views.set_no_grades, name=\"set_no_grades\"),\n]\n", "path": "evap/grades/urls.py"}, {"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.db.models.query import QuerySet\nfrom django.http import FileResponse, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.generic import DetailView, TemplateView\n\nfrom evap.evaluation.auth import (\n grade_downloader_required,\n grade_publisher_or_manager_required,\n grade_publisher_required,\n)\nfrom evap.evaluation.models import Course, EmailTemplate, Evaluation, Semester\nfrom evap.evaluation.tools import get_object_from_dict_pk_entry_or_logged_40x, ilen\nfrom evap.grades.forms import GradeDocumentForm\nfrom evap.grades.models import GradeDocument\n\n\n@grade_publisher_required\nclass IndexView(TemplateView):\n template_name = \"grades_index.html\"\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(**kwargs) | {\n \"semesters\": Semester.objects.filter(grade_documents_are_deleted=False),\n \"disable_breadcrumb_grades\": True,\n }\n\n\ndef course_grade_document_count_tuples(courses: QuerySet[Course]) -> list[tuple[Course, int, int]]:\n courses = courses.prefetch_related(\"degrees\", \"responsibles\", \"evaluations\", \"grade_documents\")\n\n return [\n (\n course,\n ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.MIDTERM_GRADES),\n ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.FINAL_GRADES),\n )\n for course in courses\n ]\n\n\n@grade_publisher_required\nclass SemesterView(DetailView):\n template_name = \"grades_semester_view.html\"\n model = Semester\n pk_url_kwarg = \"semester_id\"\n\n object: Semester\n\n def get_object(self, *args, **kwargs):\n semester = 
super().get_object(*args, **kwargs)\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n return semester\n\n def get_context_data(self, **kwargs):\n courses = (\n self.object.courses.filter(evaluations__wait_for_grade_upload_before_publishing=True)\n .exclude(evaluations__state=Evaluation.State.NEW)\n .distinct()\n )\n courses = course_grade_document_count_tuples(courses)\n\n return super().get_context_data(**kwargs) | {\n \"courses\": courses,\n \"disable_breadcrumb_semester\": True,\n }\n\n\n@grade_publisher_or_manager_required\nclass CourseView(DetailView):\n template_name = \"grades_course_view.html\"\n model = Course\n pk_url_kwarg = \"course_id\"\n\n def get_object(self, *args, **kwargs):\n course = super().get_object(*args, **kwargs)\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n return course\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(**kwargs) | {\n \"semester\": self.object.semester,\n \"grade_documents\": self.object.grade_documents.all(),\n \"disable_breadcrumb_course\": True,\n }\n\n\ndef on_grading_process_finished(course):\n evaluations = course.evaluations.all()\n if all(evaluation.state == Evaluation.State.REVIEWED for evaluation in evaluations):\n for evaluation in evaluations:\n assert evaluation.grading_process_is_finished\n for evaluation in evaluations:\n evaluation.publish()\n evaluation.save()\n\n EmailTemplate.send_participant_publish_notifications(evaluations)\n EmailTemplate.send_contributor_publish_notifications(evaluations)\n\n\n@grade_publisher_required\ndef upload_grades(request, course_id):\n course = get_object_or_404(Course, id=course_id)\n semester = course.semester\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n final_grades = request.GET.get(\"final\") == \"true\" # if parameter is not given, assume midterm grades\n\n grade_document = GradeDocument(course=course)\n if final_grades:\n grade_document.type = GradeDocument.Type.FINAL_GRADES\n grade_document.description_en = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_EN\n grade_document.description_de = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_DE\n else:\n grade_document.type = GradeDocument.Type.MIDTERM_GRADES\n grade_document.description_en = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_EN\n grade_document.description_de = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_DE\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)\n\n if form.is_valid():\n form.save(modifying_user=request.user)\n\n if final_grades:\n on_grading_process_finished(course)\n\n messages.success(request, _(\"Successfully uploaded grades.\"))\n return redirect(\"grades:course_view\", course.id)\n\n template_data = {\n \"semester\": semester,\n \"course\": course,\n \"form\": form,\n \"final_grades\": final_grades,\n \"show_automated_publishing_info\": final_grades,\n }\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@require_POST\n@grade_publisher_required\ndef set_no_grades(request):\n course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, \"course_id\")\n\n try:\n status = bool(int(request.POST[\"status\"]))\n except (KeyError, TypeError, ValueError) as e:\n raise SuspiciousOperation from e\n\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n course.gets_no_grade_documents = status\n course.save()\n\n if course.gets_no_grade_documents:\n on_grading_process_finished(course)\n\n return HttpResponse() # 200 
OK\n\n\n@require_GET\n@grade_downloader_required\ndef download_grades(request, grade_document_id):\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n if grade_document.course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n return FileResponse(grade_document.file.open(), filename=grade_document.filename(), as_attachment=True)\n\n\n@grade_publisher_required\ndef edit_grades(request, grade_document_id):\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n course = grade_document.course\n semester = course.semester\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)\n\n final_grades = (\n grade_document.type == GradeDocument.Type.FINAL_GRADES\n ) # if parameter is not given, assume midterm grades\n\n if form.is_valid():\n form.save(modifying_user=request.user)\n messages.success(request, _(\"Successfully updated grades.\"))\n return redirect(\"grades:course_view\", course.id)\n\n template_data = {\n \"semester\": semester,\n \"course\": course,\n \"form\": form,\n \"show_automated_publishing_info\": False,\n \"final_grades\": final_grades,\n }\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@require_POST\n@grade_publisher_required\ndef delete_grades(request):\n grade_document = get_object_from_dict_pk_entry_or_logged_40x(GradeDocument, request.POST, \"grade_document_id\")\n grade_document.delete()\n return HttpResponse() # 200 OK\n", "path": "evap/grades/views.py"}]} | 2,702 | 425 |
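The evap/grades record above swaps a toggle endpoint for an idempotent setter. A minimal sketch of that pattern is below, using the `Course` model and `gets_no_grade_documents` field shown in the record's files; it illustrates the idea rather than reproducing evap's exact view (for instance, it takes the course id from the URL instead of the POST body).

```python
# Sketch of an idempotent "set" endpoint replacing a toggle.
# Import path and field name follow the views.py shown in the record above;
# the URL-kwarg signature is illustrative.
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_POST

from evap.evaluation.models import Course


@require_POST
def set_no_grades(request, course_id):
    course = get_object_or_404(Course, id=course_id)
    try:
        # The client sends the desired end state explicitly.
        status = bool(int(request.POST["status"]))
    except (KeyError, ValueError) as exc:
        raise SuspiciousOperation from exc
    course.gets_no_grade_documents = status  # assignment, not negation
    course.save()
    return HttpResponse()  # 200 OK
```

Because the client states the desired end state, replaying the request, or two grade publishers submitting the same value, converges on that state instead of flipping the flag back and forth.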
gh_patches_debug_37255 | rasdani/github-patches | git_diff | zulip__zulip-14742 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
upgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails
In https://github.com/zulip/zulip/issues/14615, we have `upgrade-zulip-from-git` failing inside `update-prod-static`, and the error message is terrible:
```
2020-04-17 21:23:41,590 upgrade-zulip-stage-2: Building static assets...
Cached version not found! Installing node modules.
+ /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod
Error running a subcommand of ./tools/update-prod-static: /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod
Actual error output for the subcommand is just above this.
Traceback (most recent call last):
File "./tools/update-prod-static", line 37, in <module>
setup_node_modules(production=True, stdout=fp, stderr=fp)
File "./tools/../scripts/lib/node_cache.py", line 66, in setup_node_modules
stderr=stderr)
File "./tools/../scripts/lib/node_cache.py", line 91, in do_yarn_install
cwd=target_path, stdout=stdout, stderr=stderr)
File "./tools/../scripts/lib/zulip_tools.py", line 200, in run
subprocess.check_call(args, **kwargs)
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/srv/zulip-yarn/bin/yarn', 'install', '--non-interactive', '--frozen-lockfile', '--prod']' returned non-zero exit status 1
Traceback (most recent call last):
File "/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2", line 122, in <module>
preexec_fn=su_to_zulip)
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['./tools/update-prod-static', '--authors-not-required', '--prev-deploy', '/home/zulip/deployments/current']' returned non-zero exit status 1
Traceback (most recent call last):
File "/home/zulip/deployments/current/scripts/lib/upgrade-zulip-from-git", line 69, in <module>
deploy_path, "--from-git"] + deploy_options)
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2', '/home/zulip/deployments/2020-04-18-01-53-13', '--from-git']' returned non-zero exit status 1
```
Because `update-prod-static` is failing, the actual error messages are in `/home/zulip/deployments/next/var/log/upgrade-prod-static.log`.
`update-prod-static` itself has very verbose output, so I think there are two paths forward:
* Simplest is to copy `build-release-tarball` in at least printing the path to `upgrade-prod-static.log`, and perhaps catching and not emitting much of that unhelpful traceback.
* Better would be to make `update-prod-static` have more sane output.
upgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails
This just prints the path to the prod static log file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/node_cache.py`
Content:
```
1 import os
2 import hashlib
3 import json
4 import shutil
5
6 from typing import Optional, List, IO, Any
7 from scripts.lib.zulip_tools import subprocess_text_output, run
8
9 ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10 ZULIP_SRV_PATH = "/srv"
11
12 if 'TRAVIS' in os.environ:
13 # In Travis CI, we don't have root access
14 ZULIP_SRV_PATH = "/home/travis"
15
16
17 NODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache')
18 YARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn')
19 YARN_PACKAGE_JSON = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/package.json')
20
21 DEFAULT_PRODUCTION = False
22
23 def get_yarn_args(production: bool) -> List[str]:
24 if production:
25 yarn_args = ["--prod"]
26 else:
27 yarn_args = []
28 return yarn_args
29
30 def generate_sha1sum_node_modules(
31 setup_dir: Optional[str] = None, production: bool = DEFAULT_PRODUCTION
32 ) -> str:
33 if setup_dir is None:
34 setup_dir = os.path.realpath(os.getcwd())
35 PACKAGE_JSON_FILE_PATH = os.path.join(setup_dir, 'package.json')
36 YARN_LOCK_FILE_PATH = os.path.join(setup_dir, 'yarn.lock')
37 sha1sum = hashlib.sha1()
38 sha1sum.update(subprocess_text_output(['cat', PACKAGE_JSON_FILE_PATH]).encode('utf8'))
39 if os.path.exists(YARN_LOCK_FILE_PATH):
40 # For backwards compatibility, we can't assume yarn.lock exists
41 sha1sum.update(subprocess_text_output(['cat', YARN_LOCK_FILE_PATH]).encode('utf8'))
42 with open(YARN_PACKAGE_JSON) as f:
43 yarn_version = json.load(f)['version']
44 sha1sum.update(yarn_version.encode("utf8"))
45 sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))
46 yarn_args = get_yarn_args(production=production)
47 sha1sum.update(''.join(sorted(yarn_args)).encode('utf8'))
48 return sha1sum.hexdigest()
49
50 def setup_node_modules(
51 production: bool = DEFAULT_PRODUCTION,
52 stdout: Optional[IO[Any]] = None,
53 stderr: Optional[IO[Any]] = None,
54 prefer_offline: bool = False,
55 ) -> None:
56 yarn_args = get_yarn_args(production=production)
57 if prefer_offline:
58 yarn_args.append("--prefer-offline")
59 sha1sum = generate_sha1sum_node_modules(production=production)
60 target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum)
61 cached_node_modules = os.path.join(target_path, 'node_modules')
62 success_stamp = os.path.join(target_path, '.success-stamp')
63 # Check if a cached version already exists
64 if not os.path.exists(success_stamp):
65 do_yarn_install(target_path,
66 yarn_args,
67 success_stamp,
68 stdout=stdout,
69 stderr=stderr)
70
71 print("Using cached node modules from %s" % (cached_node_modules,))
72 if os.path.islink('node_modules'):
73 os.remove('node_modules')
74 elif os.path.isdir('node_modules'):
75 shutil.rmtree('node_modules')
76 os.symlink(cached_node_modules, 'node_modules')
77
78 def do_yarn_install(
79 target_path: str,
80 yarn_args: List[str],
81 success_stamp: str,
82 stdout: Optional[IO[Any]] = None,
83 stderr: Optional[IO[Any]] = None,
84 ) -> None:
85 os.makedirs(target_path, exist_ok=True)
86 shutil.copy('package.json', target_path)
87 shutil.copy("yarn.lock", target_path)
88 shutil.copy(".yarnrc", target_path)
89 cached_node_modules = os.path.join(target_path, 'node_modules')
90 print("Cached version not found! Installing node modules.")
91
92 # Copy the existing node_modules to speed up install
93 if os.path.exists("node_modules") and not os.path.exists(cached_node_modules):
94 shutil.copytree("node_modules/", cached_node_modules, symlinks=True)
95 if os.environ.get('CUSTOM_CA_CERTIFICATES'):
96 run([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']],
97 stdout=stdout, stderr=stderr)
98 run([YARN_BIN, "install", "--non-interactive", "--frozen-lockfile"] + yarn_args,
99 cwd=target_path, stdout=stdout, stderr=stderr)
100 with open(success_stamp, 'w'):
101 pass
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/node_cache.py b/scripts/lib/node_cache.py
--- a/scripts/lib/node_cache.py
+++ b/scripts/lib/node_cache.py
@@ -3,7 +3,7 @@
import json
import shutil
-from typing import Optional, List, IO, Any
+from typing import Optional, List
from scripts.lib.zulip_tools import subprocess_text_output, run
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -49,8 +49,6 @@
def setup_node_modules(
production: bool = DEFAULT_PRODUCTION,
- stdout: Optional[IO[Any]] = None,
- stderr: Optional[IO[Any]] = None,
prefer_offline: bool = False,
) -> None:
yarn_args = get_yarn_args(production=production)
@@ -64,9 +62,7 @@
if not os.path.exists(success_stamp):
do_yarn_install(target_path,
yarn_args,
- success_stamp,
- stdout=stdout,
- stderr=stderr)
+ success_stamp)
print("Using cached node modules from %s" % (cached_node_modules,))
if os.path.islink('node_modules'):
@@ -78,9 +74,7 @@
def do_yarn_install(
target_path: str,
yarn_args: List[str],
- success_stamp: str,
- stdout: Optional[IO[Any]] = None,
- stderr: Optional[IO[Any]] = None,
+ success_stamp: str
) -> None:
os.makedirs(target_path, exist_ok=True)
shutil.copy('package.json', target_path)
@@ -93,9 +87,8 @@
if os.path.exists("node_modules") and not os.path.exists(cached_node_modules):
shutil.copytree("node_modules/", cached_node_modules, symlinks=True)
if os.environ.get('CUSTOM_CA_CERTIFICATES'):
- run([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']],
- stdout=stdout, stderr=stderr)
+ run([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']])
run([YARN_BIN, "install", "--non-interactive", "--frozen-lockfile"] + yarn_args,
- cwd=target_path, stdout=stdout, stderr=stderr)
+ cwd=target_path)
with open(success_stamp, 'w'):
pass
| {"golden_diff": "diff --git a/scripts/lib/node_cache.py b/scripts/lib/node_cache.py\n--- a/scripts/lib/node_cache.py\n+++ b/scripts/lib/node_cache.py\n@@ -3,7 +3,7 @@\n import json\n import shutil\n \n-from typing import Optional, List, IO, Any\n+from typing import Optional, List\n from scripts.lib.zulip_tools import subprocess_text_output, run\n \n ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n@@ -49,8 +49,6 @@\n \n def setup_node_modules(\n production: bool = DEFAULT_PRODUCTION,\n- stdout: Optional[IO[Any]] = None,\n- stderr: Optional[IO[Any]] = None,\n prefer_offline: bool = False,\n ) -> None:\n yarn_args = get_yarn_args(production=production)\n@@ -64,9 +62,7 @@\n if not os.path.exists(success_stamp):\n do_yarn_install(target_path,\n yarn_args,\n- success_stamp,\n- stdout=stdout,\n- stderr=stderr)\n+ success_stamp)\n \n print(\"Using cached node modules from %s\" % (cached_node_modules,))\n if os.path.islink('node_modules'):\n@@ -78,9 +74,7 @@\n def do_yarn_install(\n target_path: str,\n yarn_args: List[str],\n- success_stamp: str,\n- stdout: Optional[IO[Any]] = None,\n- stderr: Optional[IO[Any]] = None,\n+ success_stamp: str\n ) -> None:\n os.makedirs(target_path, exist_ok=True)\n shutil.copy('package.json', target_path)\n@@ -93,9 +87,8 @@\n if os.path.exists(\"node_modules\") and not os.path.exists(cached_node_modules):\n shutil.copytree(\"node_modules/\", cached_node_modules, symlinks=True)\n if os.environ.get('CUSTOM_CA_CERTIFICATES'):\n- run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']],\n- stdout=stdout, stderr=stderr)\n+ run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']])\n run([YARN_BIN, \"install\", \"--non-interactive\", \"--frozen-lockfile\"] + yarn_args,\n- cwd=target_path, stdout=stdout, stderr=stderr)\n+ cwd=target_path)\n with open(success_stamp, 'w'):\n pass\n", "issue": "upgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails\nIn https://github.com/zulip/zulip/issues/14615, we have `upgrade-zulip-from-git` failing inside `update-prod-static`, and the error message is terrible:\r\n\r\n```\r\n2020-04-17 21:23:41,590 upgrade-zulip-stage-2: Building static assets...\r\nCached version not found! 
Installing node modules.\r\n+ /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod\r\n\r\nError running a subcommand of ./tools/update-prod-static: /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod\r\nActual error output for the subcommand is just above this.\r\n\r\nTraceback (most recent call last):\r\n File \"./tools/update-prod-static\", line 37, in <module>\r\n setup_node_modules(production=True, stdout=fp, stderr=fp)\r\n File \"./tools/../scripts/lib/node_cache.py\", line 66, in setup_node_modules\r\n stderr=stderr)\r\n File \"./tools/../scripts/lib/node_cache.py\", line 91, in do_yarn_install\r\n cwd=target_path, stdout=stdout, stderr=stderr)\r\n File \"./tools/../scripts/lib/zulip_tools.py\", line 200, in run\r\n subprocess.check_call(args, **kwargs)\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['/srv/zulip-yarn/bin/yarn', 'install', '--non-interactive', '--frozen-lockfile', '--prod']' returned non-zero exit status 1\r\nTraceback (most recent call last):\r\n File \"/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2\", line 122, in <module>\r\n preexec_fn=su_to_zulip)\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['./tools/update-prod-static', '--authors-not-required', '--prev-deploy', '/home/zulip/deployments/current']' returned non-zero exit status 1\r\nTraceback (most recent call last):\r\n File \"/home/zulip/deployments/current/scripts/lib/upgrade-zulip-from-git\", line 69, in <module>\r\n deploy_path, \"--from-git\"] + deploy_options)\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2', '/home/zulip/deployments/2020-04-18-01-53-13', '--from-git']' returned non-zero exit status 1\r\n```\r\n\r\nBecause `update-prod-static` is failing, the actual error messages is in `/home/zulip/deployments/next/var/log/upgrade-prod-static.log`.\r\n\r\n`update-prod-static` itself has very verbose output, so I think there's two paths forward:\r\n* Simplest is to copy `build-release-tarball` in at least printing the path to `upgrade-prod-static.log`, and perhaps catching and not emitting much of that unhelpful traceback.\r\n* Better would be to make `update-prod-static` have more sane output.\nupgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails \nThis just prints the path to the prod static log file. 
\n", "before_files": [{"content": "import os\nimport hashlib\nimport json\nimport shutil\n\nfrom typing import Optional, List, IO, Any\nfrom scripts.lib.zulip_tools import subprocess_text_output, run\n\nZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nZULIP_SRV_PATH = \"/srv\"\n\nif 'TRAVIS' in os.environ:\n # In Travis CI, we don't have root access\n ZULIP_SRV_PATH = \"/home/travis\"\n\n\nNODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache')\nYARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn')\nYARN_PACKAGE_JSON = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/package.json')\n\nDEFAULT_PRODUCTION = False\n\ndef get_yarn_args(production: bool) -> List[str]:\n if production:\n yarn_args = [\"--prod\"]\n else:\n yarn_args = []\n return yarn_args\n\ndef generate_sha1sum_node_modules(\n setup_dir: Optional[str] = None, production: bool = DEFAULT_PRODUCTION\n) -> str:\n if setup_dir is None:\n setup_dir = os.path.realpath(os.getcwd())\n PACKAGE_JSON_FILE_PATH = os.path.join(setup_dir, 'package.json')\n YARN_LOCK_FILE_PATH = os.path.join(setup_dir, 'yarn.lock')\n sha1sum = hashlib.sha1()\n sha1sum.update(subprocess_text_output(['cat', PACKAGE_JSON_FILE_PATH]).encode('utf8'))\n if os.path.exists(YARN_LOCK_FILE_PATH):\n # For backwards compatibility, we can't assume yarn.lock exists\n sha1sum.update(subprocess_text_output(['cat', YARN_LOCK_FILE_PATH]).encode('utf8'))\n with open(YARN_PACKAGE_JSON) as f:\n yarn_version = json.load(f)['version']\n sha1sum.update(yarn_version.encode(\"utf8\"))\n sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))\n yarn_args = get_yarn_args(production=production)\n sha1sum.update(''.join(sorted(yarn_args)).encode('utf8'))\n return sha1sum.hexdigest()\n\ndef setup_node_modules(\n production: bool = DEFAULT_PRODUCTION,\n stdout: Optional[IO[Any]] = None,\n stderr: Optional[IO[Any]] = None,\n prefer_offline: bool = False,\n) -> None:\n yarn_args = get_yarn_args(production=production)\n if prefer_offline:\n yarn_args.append(\"--prefer-offline\")\n sha1sum = generate_sha1sum_node_modules(production=production)\n target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum)\n cached_node_modules = os.path.join(target_path, 'node_modules')\n success_stamp = os.path.join(target_path, '.success-stamp')\n # Check if a cached version already exists\n if not os.path.exists(success_stamp):\n do_yarn_install(target_path,\n yarn_args,\n success_stamp,\n stdout=stdout,\n stderr=stderr)\n\n print(\"Using cached node modules from %s\" % (cached_node_modules,))\n if os.path.islink('node_modules'):\n os.remove('node_modules')\n elif os.path.isdir('node_modules'):\n shutil.rmtree('node_modules')\n os.symlink(cached_node_modules, 'node_modules')\n\ndef do_yarn_install(\n target_path: str,\n yarn_args: List[str],\n success_stamp: str,\n stdout: Optional[IO[Any]] = None,\n stderr: Optional[IO[Any]] = None,\n) -> None:\n os.makedirs(target_path, exist_ok=True)\n shutil.copy('package.json', target_path)\n shutil.copy(\"yarn.lock\", target_path)\n shutil.copy(\".yarnrc\", target_path)\n cached_node_modules = os.path.join(target_path, 'node_modules')\n print(\"Cached version not found! 
Installing node modules.\")\n\n # Copy the existing node_modules to speed up install\n if os.path.exists(\"node_modules\") and not os.path.exists(cached_node_modules):\n shutil.copytree(\"node_modules/\", cached_node_modules, symlinks=True)\n if os.environ.get('CUSTOM_CA_CERTIFICATES'):\n run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']],\n stdout=stdout, stderr=stderr)\n run([YARN_BIN, \"install\", \"--non-interactive\", \"--frozen-lockfile\"] + yarn_args,\n cwd=target_path, stdout=stdout, stderr=stderr)\n with open(success_stamp, 'w'):\n pass\n", "path": "scripts/lib/node_cache.py"}], "after_files": [{"content": "import os\nimport hashlib\nimport json\nimport shutil\n\nfrom typing import Optional, List\nfrom scripts.lib.zulip_tools import subprocess_text_output, run\n\nZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nZULIP_SRV_PATH = \"/srv\"\n\nif 'TRAVIS' in os.environ:\n # In Travis CI, we don't have root access\n ZULIP_SRV_PATH = \"/home/travis\"\n\n\nNODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache')\nYARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn')\nYARN_PACKAGE_JSON = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/package.json')\n\nDEFAULT_PRODUCTION = False\n\ndef get_yarn_args(production: bool) -> List[str]:\n if production:\n yarn_args = [\"--prod\"]\n else:\n yarn_args = []\n return yarn_args\n\ndef generate_sha1sum_node_modules(\n setup_dir: Optional[str] = None, production: bool = DEFAULT_PRODUCTION\n) -> str:\n if setup_dir is None:\n setup_dir = os.path.realpath(os.getcwd())\n PACKAGE_JSON_FILE_PATH = os.path.join(setup_dir, 'package.json')\n YARN_LOCK_FILE_PATH = os.path.join(setup_dir, 'yarn.lock')\n sha1sum = hashlib.sha1()\n sha1sum.update(subprocess_text_output(['cat', PACKAGE_JSON_FILE_PATH]).encode('utf8'))\n if os.path.exists(YARN_LOCK_FILE_PATH):\n # For backwards compatibility, we can't assume yarn.lock exists\n sha1sum.update(subprocess_text_output(['cat', YARN_LOCK_FILE_PATH]).encode('utf8'))\n with open(YARN_PACKAGE_JSON) as f:\n yarn_version = json.load(f)['version']\n sha1sum.update(yarn_version.encode(\"utf8\"))\n sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))\n yarn_args = get_yarn_args(production=production)\n sha1sum.update(''.join(sorted(yarn_args)).encode('utf8'))\n return sha1sum.hexdigest()\n\ndef setup_node_modules(\n production: bool = DEFAULT_PRODUCTION,\n prefer_offline: bool = False,\n) -> None:\n yarn_args = get_yarn_args(production=production)\n if prefer_offline:\n yarn_args.append(\"--prefer-offline\")\n sha1sum = generate_sha1sum_node_modules(production=production)\n target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum)\n cached_node_modules = os.path.join(target_path, 'node_modules')\n success_stamp = os.path.join(target_path, '.success-stamp')\n # Check if a cached version already exists\n if not os.path.exists(success_stamp):\n do_yarn_install(target_path,\n yarn_args,\n success_stamp)\n\n print(\"Using cached node modules from %s\" % (cached_node_modules,))\n if os.path.islink('node_modules'):\n os.remove('node_modules')\n elif os.path.isdir('node_modules'):\n shutil.rmtree('node_modules')\n os.symlink(cached_node_modules, 'node_modules')\n\ndef do_yarn_install(\n target_path: str,\n yarn_args: List[str],\n success_stamp: str\n) -> None:\n os.makedirs(target_path, exist_ok=True)\n shutil.copy('package.json', target_path)\n shutil.copy(\"yarn.lock\", target_path)\n shutil.copy(\".yarnrc\", 
target_path)\n cached_node_modules = os.path.join(target_path, 'node_modules')\n print(\"Cached version not found! Installing node modules.\")\n\n # Copy the existing node_modules to speed up install\n if os.path.exists(\"node_modules\") and not os.path.exists(cached_node_modules):\n shutil.copytree(\"node_modules/\", cached_node_modules, symlinks=True)\n if os.environ.get('CUSTOM_CA_CERTIFICATES'):\n run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']])\n run([YARN_BIN, \"install\", \"--non-interactive\", \"--frozen-lockfile\"] + yarn_args,\n cwd=target_path)\n with open(success_stamp, 'w'):\n pass\n", "path": "scripts/lib/node_cache.py"}]} | 2,317 | 536 |
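The zulip record above lists two remedies; its golden diff takes the second route, dropping the stdout/stderr redirection so that yarn's own errors reach the console. A minimal sketch of the first, simpler suggestion (run the noisy step against a log file and print that file's path on failure) is below; the command and log path are illustrative placeholders, not Zulip's actual deployment code.

```python
# Sketch: run a verbose build step with output captured to a log file,
# and point the operator at that file if the step fails.
# The command and log path below are placeholders for illustration.
import subprocess
import sys


def run_with_log(command, log_path):
    with open(log_path, "ab") as log_file:
        result = subprocess.run(command, stdout=log_file, stderr=log_file)
    if result.returncode != 0:
        print(
            "Error: '{}' failed with exit status {}.\n"
            "Full output is in {}".format(
                " ".join(command), result.returncode, log_path
            ),
            file=sys.stderr,
        )
        sys.exit(1)


if __name__ == "__main__":
    run_with_log(["./tools/update-prod-static"], "/tmp/upgrade-prod-static.log")
```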
gh_patches_debug_15730 | rasdani/github-patches | git_diff | learningequality__kolibri-4679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some images are not found
### Observed behavior
In the current Kolibri develop branch, some images are not found after building the js bundle.
I.e.
`yarn devserver` works perfectly
`yarn build & yarn django-devserver` gives a not-found error in the urls
/user/kolibri-logo.svg
/user/background.jpg

It looks like a path issue in the frontend
### Expected behavior
All the images should be found when using the built bundles.
### User-facing consequences
Bad look
### Steps to reproduce
Using develop code
`yarn build` will build the bundles
`yarn django-devserver` will run only the Django webserver.
Browsing in Kolibri will make the missing images visible
### Context
* Kolibri develop branch
* Tested with Google Chrome
* OS: Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/templatetags/kolibri_tags.py`
Content:
```
1 """
2 Kolibri template tags
3 =====================
4 """
5 from __future__ import absolute_import
6 from __future__ import print_function
7 from __future__ import unicode_literals
8
9 import copy
10 import json
11 import re
12
13 import user_agents
14 from django import template
15 from django.conf import settings
16 from django.contrib.staticfiles.templatetags.staticfiles import static
17 from django.core.serializers.json import DjangoJSONEncoder
18 from django.core.urlresolvers import resolve
19 from django.core.urlresolvers import reverse
20 from django.utils.html import mark_safe
21 from django.utils.timezone import now
22 from django.utils.translation import get_language
23 from django.utils.translation import get_language_bidi
24 from django.utils.translation import get_language_info
25 from django_js_reverse.js_reverse_settings import JS_GLOBAL_OBJECT_NAME
26 from django_js_reverse.js_reverse_settings import JS_VAR_NAME
27 from django_js_reverse.templatetags.js_reverse import js_reverse_inline
28 from rest_framework.renderers import JSONRenderer
29 from six import iteritems
30
31 import kolibri
32 from kolibri.core.device.models import ContentCacheKey
33 from kolibri.core.hooks import NavigationHook
34 from kolibri.core.webpack.utils import webpack_asset_render
35 from kolibri.utils import conf
36 from kolibri.utils import i18n
37
38 register = template.Library()
39
40
41 @register.simple_tag()
42 def kolibri_content_cache_key():
43 js = """
44 <script>
45 var contentCacheKey = '{cache_key}';
46 </script>
47 """.format(
48 cache_key=ContentCacheKey.get_cache_key(),
49 )
50 return mark_safe(js)
51
52
53 def _supports_modern_fonts(request):
54 """
55 In order to use the modern font-loading strategy we need to ensure two things:
56
57 1. The browser needs to properly use the font-face unicode-range descriptor in order
58 only load fonts when they are needed. This allows us to reference fonts for every
59 supported alphabet while ensuring that the client doesn't download many megabytes
60 of font data.
61
62 2. The browser needs to avoid a flash of invisible text (FOIT) while extra fonts are
63 loading, and instead render text using the browser's default fonts (FOUT). This
64 allows users to view and begin reading text, even if the fonts haven't loaded yet.
65 With some browsers this means supporting the new font-display descriptor. The
66 Edge browser uses FOUT instead of FOIT by default, and therefore doesn't need to
67 support font-display.
68
69 Based on https://caniuse.com/#feat=font-unicode-range
70 """
71
72 if 'HTTP_USER_AGENT' not in request.META:
73 return False
74
75 browser = user_agents.parse(request.META['HTTP_USER_AGENT']).browser
76
77 if browser.family == "Edge": # Edge only needs unicode-range, not font-display
78 return browser.version[0] >= 17
79 if browser.family in ("Firefox", "Firefox Mobile"):
80 return browser.version[0] >= 58
81 if browser.family in ("Chrome", "Chrome Mobile"):
82 return browser.version[0] >= 60
83 if browser.family == "Safari":
84 return browser.version[0] >= 11 and browser.version[1] >= 1
85 if browser.family == "Opera":
86 return browser.version[0] >= 47
87 if browser.family == "Mobile Safari":
88 return browser.version[0] >= 11 and browser.version[1] >= 4
89
90 return False
91
92
93 @register.simple_tag(takes_context=True)
94 def kolibri_language_globals(context):
95
96 template = """
97 <script>
98 var languageCode = '{lang_code}';
99 var languageDir = '{lang_dir}';
100 var languages = JSON.parse('{languages}');
101 var useModernFontLoading = {use_modern};
102 </script>
103 <link type="text/css" href="{common_css_file}?v={version}" rel="stylesheet"/>
104 <link type="text/css" href="{subset_css_file}?v={version}" rel="stylesheet"/>
105 <link type="text/css" href="{full_css_file}?v={version}" rel="stylesheet"/>
106 """
107
108 language_code = get_language()
109 lang_dir = "rtl" if get_language_bidi() else "ltr"
110
111 languages = {}
112 for code, language_name in settings.LANGUAGES:
113 lang_info = next((lang for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES if lang['intl_code'] == code), None)
114 languages[code] = {
115 # Format to match the schema of the content Language model
116 "id": code,
117 "lang_name": language_name,
118 "english_name": lang_info["english_name"] if lang_info else get_language_info(code)["name"],
119 "lang_direction": get_language_info(code)["bidi"],
120 }
121
122 common_file = static("assets/fonts/noto-common.css")
123 subset_file = static("assets/fonts/noto-subset.{}.css".format(language_code))
124 is_modern = _supports_modern_fonts(context["request"])
125 full_file = static(
126 "assets/fonts/noto-full.{}.{}.css".format(
127 language_code, ("modern" if is_modern else "basic")
128 )
129 )
130
131 return mark_safe(
132 template.format(
133 lang_code=language_code,
134 lang_dir=lang_dir,
135 languages=json.dumps(languages),
136 use_modern="true" if is_modern else "false",
137 common_css_file=common_file,
138 subset_css_file=subset_file,
139 full_css_file=full_file,
140 # Temporary cache busting strategy.
141 # Would be better to use ManifestStaticFilesStorage
142 version=kolibri.__version__,
143 )
144 )
145
146
147 @register.simple_tag()
148 def kolibri_navigation_actions():
149 """
150 A tag to include an initial JS-object to bootstrap nav action data into the app.
151 :return: An html string
152 """
153 return webpack_asset_render(NavigationHook)
154
155
156 @register.simple_tag(takes_context=True)
157 def kolibri_set_urls(context):
158 js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)
159 js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)
160 js = (js_reverse_inline(context) +
161 """
162 Object.assign({kolibri}.urls, {global_object}.{js_var});
163 {kolibri}.urls.__staticURL = '{static_url}';
164 """.format(
165 kolibri=conf.KOLIBRI_CORE_JS_NAME,
166 global_object=js_global_object_name,
167 js_var=js_var_name,
168 static_url=settings.STATIC_URL))
169 return mark_safe(js)
170
171
172 @register.simple_tag()
173 def kolibri_set_server_time():
174 html = ("<script type='text/javascript'>"
175 "{0}.utils.serverClock.setServerTime({1});"
176 "</script>".format(conf.KOLIBRI_CORE_JS_NAME,
177 json.dumps(now(), cls=DjangoJSONEncoder)))
178 return mark_safe(html)
179
180
181 @register.simple_tag(takes_context=True)
182 def kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):
183 response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'detail', **kwargs)
184 html = ("<script type='text/javascript'>"
185 "var model = {0}.resources.{1}.createModel(JSON.parse({2}));"
186 "model.synced = true;"
187 "</script>".format(
188 conf.KOLIBRI_CORE_JS_NAME,
189 api_resource,
190 json.dumps(JSONRenderer().render(response.data).decode('utf-8'))))
191 return mark_safe(html)
192
193
194 @register.simple_tag(takes_context=True)
195 def kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):
196 response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'list', **kwargs)
197 html = ("<script type='text/javascript'>"
198 "var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));"
199 "collection.synced = true;"
200 "</script>".format(conf.KOLIBRI_CORE_JS_NAME,
201 api_resource,
202 json.dumps(kwargs),
203 json.dumps(JSONRenderer().render(response.data).decode('utf-8')),
204 ))
205 return mark_safe(html)
206
207
208 def _replace_dict_values(check, replace, dict):
209 for (key, value) in iteritems(dict):
210 if dict[key] is check:
211 dict[key] = replace
212
213
214 def _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):
215 reversal = dict()
216 kwargs_check = 'kwargs_'
217 # remove prepended string and matching items from kwargs
218 for key in list(kwargs.keys()):
219 if kwargs_check in key:
220 item = kwargs.pop(key)
221 key = re.sub(kwargs_check, '', key)
222 reversal[key] = item
223 view, view_args, view_kwargs = resolve(reverse('kolibri:core:{0}-{1}'.format(base_name, route), kwargs=reversal))
224 # switch out None temporarily because invalid filtering and caching can occur
225 _replace_dict_values(None, str(''), kwargs)
226 request = copy.copy(context['request'])
227 request.GET = request.GET.copy()
228 for key in kwargs:
229 request.GET[key] = kwargs[key]
230 response = view(request, **view_kwargs)
231 _replace_dict_values(str(''), None, kwargs)
232 return response, kwargs
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/templatetags/kolibri_tags.py b/kolibri/core/templatetags/kolibri_tags.py
--- a/kolibri/core/templatetags/kolibri_tags.py
+++ b/kolibri/core/templatetags/kolibri_tags.py
@@ -157,10 +157,9 @@
def kolibri_set_urls(context):
js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)
js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)
- js = (js_reverse_inline(context) +
- """
+ js = (js_reverse_inline(context) + """
Object.assign({kolibri}.urls, {global_object}.{js_var});
- {kolibri}.urls.__staticURL = '{static_url}';
+ {kolibri}.urls.__setStaticURL('{static_url}');
""".format(
kolibri=conf.KOLIBRI_CORE_JS_NAME,
global_object=js_global_object_name,
| {"golden_diff": "diff --git a/kolibri/core/templatetags/kolibri_tags.py b/kolibri/core/templatetags/kolibri_tags.py\n--- a/kolibri/core/templatetags/kolibri_tags.py\n+++ b/kolibri/core/templatetags/kolibri_tags.py\n@@ -157,10 +157,9 @@\n def kolibri_set_urls(context):\n js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)\n js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)\n- js = (js_reverse_inline(context) +\n- \"\"\"\n+ js = (js_reverse_inline(context) + \"\"\"\n Object.assign({kolibri}.urls, {global_object}.{js_var});\n- {kolibri}.urls.__staticURL = '{static_url}';\n+ {kolibri}.urls.__setStaticURL('{static_url}');\n \"\"\".format(\n kolibri=conf.KOLIBRI_CORE_JS_NAME,\n global_object=js_global_object_name,\n", "issue": "Some images are not found \n### Observed behavior\r\nIn the current Kolibri develop branch, some images are not found after building js bundle.\r\nI.e. \r\n`yarn devserver`works perfectly\r\n`yarn build & yarn django-devserver` gives a not-found error in the urls\r\n/user/kolibri-logo.svg\r\n/user/background.jpg\r\n\r\nIt looks like a path issue in the frontend\r\n\r\n### Expected behavior\r\nAll the images should be found when using the built bundles.\r\n\r\n### User-facing consequences\r\nBad look\r\n\r\n### Steps to reproduce\r\nUsing develop code\r\n`yarn build` will build the bundles\r\n`yarn django-devserver` will run only the Django webserver.\r\nBrowsing in Kolibri will be visible\r\n\r\n\r\n### Context\r\n* Kolibri develop branch\r\n* Tested with Google Chrome\r\n* OS: Linux\r\n\n", "before_files": [{"content": "\"\"\"\nKolibri template tags\n=====================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport copy\nimport json\nimport re\n\nimport user_agents\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.urlresolvers import resolve\nfrom django.core.urlresolvers import reverse\nfrom django.utils.html import mark_safe\nfrom django.utils.timezone import now\nfrom django.utils.translation import get_language\nfrom django.utils.translation import get_language_bidi\nfrom django.utils.translation import get_language_info\nfrom django_js_reverse.js_reverse_settings import JS_GLOBAL_OBJECT_NAME\nfrom django_js_reverse.js_reverse_settings import JS_VAR_NAME\nfrom django_js_reverse.templatetags.js_reverse import js_reverse_inline\nfrom rest_framework.renderers import JSONRenderer\nfrom six import iteritems\n\nimport kolibri\nfrom kolibri.core.device.models import ContentCacheKey\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.webpack.utils import webpack_asset_render\nfrom kolibri.utils import conf\nfrom kolibri.utils import i18n\n\nregister = template.Library()\n\n\[email protected]_tag()\ndef kolibri_content_cache_key():\n js = \"\"\"\n <script>\n var contentCacheKey = '{cache_key}';\n </script>\n \"\"\".format(\n cache_key=ContentCacheKey.get_cache_key(),\n )\n return mark_safe(js)\n\n\ndef _supports_modern_fonts(request):\n \"\"\"\n In order to use the modern font-loading strategy we need to ensure two things:\n\n 1. The browser needs to properly use the font-face unicode-range descriptor in order\n only load fonts when they are needed. 
This allows us to reference fonts for every\n supported alphabet while ensuring that the client doesn't download many megabytes\n of font data.\n\n 2. The browser needs to avoid a flash of invisible text (FOIT) while extra fonts are\n loading, and instead render text using the browser's default fonts (FOUT). This\n allows users to view and begin reading text, even if the fonts haven't loaded yet.\n With some browsers this means supporting the new font-display descriptor. The\n Edge browser uses FOUT instead of FOIT by default, and therefore doesn't need to\n support font-display.\n\n Based on https://caniuse.com/#feat=font-unicode-range\n \"\"\"\n\n if 'HTTP_USER_AGENT' not in request.META:\n return False\n\n browser = user_agents.parse(request.META['HTTP_USER_AGENT']).browser\n\n if browser.family == \"Edge\": # Edge only needs unicode-range, not font-display\n return browser.version[0] >= 17\n if browser.family in (\"Firefox\", \"Firefox Mobile\"):\n return browser.version[0] >= 58\n if browser.family in (\"Chrome\", \"Chrome Mobile\"):\n return browser.version[0] >= 60\n if browser.family == \"Safari\":\n return browser.version[0] >= 11 and browser.version[1] >= 1\n if browser.family == \"Opera\":\n return browser.version[0] >= 47\n if browser.family == \"Mobile Safari\":\n return browser.version[0] >= 11 and browser.version[1] >= 4\n\n return False\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_language_globals(context):\n\n template = \"\"\"\n <script>\n var languageCode = '{lang_code}';\n var languageDir = '{lang_dir}';\n var languages = JSON.parse('{languages}');\n var useModernFontLoading = {use_modern};\n </script>\n <link type=\"text/css\" href=\"{common_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{subset_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{full_css_file}?v={version}\" rel=\"stylesheet\"/>\n \"\"\"\n\n language_code = get_language()\n lang_dir = \"rtl\" if get_language_bidi() else \"ltr\"\n\n languages = {}\n for code, language_name in settings.LANGUAGES:\n lang_info = next((lang for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES if lang['intl_code'] == code), None)\n languages[code] = {\n # Format to match the schema of the content Language model\n \"id\": code,\n \"lang_name\": language_name,\n \"english_name\": lang_info[\"english_name\"] if lang_info else get_language_info(code)[\"name\"],\n \"lang_direction\": get_language_info(code)[\"bidi\"],\n }\n\n common_file = static(\"assets/fonts/noto-common.css\")\n subset_file = static(\"assets/fonts/noto-subset.{}.css\".format(language_code))\n is_modern = _supports_modern_fonts(context[\"request\"])\n full_file = static(\n \"assets/fonts/noto-full.{}.{}.css\".format(\n language_code, (\"modern\" if is_modern else \"basic\")\n )\n )\n\n return mark_safe(\n template.format(\n lang_code=language_code,\n lang_dir=lang_dir,\n languages=json.dumps(languages),\n use_modern=\"true\" if is_modern else \"false\",\n common_css_file=common_file,\n subset_css_file=subset_file,\n full_css_file=full_file,\n # Temporary cache busting strategy.\n # Would be better to use ManifestStaticFilesStorage\n version=kolibri.__version__,\n )\n )\n\n\[email protected]_tag()\ndef kolibri_navigation_actions():\n \"\"\"\n A tag to include an initial JS-object to bootstrap nav action data into the app.\n :return: An html string\n \"\"\"\n return webpack_asset_render(NavigationHook)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_set_urls(context):\n 
js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)\n js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)\n js = (js_reverse_inline(context) +\n \"\"\"\n Object.assign({kolibri}.urls, {global_object}.{js_var});\n {kolibri}.urls.__staticURL = '{static_url}';\n \"\"\".format(\n kolibri=conf.KOLIBRI_CORE_JS_NAME,\n global_object=js_global_object_name,\n js_var=js_var_name,\n static_url=settings.STATIC_URL))\n return mark_safe(js)\n\n\[email protected]_tag()\ndef kolibri_set_server_time():\n html = (\"<script type='text/javascript'>\"\n \"{0}.utils.serverClock.setServerTime({1});\"\n \"</script>\".format(conf.KOLIBRI_CORE_JS_NAME,\n json.dumps(now(), cls=DjangoJSONEncoder)))\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'detail', **kwargs)\n html = (\"<script type='text/javascript'>\"\n \"var model = {0}.resources.{1}.createModel(JSON.parse({2}));\"\n \"model.synced = true;\"\n \"</script>\".format(\n conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(JSONRenderer().render(response.data).decode('utf-8'))))\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'list', **kwargs)\n html = (\"<script type='text/javascript'>\"\n \"var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));\"\n \"collection.synced = true;\"\n \"</script>\".format(conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(kwargs),\n json.dumps(JSONRenderer().render(response.data).decode('utf-8')),\n ))\n return mark_safe(html)\n\n\ndef _replace_dict_values(check, replace, dict):\n for (key, value) in iteritems(dict):\n if dict[key] is check:\n dict[key] = replace\n\n\ndef _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):\n reversal = dict()\n kwargs_check = 'kwargs_'\n # remove prepended string and matching items from kwargs\n for key in list(kwargs.keys()):\n if kwargs_check in key:\n item = kwargs.pop(key)\n key = re.sub(kwargs_check, '', key)\n reversal[key] = item\n view, view_args, view_kwargs = resolve(reverse('kolibri:core:{0}-{1}'.format(base_name, route), kwargs=reversal))\n # switch out None temporarily because invalid filtering and caching can occur\n _replace_dict_values(None, str(''), kwargs)\n request = copy.copy(context['request'])\n request.GET = request.GET.copy()\n for key in kwargs:\n request.GET[key] = kwargs[key]\n response = view(request, **view_kwargs)\n _replace_dict_values(str(''), None, kwargs)\n return response, kwargs\n", "path": "kolibri/core/templatetags/kolibri_tags.py"}], "after_files": [{"content": "\"\"\"\nKolibri template tags\n=====================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport copy\nimport json\nimport re\n\nimport user_agents\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.urlresolvers import resolve\nfrom django.core.urlresolvers import reverse\nfrom django.utils.html import mark_safe\nfrom django.utils.timezone import now\nfrom 
django.utils.translation import get_language\nfrom django.utils.translation import get_language_bidi\nfrom django.utils.translation import get_language_info\nfrom django_js_reverse.js_reverse_settings import JS_GLOBAL_OBJECT_NAME\nfrom django_js_reverse.js_reverse_settings import JS_VAR_NAME\nfrom django_js_reverse.templatetags.js_reverse import js_reverse_inline\nfrom rest_framework.renderers import JSONRenderer\nfrom six import iteritems\n\nimport kolibri\nfrom kolibri.core.device.models import ContentCacheKey\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.webpack.utils import webpack_asset_render\nfrom kolibri.utils import conf\nfrom kolibri.utils import i18n\n\nregister = template.Library()\n\n\[email protected]_tag()\ndef kolibri_content_cache_key():\n js = \"\"\"\n <script>\n var contentCacheKey = '{cache_key}';\n </script>\n \"\"\".format(\n cache_key=ContentCacheKey.get_cache_key(),\n )\n return mark_safe(js)\n\n\ndef _supports_modern_fonts(request):\n \"\"\"\n In order to use the modern font-loading strategy we need to ensure two things:\n\n 1. The browser needs to properly use the font-face unicode-range descriptor in order\n only load fonts when they are needed. This allows us to reference fonts for every\n supported alphabet while ensuring that the client doesn't download many megabytes\n of font data.\n\n 2. The browser needs to avoid a flash of invisible text (FOIT) while extra fonts are\n loading, and instead render text using the browser's default fonts (FOUT). This\n allows users to view and begin reading text, even if the fonts haven't loaded yet.\n With some browsers this means supporting the new font-display descriptor. The\n Edge browser uses FOUT instead of FOIT by default, and therefore doesn't need to\n support font-display.\n\n Based on https://caniuse.com/#feat=font-unicode-range\n \"\"\"\n\n if 'HTTP_USER_AGENT' not in request.META:\n return False\n\n browser = user_agents.parse(request.META['HTTP_USER_AGENT']).browser\n\n if browser.family == \"Edge\": # Edge only needs unicode-range, not font-display\n return browser.version[0] >= 17\n if browser.family in (\"Firefox\", \"Firefox Mobile\"):\n return browser.version[0] >= 58\n if browser.family in (\"Chrome\", \"Chrome Mobile\"):\n return browser.version[0] >= 60\n if browser.family == \"Safari\":\n return browser.version[0] >= 11 and browser.version[1] >= 1\n if browser.family == \"Opera\":\n return browser.version[0] >= 47\n if browser.family == \"Mobile Safari\":\n return browser.version[0] >= 11 and browser.version[1] >= 4\n\n return False\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_language_globals(context):\n\n template = \"\"\"\n <script>\n var languageCode = '{lang_code}';\n var languageDir = '{lang_dir}';\n var languages = JSON.parse('{languages}');\n var useModernFontLoading = {use_modern};\n </script>\n <link type=\"text/css\" href=\"{common_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{subset_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{full_css_file}?v={version}\" rel=\"stylesheet\"/>\n \"\"\"\n\n language_code = get_language()\n lang_dir = \"rtl\" if get_language_bidi() else \"ltr\"\n\n languages = {}\n for code, language_name in settings.LANGUAGES:\n lang_info = next((lang for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES if lang['intl_code'] == code), None)\n languages[code] = {\n # Format to match the schema of the content Language model\n \"id\": code,\n \"lang_name\": language_name,\n 
\"english_name\": lang_info[\"english_name\"] if lang_info else get_language_info(code)[\"name\"],\n \"lang_direction\": get_language_info(code)[\"bidi\"],\n }\n\n common_file = static(\"assets/fonts/noto-common.css\")\n subset_file = static(\"assets/fonts/noto-subset.{}.css\".format(language_code))\n is_modern = _supports_modern_fonts(context[\"request\"])\n full_file = static(\n \"assets/fonts/noto-full.{}.{}.css\".format(\n language_code, (\"modern\" if is_modern else \"basic\")\n )\n )\n\n return mark_safe(\n template.format(\n lang_code=language_code,\n lang_dir=lang_dir,\n languages=json.dumps(languages),\n use_modern=\"true\" if is_modern else \"false\",\n common_css_file=common_file,\n subset_css_file=subset_file,\n full_css_file=full_file,\n # Temporary cache busting strategy.\n # Would be better to use ManifestStaticFilesStorage\n version=kolibri.__version__,\n )\n )\n\n\[email protected]_tag()\ndef kolibri_navigation_actions():\n \"\"\"\n A tag to include an initial JS-object to bootstrap nav action data into the app.\n :return: An html string\n \"\"\"\n return webpack_asset_render(NavigationHook)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_set_urls(context):\n js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)\n js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)\n js = (js_reverse_inline(context) + \"\"\"\n Object.assign({kolibri}.urls, {global_object}.{js_var});\n {kolibri}.urls.__setStaticURL('{static_url}');\n \"\"\".format(\n kolibri=conf.KOLIBRI_CORE_JS_NAME,\n global_object=js_global_object_name,\n js_var=js_var_name,\n static_url=settings.STATIC_URL))\n return mark_safe(js)\n\n\[email protected]_tag()\ndef kolibri_set_server_time():\n html = (\"<script type='text/javascript'>\"\n \"{0}.utils.serverClock.setServerTime({1});\"\n \"</script>\".format(conf.KOLIBRI_CORE_JS_NAME,\n json.dumps(now(), cls=DjangoJSONEncoder)))\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'detail', **kwargs)\n html = (\"<script type='text/javascript'>\"\n \"var model = {0}.resources.{1}.createModel(JSON.parse({2}));\"\n \"model.synced = true;\"\n \"</script>\".format(\n conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(JSONRenderer().render(response.data).decode('utf-8'))))\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'list', **kwargs)\n html = (\"<script type='text/javascript'>\"\n \"var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));\"\n \"collection.synced = true;\"\n \"</script>\".format(conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(kwargs),\n json.dumps(JSONRenderer().render(response.data).decode('utf-8')),\n ))\n return mark_safe(html)\n\n\ndef _replace_dict_values(check, replace, dict):\n for (key, value) in iteritems(dict):\n if dict[key] is check:\n dict[key] = replace\n\n\ndef _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):\n reversal = dict()\n kwargs_check = 'kwargs_'\n # remove prepended string and matching items from kwargs\n for key in list(kwargs.keys()):\n if kwargs_check in key:\n item = kwargs.pop(key)\n key = re.sub(kwargs_check, '', key)\n reversal[key] = item\n 
view, view_args, view_kwargs = resolve(reverse('kolibri:core:{0}-{1}'.format(base_name, route), kwargs=reversal))\n # switch out None temporarily because invalid filtering and caching can occur\n _replace_dict_values(None, str(''), kwargs)\n request = copy.copy(context['request'])\n request.GET = request.GET.copy()\n for key in kwargs:\n request.GET[key] = kwargs[key]\n response = view(request, **view_kwargs)\n _replace_dict_values(str(''), None, kwargs)\n return response, kwargs\n", "path": "kolibri/core/templatetags/kolibri_tags.py"}]} | 3,088 | 231 |
gh_patches_debug_34221 | rasdani/github-patches | git_diff | aws__aws-cli-4148 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eks update-kubeconfig - generates incorrect kube-config - missing aws-iam-authenticator command
Version: aws-cli/1.16.156 Python/2.7.5 Linux/3.10.0-957.12.1.el7.x86_64 botocore/1.12.146
kubectl: 1.12.7
aws-iam-authenticator: 1.12.7
```bash
aws --region eu-west-1 eks update-kubeconfig --name XXXXXX
```
it generates
```
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- eks
- get-token
- --cluster-name
- XXXXXX
command: aws ---> it should be aws-iam-authenticator
```
Obviously, whatever the intent behind the get-token approach, it should not break the previous mode,
or it should at least give me the choice:
```
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- token
- -i
- xxxxx
command: aws-iam-authenticator
```
The above makes kubectl invoke the aws cli, but it results in:
```
You must specify a region. You can also configure your region by running "aws configure".
```
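For context, the generated `exec` entry makes kubectl shell out to the AWS CLI without any region information, which is why the call fails whenever no default region is configured. A minimal, illustrative Python sketch of the remedy applied by the patch later in this record (deriving the region from the cluster ARN); the ARN, account ID, and cluster name below are placeholders, not values from a real cluster:

```python
from collections import OrderedDict

def build_exec_user_entry(cluster_arn, cluster_name):
    # EKS cluster ARNs look like arn:aws:eks:<region>:<account-id>:cluster/<name>,
    # so the fourth colon-separated field is the region.
    region = cluster_arn.split(":")[3]
    return OrderedDict([
        ("apiVersion", "client.authentication.k8s.io/v1alpha1"),
        ("args", ["--region", region, "eks", "get-token",
                  "--cluster-name", cluster_name]),
        ("command", "aws"),
    ])

# Placeholder values for illustration only.
entry = build_exec_user_entry(
    "arn:aws:eks:eu-west-1:123456789012:cluster/XXXXXX", "XXXXXX")
print(entry["args"])  # ['--region', 'eu-west-1', 'eks', 'get-token', '--cluster-name', 'XXXXXX']
```

Because `--region` is a global AWS CLI option, placing it ahead of the `eks` service name keeps the exec plugin self-contained even when the caller has no configured default region.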
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/eks/update_kubeconfig.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import os
15 import logging
16
17 from botocore.compat import OrderedDict
18
19 from awscli.customizations.commands import BasicCommand
20 from awscli.customizations.utils import uni_print
21 from awscli.customizations.eks.exceptions import EKSClusterError
22 from awscli.customizations.eks.kubeconfig import (Kubeconfig,
23 KubeconfigError,
24 KubeconfigLoader,
25 KubeconfigWriter,
26 KubeconfigValidator,
27 KubeconfigAppender)
28 from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump
29
30 LOG = logging.getLogger(__name__)
31
32 DEFAULT_PATH = os.path.expanduser("~/.kube/config")
33
34 # Use the endpoint for kubernetes 1.10
35 # To get the most recent endpoint we will need to
36 # Do a check on the cluster's version number
37 API_VERSION = "client.authentication.k8s.io/v1alpha1"
38
39 class UpdateKubeconfigCommand(BasicCommand):
40 NAME = 'update-kubeconfig'
41
42 DESCRIPTION = BasicCommand.FROM_FILE(
43 'eks',
44 'update-kubeconfig',
45 '_description.rst'
46 )
47
48 ARG_TABLE = [
49 {
50 'name': 'name',
51 'help_text': ("The name of the cluster for which "
52 "to create a kubeconfig entry. "
53 "This cluster must exist in your account and in the "
54 "specified or configured default Region "
55 "for your AWS CLI installation."),
56 'required': True
57 },
58 {
59 'name': 'kubeconfig',
60 'help_text': ("Optionally specify a kubeconfig file to append "
61 "with your configuration. "
62 "By default, the configuration is written to the "
63 "first file path in the KUBECONFIG "
64 "environment variable (if it is set) "
65 "or the default kubeconfig path (.kube/config) "
66 "in your home directory."),
67 'required': False
68 },
69 {
70 'name': 'role-arn',
71 'help_text': ("To assume a role for cluster authentication, "
72 "specify an IAM role ARN with this option. "
73 "For example, if you created a cluster "
74 "while assuming an IAM role, "
75 "then you must also assume that role to "
76 "connect to the cluster the first time."),
77 'required': False
78 },
79 {
80 'name': 'dry-run',
81 'action': 'store_true',
82 'default': False,
83 'help_text': ("Print the merged kubeconfig to stdout instead of "
84 "writing it to the specified file."),
85 'required': False
86 },
87 {
88 'name': 'verbose',
89 'action': 'store_true',
90 'default': False,
91 'help_text': ("Print more detailed output "
92 "when writing to the kubeconfig file, "
93 "including the appended entries.")
94 },
95 {
96 'name': 'alias',
97 'help_text': ("Alias for the cluster context name. "
98 "Defaults to match cluster ARN."),
99 'required': False
100 }
101 ]
102
103 def _display_entries(self, entries):
104 """
105 Display entries in yaml format
106
107 :param entries: a list of OrderedDicts to be printed
108 :type entries: list
109 """
110 uni_print("Entries:\n\n")
111 for entry in entries:
112 uni_print(ordered_yaml_dump(entry))
113 uni_print("\n")
114
115 def _run_main(self, parsed_args, parsed_globals):
116 client = EKSClient(self._session,
117 parsed_args.name,
118 parsed_args.role_arn,
119 parsed_globals)
120 new_cluster_dict = client.get_cluster_entry()
121 new_user_dict = client.get_user_entry()
122
123 config_selector = KubeconfigSelector(
124 os.environ.get("KUBECONFIG", ""),
125 parsed_args.kubeconfig
126 )
127 config = config_selector.choose_kubeconfig(
128 new_cluster_dict["name"]
129 )
130 updating_existing = config.has_cluster(new_cluster_dict["name"])
131 appender = KubeconfigAppender()
132 new_context_dict = appender.insert_cluster_user_pair(config,
133 new_cluster_dict,
134 new_user_dict,
135 parsed_args.alias)
136
137 if parsed_args.dry_run:
138 uni_print(config.dump_content())
139 else:
140 writer = KubeconfigWriter()
141 writer.write_kubeconfig(config)
142
143 if updating_existing:
144 uni_print("Updated context {0} in {1}\n".format(
145 new_context_dict["name"], config.path
146 ))
147 else:
148 uni_print("Added new context {0} to {1}\n".format(
149 new_context_dict["name"], config.path
150 ))
151
152 if parsed_args.verbose:
153 self._display_entries([
154 new_context_dict,
155 new_user_dict,
156 new_cluster_dict
157 ])
158
159
160
161 class KubeconfigSelector(object):
162
163 def __init__(self, env_variable, path_in, validator=None,
164 loader=None):
165 """
166 Parse KUBECONFIG into a list of absolute paths.
167 Also replace the empty list with DEFAULT_PATH
168
169 :param env_variable: KUBECONFIG as a long string
170 :type env_variable: string
171
172 :param path_in: The path passed in through the CLI
173 :type path_in: string or None
174 """
175 if validator is None:
176 validator = KubeconfigValidator()
177 self._validator = validator
178
179 if loader is None:
180 loader = KubeconfigLoader(validator)
181 self._loader = loader
182
183 if path_in is not None:
184 # Override environment variable
185 self._paths = [self._expand_path(path_in)]
186 else:
187 # Get the list of paths from the environment variable
188 if env_variable == "":
189 env_variable = DEFAULT_PATH
190 self._paths = [self._expand_path(element)
191 for element in env_variable.split(os.pathsep)
192 if len(element.strip()) > 0]
193 if len(self._paths) == 0:
194 self._paths = [DEFAULT_PATH]
195
196 def choose_kubeconfig(self, cluster_name):
197 """
198 Choose which kubeconfig file to read from.
199 If name is already an entry in one of the $KUBECONFIG files,
200 choose that one.
201 Otherwise choose the first file.
202
203 :param cluster_name: The name of the cluster which is going to be added
204 :type cluster_name: String
205
206 :return: a chosen Kubeconfig based on above rules
207 :rtype: Kubeconfig
208 """
209 # Search for an existing entry to update
210 for candidate_path in self._paths:
211 try:
212 loaded_config = self._loader.load_kubeconfig(candidate_path)
213
214 if loaded_config.has_cluster(cluster_name):
215 LOG.debug("Found entry to update at {0}".format(
216 candidate_path
217 ))
218 return loaded_config
219 except KubeconfigError as e:
220 LOG.warning("Passing {0}:{1}".format(candidate_path, e))
221
222 # No entry was found, use the first file in KUBECONFIG
223 #
224 # Note: This could raise KubeconfigErrors if paths[0] is corrupted
225 return self._loader.load_kubeconfig(self._paths[0])
226
227 def _expand_path(self, path):
228 """ A helper to expand a path to a full absolute path. """
229 return os.path.abspath(os.path.expanduser(path))
230
231
232 class EKSClient(object):
233 def __init__(self, session, cluster_name, role_arn, parsed_globals=None):
234 self._session = session
235 self._cluster_name = cluster_name
236 self._role_arn = role_arn
237 self._cluster_description = None
238 self._globals = parsed_globals
239
240 def _get_cluster_description(self):
241 """
242 Use an eks describe-cluster call to get the cluster description
243 Cache the response in self._cluster_description.
244 describe-cluster will only be called once.
245 """
246 if self._cluster_description is None:
247 if self._globals is None:
248 client = self._session.create_client("eks")
249 else:
250 client = self._session.create_client(
251 "eks",
252 region_name=self._globals.region,
253 endpoint_url=self._globals.endpoint_url,
254 verify=self._globals.verify_ssl
255 )
256 full_description = client.describe_cluster(name=self._cluster_name)
257 self._cluster_description = full_description["cluster"]
258
259 if "status" not in self._cluster_description:
260 raise EKSClusterError("Cluster not found")
261 if self._cluster_description["status"] != "ACTIVE":
262 raise EKSClusterError("Cluster status not active")
263
264 return self._cluster_description
265
266 def get_cluster_entry(self):
267 """
268 Return a cluster entry generated using
269 the previously obtained description.
270 """
271
272 cert_data = self._get_cluster_description().get("certificateAuthority",
273 {"data": ""})["data"]
274 endpoint = self._get_cluster_description().get("endpoint")
275 arn = self._get_cluster_description().get("arn")
276
277 return OrderedDict([
278 ("cluster", OrderedDict([
279 ("certificate-authority-data", cert_data),
280 ("server", endpoint)
281 ])),
282 ("name", arn)
283 ])
284
285 def get_user_entry(self):
286 """
287 Return a user entry generated using
288 the previously obtained description.
289 """
290 generated_user = OrderedDict([
291 ("name", self._get_cluster_description().get("arn", "")),
292 ("user", OrderedDict([
293 ("exec", OrderedDict([
294 ("apiVersion", API_VERSION),
295 ("args",
296 [
297 "eks",
298 "get-token",
299 "--cluster-name",
300 self._cluster_name
301 ]),
302 ("command", "aws")
303 ]))
304 ]))
305 ])
306
307 if self._role_arn is not None:
308 generated_user["user"]["exec"]["args"].extend([
309 "--role",
310 self._role_arn
311 ])
312
313 if self._session.profile:
314 generated_user["user"]["exec"]["env"] = [OrderedDict([
315 ("name", "AWS_PROFILE"),
316 ("value", self._session.profile)
317 ])]
318
319 return generated_user
320
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py
--- a/awscli/customizations/eks/update_kubeconfig.py
+++ b/awscli/customizations/eks/update_kubeconfig.py
@@ -101,7 +101,7 @@
]
def _display_entries(self, entries):
- """
+ """
Display entries in yaml format
:param entries: a list of OrderedDicts to be printed
@@ -248,7 +248,7 @@
client = self._session.create_client("eks")
else:
client = self._session.create_client(
- "eks",
+ "eks",
region_name=self._globals.region,
endpoint_url=self._globals.endpoint_url,
verify=self._globals.verify_ssl
@@ -287,6 +287,9 @@
Return a user entry generated using
the previously obtained description.
"""
+
+ region = self._get_cluster_description().get("arn").split(":")[3]
+
generated_user = OrderedDict([
("name", self._get_cluster_description().get("arn", "")),
("user", OrderedDict([
@@ -294,10 +297,12 @@
("apiVersion", API_VERSION),
("args",
[
+ "--region",
+ region,
"eks",
"get-token",
"--cluster-name",
- self._cluster_name
+ self._cluster_name,
]),
("command", "aws")
]))
| {"golden_diff": "diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py\n--- a/awscli/customizations/eks/update_kubeconfig.py\n+++ b/awscli/customizations/eks/update_kubeconfig.py\n@@ -101,7 +101,7 @@\n ]\n \n def _display_entries(self, entries):\n- \"\"\" \n+ \"\"\"\n Display entries in yaml format\n \n :param entries: a list of OrderedDicts to be printed\n@@ -248,7 +248,7 @@\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n- \"eks\", \n+ \"eks\",\n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n@@ -287,6 +287,9 @@\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n+\n+ region = self._get_cluster_description().get(\"arn\").split(\":\")[3]\n+\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n@@ -294,10 +297,12 @@\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n+ \"--region\",\n+ region,\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n- self._cluster_name\n+ self._cluster_name,\n ]),\n (\"command\", \"aws\")\n ]))\n", "issue": "eks update-config - generates incorrect kube-config - missing aws-iam-authenticator command\nVersion: aws-cli/1.16.156 Python/2.7.5 Linux/3.10.0-957.12.1.el7.x86_64 botocore/1.12.146\r\nkubectl : 1.12.7\r\niam-aws-authenticator: 1.12.7\r\n\r\n\r\n```bash\r\naws --region eu-west-1 eks update-kubeconfig --name XXXXXX\r\n```\r\n\r\nit generates \r\n\r\n```\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n args:\r\n - eks\r\n - get-token\r\n - --cluster-name\r\n - XXXXXX\r\n command: aws ---> it should be aws-iam-authenticator\r\n```\r\n\r\nObviously whatever the attempt with the get-token - it should not break the previous mode\r\nor give me the choice\r\n```\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n args:\r\n - token\r\n - -i\r\n - xxxxx\r\n command: aws-iam-authenticator\r\n```\r\nThe above makes kubectl invoke the aws cli but results to\r\n\r\n```\r\nYou must specify a region. You can also configure your region by running \"aws configure\".\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. \"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n },\n {\n 'name': 'alias',\n 'help_text': (\"Alias for the cluster context name. 
\"\n \"Defaults to match cluster ARN.\"),\n 'required': False\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\" \n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict,\n parsed_args.alias)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full 
absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\", \n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n self._cluster_name\n ]),\n (\"command\", \"aws\")\n ]))\n ]))\n ])\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"--role\",\n self._role_arn\n ])\n\n if self._session.profile:\n generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n (\"name\", \"AWS_PROFILE\"),\n (\"value\", self._session.profile)\n ])]\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. \"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n },\n {\n 'name': 'alias',\n 'help_text': (\"Alias for the cluster context name. 
\"\n \"Defaults to match cluster ARN.\"),\n 'required': False\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\"\n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict,\n parsed_args.alias)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full 
absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n\n region = self._get_cluster_description().get(\"arn\").split(\":\")[3]\n\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"--region\",\n region,\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n self._cluster_name,\n ]),\n (\"command\", \"aws\")\n ]))\n ]))\n ])\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"--role\",\n self._role_arn\n ])\n\n if self._session.profile:\n generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n (\"name\", \"AWS_PROFILE\"),\n (\"value\", self._session.profile)\n ])]\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}]} | 3,689 | 341 |
gh_patches_debug_28304 | rasdani/github-patches | git_diff | canonical__microk8s-4015 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Connection failed. The joining node is already known to dqlite.
<!--
Thank you for submitting an issue. Please fill in the template below
information about the bug you encountered.
-->
#### Summary
If joining a new node fails for any reason, the joining node still gets added to dqlite, and the cluster has to be fully shut down to remediate.
#### What Should Happen Instead?
Either `microk8s remove-node (broken ip)` should work, or there should be a less impactful way to remediate.
#### Reproduction Steps
I have not tried reproducing this.
However, on a production cluster running 1.25, I provisioned a new server, installed microk8s on it, ran `microk8s add-node`, and then ran the join command on the new node.
I failed to specify the correct channel, so the new node, now running 1.26, failed to join the cluster.
Not a problem, I thought: remove microk8s and reinstall it from the correct channel.
```
Contacting cluster at 10.100.5.100
Connection failed. The joining node (10.100.5.102) is already known to dqlite (504).
```
According to https://microk8s.io/docs/restore-quorum, restoring quorum is supposedly the method needed to resolve this issue; however, it is highly impactful to workloads, as the entire cluster has to be shut down.
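That said, the repository's `scripts/wrappers/remove_node.py` (quoted in full below) appears to already provide a lighter-weight escape hatch: `microk8s remove-node <ip> --force` falls back to deleting the stale address straight from dqlite, without shutting the cluster down. A rough sketch of that dqlite call follows; the two paths are assumptions about a default snap layout rather than values read from a live system:

```python
# Hedged sketch of the ".remove" call that remove-node --force issues.
import subprocess

SNAP = "/snap/microk8s/current"                                      # assumption
CLUSTER_DIR = "/var/snap/microk8s/current/var/kubernetes/backend"    # assumption

def remove_stale_dqlite_member(address):
    """Remove e.g. '10.100.5.102:19001' from the dqlite cluster membership."""
    cmd = (
        "{snap}/bin/dqlite -s file://{db}/cluster.yaml -c {db}/cluster.crt "
        "-k {db}/cluster.key -f json k8s".format(snap=SNAP, db=CLUSTER_DIR)
    ).split()
    # The dqlite shell command is passed as one argv element, mirroring the
    # delete_dqlite_node() helper quoted later in this record.
    cmd.append(".remove {}".format(address))
    subprocess.check_output(cmd)

# remove_stale_dqlite_member("10.100.5.102:19001")
```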
#### Related Tickets
https://github.com/canonical/microk8s/issues/2212
#### A few more details
```
root@kube01:~/Kubernetes/namespaces/readymotorsports.net# cat /var/snap/microk8s/current/var/kubernetes/backend/cluster.yaml
- Address: 10.100.5.105:19001
ID: 3297041220608546238
Role: 0
- Address: 10.100.5.100:19001
ID: 6578832437747111275
Role: 0
- Address: 10.100.5.106:19001
ID: 16212301467739305109
Role: 0
- Address: 10.100.5.102:19001
ID: 17023170999447087085
Role: 1
root@kube01:~/Kubernetes/namespaces/readymotorsports.net# microk8s remove-node 10.100.5.102
Error from server (NotFound): nodes "10.100.5.102" not found
Node 10.100.5.102 does not exist in Kubernetes.
root@kube01:~/Kubernetes/namespaces/readymotorsports.net# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
kube06 Ready <none> 83d v1.25.5 10.100.5.106 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8
kube01 Ready control-plane,master 95d v1.25.5 10.100.5.100 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8
kube05 Ready <none> 108d v1.25.5 10.100.5.105 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8
```
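The output above highlights the underlying mismatch: dqlite tracks the departed peer by IP (10.100.5.102), while Kubernetes only knows its remaining nodes by name, so `remove-node <ip>` falls through to the "does not exist in Kubernetes" branch. A self-contained sketch of the IP-to-name lookup that the fix in this record introduces; the kubectl wrapper path is an assumption about the snap layout:

```python
import json
import subprocess

KUBECTL = "/snap/microk8s/current/microk8s-kubectl.wrapper"  # assumption

def node_name_for_ip(ip):
    """Map an InternalIP to a Kubernetes node name, or None if unknown."""
    out = subprocess.check_output([KUBECTL, "get", "no", "-o", "json"])
    for item in json.loads(out.decode())["items"]:
        for addr in item["status"]["addresses"]:
            if addr["type"] == "InternalIP" and addr["address"] == ip:
                return item["metadata"]["name"]
    return None

# In the scenario above this returns None for "10.100.5.102", because the node
# never registered with Kubernetes; only dqlite still remembers it.
```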
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/wrappers/remove_node.py`
Content:
```
1 #!/usr/bin/python3
2 import json
3 import os
4 import shutil
5 import subprocess
6 import sys
7
8 import click
9 import netifaces
10
11 from common.cluster.utils import (
12 try_set_file_permissions,
13 is_node_running_dqlite,
14 )
15
16 snapdata_path = os.environ.get("SNAP_DATA")
17 snap_path = os.environ.get("SNAP")
18 callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
19
20 cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
21
22
23 def remove_dqlite_node(node, force=False):
24 try:
25 # Make sure this node exists
26 node_info = subprocess.check_output(
27 "{}/microk8s-kubectl.wrapper get no {} -o json".format(snap_path, node).split()
28 )
29 info = json.loads(node_info.decode())
30 node_address = None
31 for a in info["status"]["addresses"]:
32 if a["type"] == "InternalIP":
33 node_address = a["address"]
34 break
35
36 if not node_address:
37 print("Node {} is not part of the cluster.".format(node))
38 exit(1)
39
40 node_ep = None
41 my_ep, other_ep = get_dqlite_endpoints()
42 for ep in other_ep:
43 if ep.startswith("{}:".format(node_address)):
44 node_ep = ep
45
46 if node_ep and force:
47 delete_dqlite_node([node_ep], my_ep)
48 elif node_ep and not force:
49 print(
50 "Removal failed. Node {} is registered with dqlite. "
51 "Please, run first 'microk8s leave' on the departing node. \n"
52 "If the node is not available anymore and will never attempt to join the cluster "
53 "in the future use the '--force' flag \n"
54 "to unregister the node while removing it.".format(node)
55 )
56 exit(1)
57
58 except subprocess.CalledProcessError:
59 print("Node {} does not exist in Kubernetes.".format(node))
60 if force:
61 print("Attempting to remove {} from dqlite.".format(node))
62 # Make sure we do not have the node in dqlite.
63 # We assume the IP is provided to denote the
64 my_ep, other_ep = get_dqlite_endpoints()
65 for ep in other_ep:
66 if ep.startswith("{}:".format(node)):
67 print("Removing node entry found in dqlite.")
68 delete_dqlite_node([ep], my_ep)
69 exit(1)
70
71 remove_node(node)
72
73
74 def remove_node(node):
75 try:
76 # Make sure this node exists
77 subprocess.check_call(
78 "{}/microk8s-kubectl.wrapper get no {}".format(snap_path, node).split(),
79 stdout=subprocess.DEVNULL,
80 stderr=subprocess.DEVNULL,
81 )
82 except subprocess.CalledProcessError:
83 print("Node {} does not exist.".format(node))
84 exit(1)
85
86 remove_kubelet_token(node)
87 remove_callback_token(node)
88 subprocess.check_call(
89 "{}/microk8s-kubectl.wrapper delete no {}".format(snap_path, node).split(),
90 stdout=subprocess.DEVNULL,
91 stderr=subprocess.DEVNULL,
92 )
93
94
95 def remove_kubelet_token(node):
96 """
97 Remove a token for a node in the known tokens
98
99 :param node: the name of the node
100 """
101 file = "{}/credentials/known_tokens.csv".format(snapdata_path)
102 backup_file = "{}.backup".format(file)
103 token = "system:node:{}".format(node)
104 # That is a critical section. We need to protect it.
105 with open(backup_file, "w") as back_fp:
106 with open(file, "r") as fp:
107 for _, line in enumerate(fp):
108 if token in line:
109 continue
110 back_fp.write("{}".format(line))
111
112 try_set_file_permissions(backup_file)
113 shutil.copyfile(backup_file, file)
114
115
116 def get_dqlite_endpoints():
117 """
118 Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.
119
120 :return: two lists with the endpoints
121 """
122 out = subprocess.check_output(
123 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
124 "-k {dbdir}/cluster.key -f json k8s .cluster".format(
125 snappath=snap_path, dbdir=cluster_dir
126 ).split()
127 )
128 data = json.loads(out.decode())
129 ep_addresses = []
130 for ep in data:
131 ep_addresses.append(ep["Address"])
132 local_ips = []
133 for interface in netifaces.interfaces():
134 if netifaces.AF_INET not in netifaces.ifaddresses(interface):
135 continue
136 for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
137 local_ips.append(link["addr"])
138 my_ep = []
139 other_ep = []
140 for ep in ep_addresses:
141 found = False
142 for ip in local_ips:
143 if "{}:".format(ip) in ep:
144 my_ep.append(ep)
145 found = True
146 if not found:
147 other_ep.append(ep)
148
149 return my_ep, other_ep
150
151
152 def delete_dqlite_node(delete_node, dqlite_ep):
153 if len(delete_node) > 0 and "127.0.0.1" not in delete_node[0]:
154 for ep in dqlite_ep:
155 try:
156 cmd = (
157 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
158 "-k {dbdir}/cluster.key -f json k8s".format(
159 snappath=snap_path, dbdir=cluster_dir
160 ).split()
161 )
162 cmd.append(".remove {}".format(delete_node[0]))
163 subprocess.check_output(cmd)
164 break
165 except Exception as err:
166 print("Contacting node {} failed. Error:".format(ep))
167 print(repr(err))
168 exit(2)
169
170
171 def remove_callback_token(node):
172 """
173 Remove a callback token
174
175 :param node: the node
176 """
177 tmp_file = "{}.tmp".format(callback_tokens_file)
178 if not os.path.isfile(callback_tokens_file):
179 open(callback_tokens_file, "a+")
180 os.chmod(callback_tokens_file, 0o600)
181 with open(tmp_file, "w") as backup_fp:
182 os.chmod(tmp_file, 0o600)
183 with open(callback_tokens_file, "r+") as callback_fp:
184 # Entries are of the format: 'node_hostname:agent_port token'
185 # We need to get the node_hostname part
186 for line in callback_fp:
187 parts = line.split(":")
188 if parts[0] == node:
189 continue
190 else:
191 backup_fp.write(line)
192
193 try_set_file_permissions(tmp_file)
194 shutil.move(tmp_file, callback_tokens_file)
195
196
197 @click.command()
198 @click.argument("node", required=True)
199 @click.option(
200 "--force",
201 is_flag=True,
202 required=False,
203 default=False,
204 help="Force the node removal operation. (default: false)",
205 )
206 def reset(node, force):
207 """
208 Remove a node from the cluster
209 """
210 if is_node_running_dqlite():
211 remove_dqlite_node(node, force)
212 else:
213 remove_node(node)
214 sys.exit(0)
215
216
217 if __name__ == "__main__":
218 reset(prog_name="microk8s remove-node")
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/wrappers/remove_node.py b/scripts/wrappers/remove_node.py
--- a/scripts/wrappers/remove_node.py
+++ b/scripts/wrappers/remove_node.py
@@ -8,13 +8,15 @@
import click
import netifaces
+from ipaddress import ip_address, IPv4Address
+
from common.cluster.utils import (
try_set_file_permissions,
is_node_running_dqlite,
)
-snapdata_path = os.environ.get("SNAP_DATA")
snap_path = os.environ.get("SNAP")
+snapdata_path = os.environ.get("SNAP_DATA")
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
@@ -22,6 +24,22 @@
def remove_dqlite_node(node, force=False):
try:
+ # If node is an IP address, find the node name.
+ if type(ip_address(node)) is IPv4Address:
+ node_info = subprocess.check_output(
+ "{}/microk8s-kubectl.wrapper get no -o json".format(snap_path).split()
+ )
+ info = json.loads(node_info.decode())
+ found = False
+ for n in info["items"]:
+ if found:
+ break
+ for a in n["status"]["addresses"]:
+ if a["type"] == "InternalIP" and a["address"] == node:
+ node = n["metadata"]["name"]
+ found = True
+ break
+
# Make sure this node exists
node_info = subprocess.check_output(
"{}/microk8s-kubectl.wrapper get no {} -o json".format(snap_path, node).split()
| {"golden_diff": "diff --git a/scripts/wrappers/remove_node.py b/scripts/wrappers/remove_node.py\n--- a/scripts/wrappers/remove_node.py\n+++ b/scripts/wrappers/remove_node.py\n@@ -8,13 +8,15 @@\n import click\n import netifaces\n \n+from ipaddress import ip_address, IPv4Address\n+\n from common.cluster.utils import (\n try_set_file_permissions,\n is_node_running_dqlite,\n )\n \n-snapdata_path = os.environ.get(\"SNAP_DATA\")\n snap_path = os.environ.get(\"SNAP\")\n+snapdata_path = os.environ.get(\"SNAP_DATA\")\n callback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\n \n cluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\n@@ -22,6 +24,22 @@\n \n def remove_dqlite_node(node, force=False):\n try:\n+ # If node is an IP address, find the node name.\n+ if type(ip_address(node)) is IPv4Address:\n+ node_info = subprocess.check_output(\n+ \"{}/microk8s-kubectl.wrapper get no -o json\".format(snap_path).split()\n+ )\n+ info = json.loads(node_info.decode())\n+ found = False\n+ for n in info[\"items\"]:\n+ if found:\n+ break\n+ for a in n[\"status\"][\"addresses\"]:\n+ if a[\"type\"] == \"InternalIP\" and a[\"address\"] == node:\n+ node = n[\"metadata\"][\"name\"]\n+ found = True\n+ break\n+\n # Make sure this node exists\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no {} -o json\".format(snap_path, node).split()\n", "issue": "Connection failed. The joining node is already known to dqlite.\n<!--\r\n Thank you for submitting an issue. Please fill in the template below\r\n information about the bug you encountered.\r\n-->\r\n\r\n#### Summary\r\nIf, joining a new node fails for any reason, the joining node gets added to .dqlite, and the cluster has to be fully shutdown to remediate.\r\n\r\n#### What Should Happen Instead?\r\nEither, `microk8s remove-node (broken ip)` should work, or there should be a less impactful way to remediate.\r\n\r\n#### Reproduction Steps\r\nHave not tried reproducing.\r\n\r\nBUT, On a production cluster running 1.25, I provisioned a new server, added microk8s, and `microk8s add-node` followed by join.\r\n\r\nI failed to specify the correct channel, so, the new node now running 1.26 failed to join the cluster. \r\n\r\nNot a problem, let me remove microk8s, and install the correct channel.\r\n\r\n```\r\nContacting cluster at 10.100.5.100\r\nConnection failed. 
The joining node (10.100.5.102) is already known to dqlite (504).\r\n```\r\n\r\nhttps://microk8s.io/docs/restore-quorum\r\n\r\nIs supposedly the method needed to resolve this issue, however, this is highly impactful to workloads, as the entire cluster has to be shutdown.\r\n\r\n#### Related Tickets\r\n\r\nhttps://github.com/canonical/microk8s/issues/2212\r\n\r\n\r\n#### A few more details\r\n\r\n```\r\nroot@kube01:~/Kubernetes/namespaces/readymotorsports.net# cat /var/snap/microk8s/current/var/kubernetes/backend/cluster.yaml\r\n- Address: 10.100.5.105:19001\r\n ID: 3297041220608546238\r\n Role: 0\r\n- Address: 10.100.5.100:19001\r\n ID: 6578832437747111275\r\n Role: 0\r\n- Address: 10.100.5.106:19001\r\n ID: 16212301467739305109\r\n Role: 0\r\n- Address: 10.100.5.102:19001\r\n ID: 17023170999447087085\r\n Role: 1\r\nroot@kube01:~/Kubernetes/namespaces/readymotorsports.net# microk8s remove-node 10.100.5.102\r\nError from server (NotFound): nodes \"10.100.5.102\" not found\r\nNode 10.100.5.102 does not exist in Kubernetes.\r\nroot@kube01:~/Kubernetes/namespaces/readymotorsports.net# kubectl get node -o wide\r\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\r\nkube06 Ready <none> 83d v1.25.5 10.100.5.106 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8\r\nkube01 Ready control-plane,master 95d v1.25.5 10.100.5.100 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8\r\nkube05 Ready <none> 108d v1.25.5 10.100.5.105 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport click\nimport netifaces\n\nfrom common.cluster.utils import (\n try_set_file_permissions,\n is_node_running_dqlite,\n)\n\nsnapdata_path = os.environ.get(\"SNAP_DATA\")\nsnap_path = os.environ.get(\"SNAP\")\ncallback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\n\ncluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\n\n\ndef remove_dqlite_node(node, force=False):\n try:\n # Make sure this node exists\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no {} -o json\".format(snap_path, node).split()\n )\n info = json.loads(node_info.decode())\n node_address = None\n for a in info[\"status\"][\"addresses\"]:\n if a[\"type\"] == \"InternalIP\":\n node_address = a[\"address\"]\n break\n\n if not node_address:\n print(\"Node {} is not part of the cluster.\".format(node))\n exit(1)\n\n node_ep = None\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node_address)):\n node_ep = ep\n\n if node_ep and force:\n delete_dqlite_node([node_ep], my_ep)\n elif node_ep and not force:\n print(\n \"Removal failed. Node {} is registered with dqlite. \"\n \"Please, run first 'microk8s leave' on the departing node. 
\\n\"\n \"If the node is not available anymore and will never attempt to join the cluster \"\n \"in the future use the '--force' flag \\n\"\n \"to unregister the node while removing it.\".format(node)\n )\n exit(1)\n\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist in Kubernetes.\".format(node))\n if force:\n print(\"Attempting to remove {} from dqlite.\".format(node))\n # Make sure we do not have the node in dqlite.\n # We assume the IP is provided to denote the\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node)):\n print(\"Removing node entry found in dqlite.\")\n delete_dqlite_node([ep], my_ep)\n exit(1)\n\n remove_node(node)\n\n\ndef remove_node(node):\n try:\n # Make sure this node exists\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper get no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist.\".format(node))\n exit(1)\n\n remove_kubelet_token(node)\n remove_callback_token(node)\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper delete no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n\n\ndef remove_kubelet_token(node):\n \"\"\"\n Remove a token for a node in the known tokens\n\n :param node: the name of the node\n \"\"\"\n file = \"{}/credentials/known_tokens.csv\".format(snapdata_path)\n backup_file = \"{}.backup\".format(file)\n token = \"system:node:{}\".format(node)\n # That is a critical section. We need to protect it.\n with open(backup_file, \"w\") as back_fp:\n with open(file, \"r\") as fp:\n for _, line in enumerate(fp):\n if token in line:\n continue\n back_fp.write(\"{}\".format(line))\n\n try_set_file_permissions(backup_file)\n shutil.copyfile(backup_file, file)\n\n\ndef get_dqlite_endpoints():\n \"\"\"\n Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.\n\n :return: two lists with the endpoints\n \"\"\"\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n data = json.loads(out.decode())\n ep_addresses = []\n for ep in data:\n ep_addresses.append(ep[\"Address\"])\n local_ips = []\n for interface in netifaces.interfaces():\n if netifaces.AF_INET not in netifaces.ifaddresses(interface):\n continue\n for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:\n local_ips.append(link[\"addr\"])\n my_ep = []\n other_ep = []\n for ep in ep_addresses:\n found = False\n for ip in local_ips:\n if \"{}:\".format(ip) in ep:\n my_ep.append(ep)\n found = True\n if not found:\n other_ep.append(ep)\n\n return my_ep, other_ep\n\n\ndef delete_dqlite_node(delete_node, dqlite_ep):\n if len(delete_node) > 0 and \"127.0.0.1\" not in delete_node[0]:\n for ep in dqlite_ep:\n try:\n cmd = (\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n cmd.append(\".remove {}\".format(delete_node[0]))\n subprocess.check_output(cmd)\n break\n except Exception as err:\n print(\"Contacting node {} failed. 
Error:\".format(ep))\n print(repr(err))\n exit(2)\n\n\ndef remove_callback_token(node):\n \"\"\"\n Remove a callback token\n\n :param node: the node\n \"\"\"\n tmp_file = \"{}.tmp\".format(callback_tokens_file)\n if not os.path.isfile(callback_tokens_file):\n open(callback_tokens_file, \"a+\")\n os.chmod(callback_tokens_file, 0o600)\n with open(tmp_file, \"w\") as backup_fp:\n os.chmod(tmp_file, 0o600)\n with open(callback_tokens_file, \"r+\") as callback_fp:\n # Entries are of the format: 'node_hostname:agent_port token'\n # We need to get the node_hostname part\n for line in callback_fp:\n parts = line.split(\":\")\n if parts[0] == node:\n continue\n else:\n backup_fp.write(line)\n\n try_set_file_permissions(tmp_file)\n shutil.move(tmp_file, callback_tokens_file)\n\n\[email protected]()\[email protected](\"node\", required=True)\[email protected](\n \"--force\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Force the node removal operation. (default: false)\",\n)\ndef reset(node, force):\n \"\"\"\n Remove a node from the cluster\n \"\"\"\n if is_node_running_dqlite():\n remove_dqlite_node(node, force)\n else:\n remove_node(node)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n reset(prog_name=\"microk8s remove-node\")\n", "path": "scripts/wrappers/remove_node.py"}], "after_files": [{"content": "#!/usr/bin/python3\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport click\nimport netifaces\n\nfrom ipaddress import ip_address, IPv4Address\n\nfrom common.cluster.utils import (\n try_set_file_permissions,\n is_node_running_dqlite,\n)\n\nsnap_path = os.environ.get(\"SNAP\")\nsnapdata_path = os.environ.get(\"SNAP_DATA\")\ncallback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\n\ncluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\n\n\ndef remove_dqlite_node(node, force=False):\n try:\n # If node is an IP address, find the node name.\n if type(ip_address(node)) is IPv4Address:\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no -o json\".format(snap_path).split()\n )\n info = json.loads(node_info.decode())\n found = False\n for n in info[\"items\"]:\n if found:\n break\n for a in n[\"status\"][\"addresses\"]:\n if a[\"type\"] == \"InternalIP\" and a[\"address\"] == node:\n node = n[\"metadata\"][\"name\"]\n found = True\n break\n\n # Make sure this node exists\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no {} -o json\".format(snap_path, node).split()\n )\n info = json.loads(node_info.decode())\n node_address = None\n for a in info[\"status\"][\"addresses\"]:\n if a[\"type\"] == \"InternalIP\":\n node_address = a[\"address\"]\n break\n\n if not node_address:\n print(\"Node {} is not part of the cluster.\".format(node))\n exit(1)\n\n node_ep = None\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node_address)):\n node_ep = ep\n\n if node_ep and force:\n delete_dqlite_node([node_ep], my_ep)\n elif node_ep and not force:\n print(\n \"Removal failed. Node {} is registered with dqlite. \"\n \"Please, run first 'microk8s leave' on the departing node. 
\\n\"\n \"If the node is not available anymore and will never attempt to join the cluster \"\n \"in the future use the '--force' flag \\n\"\n \"to unregister the node while removing it.\".format(node)\n )\n exit(1)\n\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist in Kubernetes.\".format(node))\n if force:\n print(\"Attempting to remove {} from dqlite.\".format(node))\n # Make sure we do not have the node in dqlite.\n # We assume the IP is provided to denote the\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node)):\n print(\"Removing node entry found in dqlite.\")\n delete_dqlite_node([ep], my_ep)\n exit(1)\n\n remove_node(node)\n\n\ndef remove_node(node):\n try:\n # Make sure this node exists\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper get no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist.\".format(node))\n exit(1)\n\n remove_kubelet_token(node)\n remove_callback_token(node)\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper delete no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n\n\ndef remove_kubelet_token(node):\n \"\"\"\n Remove a token for a node in the known tokens\n\n :param node: the name of the node\n \"\"\"\n file = \"{}/credentials/known_tokens.csv\".format(snapdata_path)\n backup_file = \"{}.backup\".format(file)\n token = \"system:node:{}\".format(node)\n # That is a critical section. We need to protect it.\n with open(backup_file, \"w\") as back_fp:\n with open(file, \"r\") as fp:\n for _, line in enumerate(fp):\n if token in line:\n continue\n back_fp.write(\"{}\".format(line))\n\n try_set_file_permissions(backup_file)\n shutil.copyfile(backup_file, file)\n\n\ndef get_dqlite_endpoints():\n \"\"\"\n Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.\n\n :return: two lists with the endpoints\n \"\"\"\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n data = json.loads(out.decode())\n ep_addresses = []\n for ep in data:\n ep_addresses.append(ep[\"Address\"])\n local_ips = []\n for interface in netifaces.interfaces():\n if netifaces.AF_INET not in netifaces.ifaddresses(interface):\n continue\n for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:\n local_ips.append(link[\"addr\"])\n my_ep = []\n other_ep = []\n for ep in ep_addresses:\n found = False\n for ip in local_ips:\n if \"{}:\".format(ip) in ep:\n my_ep.append(ep)\n found = True\n if not found:\n other_ep.append(ep)\n\n return my_ep, other_ep\n\n\ndef delete_dqlite_node(delete_node, dqlite_ep):\n if len(delete_node) > 0 and \"127.0.0.1\" not in delete_node[0]:\n for ep in dqlite_ep:\n try:\n cmd = (\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n cmd.append(\".remove {}\".format(delete_node[0]))\n subprocess.check_output(cmd)\n break\n except Exception as err:\n print(\"Contacting node {} failed. 
Error:\".format(ep))\n print(repr(err))\n exit(2)\n\n\ndef remove_callback_token(node):\n \"\"\"\n Remove a callback token\n\n :param node: the node\n \"\"\"\n tmp_file = \"{}.tmp\".format(callback_tokens_file)\n if not os.path.isfile(callback_tokens_file):\n open(callback_tokens_file, \"a+\")\n os.chmod(callback_tokens_file, 0o600)\n with open(tmp_file, \"w\") as backup_fp:\n os.chmod(tmp_file, 0o600)\n with open(callback_tokens_file, \"r+\") as callback_fp:\n # Entries are of the format: 'node_hostname:agent_port token'\n # We need to get the node_hostname part\n for line in callback_fp:\n parts = line.split(\":\")\n if parts[0] == node:\n continue\n else:\n backup_fp.write(line)\n\n try_set_file_permissions(tmp_file)\n shutil.move(tmp_file, callback_tokens_file)\n\n\[email protected]()\[email protected](\"node\", required=True)\[email protected](\n \"--force\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Force the node removal operation. (default: false)\",\n)\ndef reset(node, force):\n \"\"\"\n Remove a node from the cluster\n \"\"\"\n if is_node_running_dqlite():\n remove_dqlite_node(node, force)\n else:\n remove_node(node)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n reset(prog_name=\"microk8s remove-node\")\n", "path": "scripts/wrappers/remove_node.py"}]} | 3,392 | 385 |
gh_patches_debug_14979 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3880 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Catalog search does not correctly handle parentheses inside quotes
## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)
### What I did:
- Add a page whose title contains text enclosed in parentheses
- Try to find that page using a quoted search query
### What I expect to happen:
- The search finds the page
### What actually happened:
- The search does not find the page
- When accessing through the REST API (because plone.app.querystring uses the `munge_search_term` function here), we get an error because the query is transformed into a string that ZCatalog cannot parse (a short reproduction sketch follows the issue text below)
### What version of Plone/ Addons I am using:
- Plone 6
--- END ISSUE ---
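A minimal reproduction sketch (assuming a Plone 6 environment where the module shown under FILES is importable; the mangled output string is inferred from that unpatched code, not quoted from the report):

```python
# Show what reaches ZCatalog when a parenthesised title is searched as a
# quoted phrase, e.g. "(test)".
from Products.CMFPlone.browser.search import munge_search_term

print(munge_search_term('"(test)"'))
# The unpatched code keeps the quoted phrase as '"(test)"' but then runs
# quote_chars() over the already-joined query, wrapping each parenthesis in
# an extra pair of double quotes.  That yields something like
# '""("test")""', which ZCTextIndex cannot parse -- so the page is not
# found, and the plone.app.querystring/REST path surfaces the error.
```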
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/search.py`
Content:
```
1 from DateTime import DateTime
2 from plone.app.contentlisting.interfaces import IContentListing
3 from plone.app.layout.navigation.interfaces import INavigationRoot
4 from plone.base.batch import Batch
5 from plone.base.interfaces import ISearchSchema
6 from plone.base.interfaces.siteroot import IPloneSiteRoot
7 from plone.registry.interfaces import IRegistry
8 from Products.CMFCore.utils import getToolByName
9 from Products.CMFPlone.browser.navtree import getNavigationRoot
10 from Products.ZCTextIndex.ParseTree import ParseError
11 from zope.cachedescriptors.property import Lazy as lazy_property
12 from zope.component import getMultiAdapter
13 from zope.component import getUtility
14 from zope.component import queryUtility
15 from zope.i18nmessageid import MessageFactory
16 from zope.publisher.browser import BrowserView
17 from ZTUtils import make_query
18
19 import json
20 import re
21
22
23 _ = MessageFactory("plone")
24
25 # We should accept both a simple space, unicode u'\u0020 but also a
26 # multi-space, so called 'waji-kankaku', unicode u'\u3000'
27 MULTISPACE = "\u3000"
28 BAD_CHARS = ("?", "-", "+", "*", MULTISPACE)
29 EVER = DateTime("1970-01-03")
30
31
32 def quote_chars(s):
33 # We need to quote parentheses when searching text indices
34 if "(" in s:
35 s = s.replace("(", '"("')
36 if ")" in s:
37 s = s.replace(")", '")"')
38 if MULTISPACE in s:
39 s = s.replace(MULTISPACE, " ")
40 return s
41
42
43 def quote(term):
44 # The terms and, or and not must be wrapped in quotes to avoid
45 # being parsed as logical query atoms.
46 if term.lower() in ("and", "or", "not"):
47 term = '"%s"' % term
48 return term
49
50
51 def munge_search_term(query):
52 for char in BAD_CHARS:
53 query = query.replace(char, " ")
54
55 # extract quoted phrases first
56 quoted_phrases = re.findall(r'"([^"]*)"', query)
57 r = []
58 for qp in quoted_phrases:
59 # remove from original query
60 query = query.replace(f'"{qp}"', "")
61 # replace with cleaned leading/trailing whitespaces
62 # and skip empty phrases
63 clean_qp = qp.strip()
64 if not clean_qp:
65 continue
66 r.append(f'"{clean_qp}"')
67
68 r += map(quote, query.strip().split())
69 r = " AND ".join(r)
70 r = quote_chars(r) + ("*" if r and not r.endswith('"') else "")
71 return r
72
73
74 class Search(BrowserView):
75 valid_keys = ("sort_on", "sort_order", "sort_limit", "fq", "fl", "facet")
76
77 def results(
78 self, query=None, batch=True, b_size=10, b_start=0, use_content_listing=True
79 ):
80 """Get properly wrapped search results from the catalog.
81 Everything in Plone that performs searches should go through this view.
82 'query' should be a dictionary of catalog parameters.
83 """
84 if query is None:
85 query = {}
86 if batch:
87 query["b_start"] = b_start = int(b_start)
88 query["b_size"] = b_size
89 query = self.filter_query(query)
90
91 if query is None:
92 results = []
93 else:
94 catalog = getToolByName(self.context, "portal_catalog")
95 try:
96 results = catalog(**query)
97 except ParseError:
98 return []
99
100 if use_content_listing:
101 results = IContentListing(results)
102 if batch:
103 results = Batch(results, b_size, b_start)
104 return results
105
106 def _filter_query(self, query):
107 request = self.request
108
109 catalog = getToolByName(self.context, "portal_catalog")
110 valid_indexes = tuple(catalog.indexes())
111 valid_keys = self.valid_keys + valid_indexes
112
113 text = query.get("SearchableText", None)
114 if text is None:
115 text = request.form.get("SearchableText", "")
116 if not text:
117 # Without text, must provide a meaningful non-empty search
118 valid = set(valid_indexes).intersection(request.form.keys()) or set(
119 valid_indexes
120 ).intersection(query.keys())
121 if not valid:
122 return
123
124 for k, v in request.form.items():
125 if v and ((k in valid_keys) or k.startswith("facet.")):
126 query[k] = v
127 if text:
128 query["SearchableText"] = munge_search_term(text)
129
130 # don't filter on created at all if we want all results
131 created = query.get("created")
132 if created:
133 try:
134 if created.get("query", EVER) <= EVER:
135 del query["created"]
136 except AttributeError:
137 # created not a mapping
138 del query["created"]
139
140 # respect `types_not_searched` setting
141 types = query.get("portal_type", [])
142 if "query" in types:
143 types = types["query"]
144 query["portal_type"] = self.filter_types(types)
145 # respect effective/expiration date
146 query["show_inactive"] = False
147 # respect navigation root if we're not at the site root.
148 if "path" not in query and not IPloneSiteRoot.providedBy(self.context):
149 query["path"] = getNavigationRoot(self.context)
150
151 if "sort_order" in query and not query["sort_order"]:
152 del query["sort_order"]
153 return query
154
155 @lazy_property
156 def default_sort_on(self):
157 registry = getUtility(IRegistry)
158 search_settings = registry.forInterface(ISearchSchema, prefix="plone")
159 return search_settings.sort_on
160
161 def filter_query(self, query):
162 query = self._filter_query(query)
163 if query is None:
164 query = {}
165 # explicitly set a sort; if no `sort_on` is present, the catalog sorts
166 # by relevance
167 if "sort_on" not in query:
168 self.default_sort_on
169 if self.default_sort_on != "relevance":
170 query["sort_on"] = self.default_sort_on
171 elif query["sort_on"] == "relevance":
172 del query["sort_on"]
173 if query.get("sort_on", "") == "Date":
174 query["sort_order"] = "reverse"
175 elif "sort_order" in query:
176 del query["sort_order"]
177 if not query:
178 return None
179 return query
180
181 def filter_types(self, types):
182 plone_utils = getToolByName(self.context, "plone_utils")
183 if not isinstance(types, list):
184 types = [types]
185 return plone_utils.getUserFriendlyTypes(types)
186
187 def types_list(self):
188 # only show those types that have any content
189 catalog = getToolByName(self.context, "portal_catalog")
190 used_types = catalog._catalog.getIndex("portal_type").uniqueValues()
191 return self.filter_types(list(used_types))
192
193 def sort_options(self):
194 """Sorting options for search results view."""
195 if "sort_on" not in self.request.form:
196 self.request.form["sort_on"] = self.default_sort_on
197 return (
198 SortOption(self.request, _("relevance"), "relevance"),
199 SortOption(self.request, _("date (newest first)"), "Date", reverse=True),
200 SortOption(self.request, _("alphabetically"), "sortable_title"),
201 )
202
203 def show_advanced_search(self):
204 """Whether we need to show advanced search options a.k.a. filters?"""
205 show = self.request.get("advanced_search", None)
206 if not show or show == "False":
207 return False
208 return True
209
210 def advanced_search_trigger(self):
211 """URL builder for show/close advanced search filters."""
212 query = self.request.get("QUERY_STRING", None)
213 url = self.request.get("ACTUAL_URL", self.context.absolute_url())
214 if not query:
215 return url
216 if "advanced_search" in query:
217 if "advanced_search=True" in query:
218 query = query.replace("advanced_search=True", "")
219 if "advanced_search=False" in query:
220 query = query.replace("advanced_search=False", "")
221 else:
222 query = query + "&advanced_search=True"
223 return url + "?" + query
224
225 def breadcrumbs(self, item):
226 obj = item.getObject()
227 view = getMultiAdapter((obj, self.request), name="breadcrumbs_view")
228 # cut off the item itself
229 breadcrumbs = list(view.breadcrumbs())[:-1]
230 if len(breadcrumbs) == 0:
231 # don't show breadcrumbs if we only have a single element
232 return None
233 if len(breadcrumbs) > 3:
234 # if we have too long breadcrumbs, emit the middle elements
235 empty = {"absolute_url": "", "Title": "…"}
236 breadcrumbs = [breadcrumbs[0], empty] + breadcrumbs[-2:]
237 return breadcrumbs
238
239 def navroot_url(self):
240 if not hasattr(self, "_navroot_url"):
241 state = self.context.unrestrictedTraverse("@@plone_portal_state")
242 self._navroot_url = state.navigation_root_url()
243 return self._navroot_url
244
245 @property
246 def show_images(self):
247 registry = queryUtility(IRegistry)
248 return registry.get("plone.search_show_images")
249
250 @property
251 def search_image_scale(self):
252 registry = queryUtility(IRegistry)
253 return registry.get("plone.search_image_scale")
254
255
256 class AjaxSearch(Search):
257 def __call__(self):
258 items = []
259 try:
260 per_page = int(self.request.form.get("perPage"))
261 except Exception:
262 per_page = 10
263 try:
264 page = int(self.request.form.get("page"))
265 except Exception:
266 page = 1
267
268 results = self.results(batch=False, use_content_listing=False)
269 batch = Batch(results, per_page, start=(page - 1) * per_page)
270
271 registry = queryUtility(IRegistry)
272 length = registry.get("plone.search_results_description_length")
273 show_images = registry.get("plone.search_show_images")
274 if show_images:
275 image_scale = registry.get("plone.search_image_scale")
276 # image_scaling = getMultiAdapter((self.context, self.request), name='image_scale')
277 self.image_scaling = getMultiAdapter(
278 (INavigationRoot(self.context), self.request), name="image_scale"
279 )
280 plone_view = getMultiAdapter((self.context, self.request), name="plone")
281 view_action_types = registry.get("plone.types_use_view_action_in_listings", [])
282 for item in batch:
283 url = item.getURL()
284 if item.portal_type in view_action_types:
285 url = "%s/view" % url
286 img_tag = None
287 if show_images:
288 img_tag = self.get_image_tag(item, image_scale)
289 items.append(
290 {
291 "id": item.UID,
292 "title": item.Title,
293 "description": plone_view.cropText(item.Description, length),
294 "url": url,
295 "state": item.review_state if item.review_state else None,
296 "img_tag": img_tag,
297 }
298 )
299 return json.dumps({"total": len(results), "items": items})
300
301 def get_image_tag(self, item, image_scale):
302 return self.image_scaling.tag(item, "image", scale=image_scale)
303
304
305 class SortOption:
306 def __init__(self, request, title, sortkey="", reverse=False):
307 self.request = request
308 self.title = title
309 self.sortkey = sortkey
310 self.reverse = reverse
311
312 def selected(self):
313 sort_on = self.request.get("sort_on", "")
314 return sort_on == self.sortkey and sort_on != ""
315
316 def url(self):
317 q = {}
318 q.update(self.request.form)
319 if "sort_on" in q.keys():
320 del q["sort_on"]
321 if "sort_order" in q.keys():
322 del q["sort_order"]
323 q["sort_on"] = self.sortkey
324 if self.reverse:
325 q["sort_order"] = "reverse"
326
327 base_url = self.request.URL
328 return base_url + "?" + make_query(q)
329
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/browser/search.py b/Products/CMFPlone/browser/search.py
--- a/Products/CMFPlone/browser/search.py
+++ b/Products/CMFPlone/browser/search.py
@@ -45,10 +45,11 @@
# being parsed as logical query atoms.
if term.lower() in ("and", "or", "not"):
term = '"%s"' % term
- return term
+ return quote_chars(term)
def munge_search_term(query):
+ original_query = query
for char in BAD_CHARS:
query = query.replace(char, " ")
@@ -67,7 +68,7 @@
r += map(quote, query.strip().split())
r = " AND ".join(r)
- r = quote_chars(r) + ("*" if r and not r.endswith('"') else "")
+ r = r + ("*" if r and not original_query.endswith('"') else "")
return r
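For comparison, a couple of checks that would be expected to pass with the patched `munge_search_term` (a sketch derived only from the diff above, not taken from the original sources):

```python
from Products.CMFPlone.browser.search import munge_search_term

# Quoted phrases keep their parentheses inside the quotes, and the trailing
# wildcard is now decided from the original query, so quoted searches no
# longer pick up doubled quotes or a stray '*'.
assert munge_search_term('"(test)"') == '"(test)"'
# Unquoted terms still get per-term parenthesis quoting plus the wildcard.
assert munge_search_term('foo (bar)') == 'foo AND "("bar")"*'
```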
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/search.py b/Products/CMFPlone/browser/search.py\n--- a/Products/CMFPlone/browser/search.py\n+++ b/Products/CMFPlone/browser/search.py\n@@ -45,10 +45,11 @@\n # being parsed as logical query atoms.\n if term.lower() in (\"and\", \"or\", \"not\"):\n term = '\"%s\"' % term\n- return term\n+ return quote_chars(term)\n \n \n def munge_search_term(query):\n+ original_query = query\n for char in BAD_CHARS:\n query = query.replace(char, \" \")\n \n@@ -67,7 +68,7 @@\n \n r += map(quote, query.strip().split())\n r = \" AND \".join(r)\n- r = quote_chars(r) + (\"*\" if r and not r.endswith('\"') else \"\")\n+ r = r + (\"*\" if r and not original_query.endswith('\"') else \"\")\n return r\n", "issue": "Catalog search does not handle correctly parenthesis inside quotes\n## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)\r\n\r\n### What I did:\r\n\r\n- Add a page with parenthesis enclosed text as a title\r\n- Try to search that page using quoted query\r\n\r\n### What I expect to happen:\r\n\r\n- The search searches the page\r\n\r\n### What actually happened:\r\n\r\n- The search does not find the page\r\n- When accessing through REST API (because plone.app.querystring uses the `munge_search_term` function here), we get an error because the query is transformed to an unknown string for ZCatalog\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\n- Plone 6\r\n\n", "before_files": [{"content": "from DateTime import DateTime\nfrom plone.app.contentlisting.interfaces import IContentListing\nfrom plone.app.layout.navigation.interfaces import INavigationRoot\nfrom plone.base.batch import Batch\nfrom plone.base.interfaces import ISearchSchema\nfrom plone.base.interfaces.siteroot import IPloneSiteRoot\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.browser.navtree import getNavigationRoot\nfrom Products.ZCTextIndex.ParseTree import ParseError\nfrom zope.cachedescriptors.property import Lazy as lazy_property\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\nfrom zope.i18nmessageid import MessageFactory\nfrom zope.publisher.browser import BrowserView\nfrom ZTUtils import make_query\n\nimport json\nimport re\n\n\n_ = MessageFactory(\"plone\")\n\n# We should accept both a simple space, unicode u'\\u0020 but also a\n# multi-space, so called 'waji-kankaku', unicode u'\\u3000'\nMULTISPACE = \"\\u3000\"\nBAD_CHARS = (\"?\", \"-\", \"+\", \"*\", MULTISPACE)\nEVER = DateTime(\"1970-01-03\")\n\n\ndef quote_chars(s):\n # We need to quote parentheses when searching text indices\n if \"(\" in s:\n s = s.replace(\"(\", '\"(\"')\n if \")\" in s:\n s = s.replace(\")\", '\")\"')\n if MULTISPACE in s:\n s = s.replace(MULTISPACE, \" \")\n return s\n\n\ndef quote(term):\n # The terms and, or and not must be wrapped in quotes to avoid\n # being parsed as logical query atoms.\n if term.lower() in (\"and\", \"or\", \"not\"):\n term = '\"%s\"' % term\n return term\n\n\ndef munge_search_term(query):\n for char in BAD_CHARS:\n query = query.replace(char, \" \")\n\n # extract quoted phrases first\n quoted_phrases = re.findall(r'\"([^\"]*)\"', query)\n r = []\n for qp in quoted_phrases:\n # remove from original query\n query = query.replace(f'\"{qp}\"', \"\")\n # replace with cleaned leading/trailing whitespaces\n # and skip empty phrases\n clean_qp = qp.strip()\n if not clean_qp:\n continue\n r.append(f'\"{clean_qp}\"')\n\n r += 
map(quote, query.strip().split())\n r = \" AND \".join(r)\n r = quote_chars(r) + (\"*\" if r and not r.endswith('\"') else \"\")\n return r\n\n\nclass Search(BrowserView):\n valid_keys = (\"sort_on\", \"sort_order\", \"sort_limit\", \"fq\", \"fl\", \"facet\")\n\n def results(\n self, query=None, batch=True, b_size=10, b_start=0, use_content_listing=True\n ):\n \"\"\"Get properly wrapped search results from the catalog.\n Everything in Plone that performs searches should go through this view.\n 'query' should be a dictionary of catalog parameters.\n \"\"\"\n if query is None:\n query = {}\n if batch:\n query[\"b_start\"] = b_start = int(b_start)\n query[\"b_size\"] = b_size\n query = self.filter_query(query)\n\n if query is None:\n results = []\n else:\n catalog = getToolByName(self.context, \"portal_catalog\")\n try:\n results = catalog(**query)\n except ParseError:\n return []\n\n if use_content_listing:\n results = IContentListing(results)\n if batch:\n results = Batch(results, b_size, b_start)\n return results\n\n def _filter_query(self, query):\n request = self.request\n\n catalog = getToolByName(self.context, \"portal_catalog\")\n valid_indexes = tuple(catalog.indexes())\n valid_keys = self.valid_keys + valid_indexes\n\n text = query.get(\"SearchableText\", None)\n if text is None:\n text = request.form.get(\"SearchableText\", \"\")\n if not text:\n # Without text, must provide a meaningful non-empty search\n valid = set(valid_indexes).intersection(request.form.keys()) or set(\n valid_indexes\n ).intersection(query.keys())\n if not valid:\n return\n\n for k, v in request.form.items():\n if v and ((k in valid_keys) or k.startswith(\"facet.\")):\n query[k] = v\n if text:\n query[\"SearchableText\"] = munge_search_term(text)\n\n # don't filter on created at all if we want all results\n created = query.get(\"created\")\n if created:\n try:\n if created.get(\"query\", EVER) <= EVER:\n del query[\"created\"]\n except AttributeError:\n # created not a mapping\n del query[\"created\"]\n\n # respect `types_not_searched` setting\n types = query.get(\"portal_type\", [])\n if \"query\" in types:\n types = types[\"query\"]\n query[\"portal_type\"] = self.filter_types(types)\n # respect effective/expiration date\n query[\"show_inactive\"] = False\n # respect navigation root if we're not at the site root.\n if \"path\" not in query and not IPloneSiteRoot.providedBy(self.context):\n query[\"path\"] = getNavigationRoot(self.context)\n\n if \"sort_order\" in query and not query[\"sort_order\"]:\n del query[\"sort_order\"]\n return query\n\n @lazy_property\n def default_sort_on(self):\n registry = getUtility(IRegistry)\n search_settings = registry.forInterface(ISearchSchema, prefix=\"plone\")\n return search_settings.sort_on\n\n def filter_query(self, query):\n query = self._filter_query(query)\n if query is None:\n query = {}\n # explicitly set a sort; if no `sort_on` is present, the catalog sorts\n # by relevance\n if \"sort_on\" not in query:\n self.default_sort_on\n if self.default_sort_on != \"relevance\":\n query[\"sort_on\"] = self.default_sort_on\n elif query[\"sort_on\"] == \"relevance\":\n del query[\"sort_on\"]\n if query.get(\"sort_on\", \"\") == \"Date\":\n query[\"sort_order\"] = \"reverse\"\n elif \"sort_order\" in query:\n del query[\"sort_order\"]\n if not query:\n return None\n return query\n\n def filter_types(self, types):\n plone_utils = getToolByName(self.context, \"plone_utils\")\n if not isinstance(types, list):\n types = [types]\n return 
plone_utils.getUserFriendlyTypes(types)\n\n def types_list(self):\n # only show those types that have any content\n catalog = getToolByName(self.context, \"portal_catalog\")\n used_types = catalog._catalog.getIndex(\"portal_type\").uniqueValues()\n return self.filter_types(list(used_types))\n\n def sort_options(self):\n \"\"\"Sorting options for search results view.\"\"\"\n if \"sort_on\" not in self.request.form:\n self.request.form[\"sort_on\"] = self.default_sort_on\n return (\n SortOption(self.request, _(\"relevance\"), \"relevance\"),\n SortOption(self.request, _(\"date (newest first)\"), \"Date\", reverse=True),\n SortOption(self.request, _(\"alphabetically\"), \"sortable_title\"),\n )\n\n def show_advanced_search(self):\n \"\"\"Whether we need to show advanced search options a.k.a. filters?\"\"\"\n show = self.request.get(\"advanced_search\", None)\n if not show or show == \"False\":\n return False\n return True\n\n def advanced_search_trigger(self):\n \"\"\"URL builder for show/close advanced search filters.\"\"\"\n query = self.request.get(\"QUERY_STRING\", None)\n url = self.request.get(\"ACTUAL_URL\", self.context.absolute_url())\n if not query:\n return url\n if \"advanced_search\" in query:\n if \"advanced_search=True\" in query:\n query = query.replace(\"advanced_search=True\", \"\")\n if \"advanced_search=False\" in query:\n query = query.replace(\"advanced_search=False\", \"\")\n else:\n query = query + \"&advanced_search=True\"\n return url + \"?\" + query\n\n def breadcrumbs(self, item):\n obj = item.getObject()\n view = getMultiAdapter((obj, self.request), name=\"breadcrumbs_view\")\n # cut off the item itself\n breadcrumbs = list(view.breadcrumbs())[:-1]\n if len(breadcrumbs) == 0:\n # don't show breadcrumbs if we only have a single element\n return None\n if len(breadcrumbs) > 3:\n # if we have too long breadcrumbs, emit the middle elements\n empty = {\"absolute_url\": \"\", \"Title\": \"\u2026\"}\n breadcrumbs = [breadcrumbs[0], empty] + breadcrumbs[-2:]\n return breadcrumbs\n\n def navroot_url(self):\n if not hasattr(self, \"_navroot_url\"):\n state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n self._navroot_url = state.navigation_root_url()\n return self._navroot_url\n\n @property\n def show_images(self):\n registry = queryUtility(IRegistry)\n return registry.get(\"plone.search_show_images\")\n\n @property\n def search_image_scale(self):\n registry = queryUtility(IRegistry)\n return registry.get(\"plone.search_image_scale\")\n\n\nclass AjaxSearch(Search):\n def __call__(self):\n items = []\n try:\n per_page = int(self.request.form.get(\"perPage\"))\n except Exception:\n per_page = 10\n try:\n page = int(self.request.form.get(\"page\"))\n except Exception:\n page = 1\n\n results = self.results(batch=False, use_content_listing=False)\n batch = Batch(results, per_page, start=(page - 1) * per_page)\n\n registry = queryUtility(IRegistry)\n length = registry.get(\"plone.search_results_description_length\")\n show_images = registry.get(\"plone.search_show_images\")\n if show_images:\n image_scale = registry.get(\"plone.search_image_scale\")\n # image_scaling = getMultiAdapter((self.context, self.request), name='image_scale')\n self.image_scaling = getMultiAdapter(\n (INavigationRoot(self.context), self.request), name=\"image_scale\"\n )\n plone_view = getMultiAdapter((self.context, self.request), name=\"plone\")\n view_action_types = registry.get(\"plone.types_use_view_action_in_listings\", [])\n for item in batch:\n url = item.getURL()\n if 
item.portal_type in view_action_types:\n url = \"%s/view\" % url\n img_tag = None\n if show_images:\n img_tag = self.get_image_tag(item, image_scale)\n items.append(\n {\n \"id\": item.UID,\n \"title\": item.Title,\n \"description\": plone_view.cropText(item.Description, length),\n \"url\": url,\n \"state\": item.review_state if item.review_state else None,\n \"img_tag\": img_tag,\n }\n )\n return json.dumps({\"total\": len(results), \"items\": items})\n\n def get_image_tag(self, item, image_scale):\n return self.image_scaling.tag(item, \"image\", scale=image_scale)\n\n\nclass SortOption:\n def __init__(self, request, title, sortkey=\"\", reverse=False):\n self.request = request\n self.title = title\n self.sortkey = sortkey\n self.reverse = reverse\n\n def selected(self):\n sort_on = self.request.get(\"sort_on\", \"\")\n return sort_on == self.sortkey and sort_on != \"\"\n\n def url(self):\n q = {}\n q.update(self.request.form)\n if \"sort_on\" in q.keys():\n del q[\"sort_on\"]\n if \"sort_order\" in q.keys():\n del q[\"sort_order\"]\n q[\"sort_on\"] = self.sortkey\n if self.reverse:\n q[\"sort_order\"] = \"reverse\"\n\n base_url = self.request.URL\n return base_url + \"?\" + make_query(q)\n", "path": "Products/CMFPlone/browser/search.py"}], "after_files": [{"content": "from DateTime import DateTime\nfrom plone.app.contentlisting.interfaces import IContentListing\nfrom plone.app.layout.navigation.interfaces import INavigationRoot\nfrom plone.base.batch import Batch\nfrom plone.base.interfaces import ISearchSchema\nfrom plone.base.interfaces.siteroot import IPloneSiteRoot\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.browser.navtree import getNavigationRoot\nfrom Products.ZCTextIndex.ParseTree import ParseError\nfrom zope.cachedescriptors.property import Lazy as lazy_property\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\nfrom zope.i18nmessageid import MessageFactory\nfrom zope.publisher.browser import BrowserView\nfrom ZTUtils import make_query\n\nimport json\nimport re\n\n\n_ = MessageFactory(\"plone\")\n\n# We should accept both a simple space, unicode u'\\u0020 but also a\n# multi-space, so called 'waji-kankaku', unicode u'\\u3000'\nMULTISPACE = \"\\u3000\"\nBAD_CHARS = (\"?\", \"-\", \"+\", \"*\", MULTISPACE)\nEVER = DateTime(\"1970-01-03\")\n\n\ndef quote_chars(s):\n # We need to quote parentheses when searching text indices\n if \"(\" in s:\n s = s.replace(\"(\", '\"(\"')\n if \")\" in s:\n s = s.replace(\")\", '\")\"')\n if MULTISPACE in s:\n s = s.replace(MULTISPACE, \" \")\n return s\n\n\ndef quote(term):\n # The terms and, or and not must be wrapped in quotes to avoid\n # being parsed as logical query atoms.\n if term.lower() in (\"and\", \"or\", \"not\"):\n term = '\"%s\"' % term\n return quote_chars(term)\n\n\ndef munge_search_term(query):\n original_query = query\n for char in BAD_CHARS:\n query = query.replace(char, \" \")\n\n # extract quoted phrases first\n quoted_phrases = re.findall(r'\"([^\"]*)\"', query)\n r = []\n for qp in quoted_phrases:\n # remove from original query\n query = query.replace(f'\"{qp}\"', \"\")\n # replace with cleaned leading/trailing whitespaces\n # and skip empty phrases\n clean_qp = qp.strip()\n if not clean_qp:\n continue\n r.append(f'\"{clean_qp}\"')\n\n r += map(quote, query.strip().split())\n r = \" AND \".join(r)\n r = r + (\"*\" if r and not original_query.endswith('\"') else \"\")\n 
return r\n\n\nclass Search(BrowserView):\n valid_keys = (\"sort_on\", \"sort_order\", \"sort_limit\", \"fq\", \"fl\", \"facet\")\n\n def results(\n self, query=None, batch=True, b_size=10, b_start=0, use_content_listing=True\n ):\n \"\"\"Get properly wrapped search results from the catalog.\n Everything in Plone that performs searches should go through this view.\n 'query' should be a dictionary of catalog parameters.\n \"\"\"\n if query is None:\n query = {}\n if batch:\n query[\"b_start\"] = b_start = int(b_start)\n query[\"b_size\"] = b_size\n query = self.filter_query(query)\n\n if query is None:\n results = []\n else:\n catalog = getToolByName(self.context, \"portal_catalog\")\n try:\n results = catalog(**query)\n except ParseError:\n return []\n\n if use_content_listing:\n results = IContentListing(results)\n if batch:\n results = Batch(results, b_size, b_start)\n return results\n\n def _filter_query(self, query):\n request = self.request\n\n catalog = getToolByName(self.context, \"portal_catalog\")\n valid_indexes = tuple(catalog.indexes())\n valid_keys = self.valid_keys + valid_indexes\n\n text = query.get(\"SearchableText\", None)\n if text is None:\n text = request.form.get(\"SearchableText\", \"\")\n if not text:\n # Without text, must provide a meaningful non-empty search\n valid = set(valid_indexes).intersection(request.form.keys()) or set(\n valid_indexes\n ).intersection(query.keys())\n if not valid:\n return\n\n for k, v in request.form.items():\n if v and ((k in valid_keys) or k.startswith(\"facet.\")):\n query[k] = v\n if text:\n query[\"SearchableText\"] = munge_search_term(text)\n\n # don't filter on created at all if we want all results\n created = query.get(\"created\")\n if created:\n try:\n if created.get(\"query\", EVER) <= EVER:\n del query[\"created\"]\n except AttributeError:\n # created not a mapping\n del query[\"created\"]\n\n # respect `types_not_searched` setting\n types = query.get(\"portal_type\", [])\n if \"query\" in types:\n types = types[\"query\"]\n query[\"portal_type\"] = self.filter_types(types)\n # respect effective/expiration date\n query[\"show_inactive\"] = False\n # respect navigation root if we're not at the site root.\n if \"path\" not in query and not IPloneSiteRoot.providedBy(self.context):\n query[\"path\"] = getNavigationRoot(self.context)\n\n if \"sort_order\" in query and not query[\"sort_order\"]:\n del query[\"sort_order\"]\n return query\n\n @lazy_property\n def default_sort_on(self):\n registry = getUtility(IRegistry)\n search_settings = registry.forInterface(ISearchSchema, prefix=\"plone\")\n return search_settings.sort_on\n\n def filter_query(self, query):\n query = self._filter_query(query)\n if query is None:\n query = {}\n # explicitly set a sort; if no `sort_on` is present, the catalog sorts\n # by relevance\n if \"sort_on\" not in query:\n self.default_sort_on\n if self.default_sort_on != \"relevance\":\n query[\"sort_on\"] = self.default_sort_on\n elif query[\"sort_on\"] == \"relevance\":\n del query[\"sort_on\"]\n if query.get(\"sort_on\", \"\") == \"Date\":\n query[\"sort_order\"] = \"reverse\"\n elif \"sort_order\" in query:\n del query[\"sort_order\"]\n if not query:\n return None\n return query\n\n def filter_types(self, types):\n plone_utils = getToolByName(self.context, \"plone_utils\")\n if not isinstance(types, list):\n types = [types]\n return plone_utils.getUserFriendlyTypes(types)\n\n def types_list(self):\n # only show those types that have any content\n catalog = getToolByName(self.context, 
\"portal_catalog\")\n used_types = catalog._catalog.getIndex(\"portal_type\").uniqueValues()\n return self.filter_types(list(used_types))\n\n def sort_options(self):\n \"\"\"Sorting options for search results view.\"\"\"\n if \"sort_on\" not in self.request.form:\n self.request.form[\"sort_on\"] = self.default_sort_on\n return (\n SortOption(self.request, _(\"relevance\"), \"relevance\"),\n SortOption(self.request, _(\"date (newest first)\"), \"Date\", reverse=True),\n SortOption(self.request, _(\"alphabetically\"), \"sortable_title\"),\n )\n\n def show_advanced_search(self):\n \"\"\"Whether we need to show advanced search options a.k.a. filters?\"\"\"\n show = self.request.get(\"advanced_search\", None)\n if not show or show == \"False\":\n return False\n return True\n\n def advanced_search_trigger(self):\n \"\"\"URL builder for show/close advanced search filters.\"\"\"\n query = self.request.get(\"QUERY_STRING\", None)\n url = self.request.get(\"ACTUAL_URL\", self.context.absolute_url())\n if not query:\n return url\n if \"advanced_search\" in query:\n if \"advanced_search=True\" in query:\n query = query.replace(\"advanced_search=True\", \"\")\n if \"advanced_search=False\" in query:\n query = query.replace(\"advanced_search=False\", \"\")\n else:\n query = query + \"&advanced_search=True\"\n return url + \"?\" + query\n\n def breadcrumbs(self, item):\n obj = item.getObject()\n view = getMultiAdapter((obj, self.request), name=\"breadcrumbs_view\")\n # cut off the item itself\n breadcrumbs = list(view.breadcrumbs())[:-1]\n if len(breadcrumbs) == 0:\n # don't show breadcrumbs if we only have a single element\n return None\n if len(breadcrumbs) > 3:\n # if we have too long breadcrumbs, emit the middle elements\n empty = {\"absolute_url\": \"\", \"Title\": \"\u2026\"}\n breadcrumbs = [breadcrumbs[0], empty] + breadcrumbs[-2:]\n return breadcrumbs\n\n def navroot_url(self):\n if not hasattr(self, \"_navroot_url\"):\n state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n self._navroot_url = state.navigation_root_url()\n return self._navroot_url\n\n @property\n def show_images(self):\n registry = queryUtility(IRegistry)\n return registry.get(\"plone.search_show_images\")\n\n @property\n def search_image_scale(self):\n registry = queryUtility(IRegistry)\n return registry.get(\"plone.search_image_scale\")\n\n\nclass AjaxSearch(Search):\n def __call__(self):\n items = []\n try:\n per_page = int(self.request.form.get(\"perPage\"))\n except Exception:\n per_page = 10\n try:\n page = int(self.request.form.get(\"page\"))\n except Exception:\n page = 1\n\n results = self.results(batch=False, use_content_listing=False)\n batch = Batch(results, per_page, start=(page - 1) * per_page)\n\n registry = queryUtility(IRegistry)\n length = registry.get(\"plone.search_results_description_length\")\n show_images = registry.get(\"plone.search_show_images\")\n if show_images:\n image_scale = registry.get(\"plone.search_image_scale\")\n # image_scaling = getMultiAdapter((self.context, self.request), name='image_scale')\n self.image_scaling = getMultiAdapter(\n (INavigationRoot(self.context), self.request), name=\"image_scale\"\n )\n plone_view = getMultiAdapter((self.context, self.request), name=\"plone\")\n view_action_types = registry.get(\"plone.types_use_view_action_in_listings\", [])\n for item in batch:\n url = item.getURL()\n if item.portal_type in view_action_types:\n url = \"%s/view\" % url\n img_tag = None\n if show_images:\n img_tag = self.get_image_tag(item, image_scale)\n items.append(\n 
{\n \"id\": item.UID,\n \"title\": item.Title,\n \"description\": plone_view.cropText(item.Description, length),\n \"url\": url,\n \"state\": item.review_state if item.review_state else None,\n \"img_tag\": img_tag,\n }\n )\n return json.dumps({\"total\": len(results), \"items\": items})\n\n def get_image_tag(self, item, image_scale):\n return self.image_scaling.tag(item, \"image\", scale=image_scale)\n\n\nclass SortOption:\n def __init__(self, request, title, sortkey=\"\", reverse=False):\n self.request = request\n self.title = title\n self.sortkey = sortkey\n self.reverse = reverse\n\n def selected(self):\n sort_on = self.request.get(\"sort_on\", \"\")\n return sort_on == self.sortkey and sort_on != \"\"\n\n def url(self):\n q = {}\n q.update(self.request.form)\n if \"sort_on\" in q.keys():\n del q[\"sort_on\"]\n if \"sort_order\" in q.keys():\n del q[\"sort_order\"]\n q[\"sort_on\"] = self.sortkey\n if self.reverse:\n q[\"sort_order\"] = \"reverse\"\n\n base_url = self.request.URL\n return base_url + \"?\" + make_query(q)\n", "path": "Products/CMFPlone/browser/search.py"}]} | 3,911 | 222 |
gh_patches_debug_7459 | rasdani/github-patches | git_diff | rlworkgroup__garage-1558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Backport #1554
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/garage/tf/plotter/plotter.py`
Content:
```
1 import atexit
2 from collections import namedtuple
3 from enum import Enum
4 import platform
5 from queue import Queue
6 from threading import Thread
7
8 import numpy as np
9 import tensorflow as tf
10
11 from garage.sampler.utils import rollout as default_rollout
12
13 __all__ = ['Plotter']
14
15
16 class Op(Enum):
17 STOP = 0
18 UPDATE = 1
19 DEMO = 2
20
21
22 Message = namedtuple('Message', ['op', 'args', 'kwargs'])
23
24
25 class Plotter:
26
27 # Static variable used to disable the plotter
28 enable = True
29 # List containing all plotters instantiated in the process
30 __plotters = []
31
32 def __init__(self,
33 env,
34 policy,
35 sess=None,
36 graph=None,
37 rollout=default_rollout):
38 Plotter.__plotters.append(self)
39 self.env = env
40 self.sess = tf.compat.v1.get_default_session(
41 ) if sess is None else sess
42 self.graph = tf.compat.v1.get_default_graph(
43 ) if graph is None else graph
44 with self.sess.as_default(), self.graph.as_default():
45 self.policy = policy.clone('plotter_policy')
46 self.rollout = rollout
47 self.worker_thread = Thread(target=self._start_worker, daemon=True)
48 self.queue = Queue()
49
50 # Needed in order to draw glfw window on the main thread
51 if ('Darwin' in platform.platform()):
52 self.rollout(env,
53 policy,
54 max_path_length=np.inf,
55 animated=True,
56 speedup=5)
57
58 def _start_worker(self):
59 env = None
60 policy = None
61 max_length = None
62 initial_rollout = True
63 try:
64 with self.sess.as_default(), self.sess.graph.as_default():
65 # Each iteration will process ALL messages currently in the
66 # queue
67 while True:
68 msgs = {}
69 # If true, block and yield processor
70 if initial_rollout:
71 msg = self.queue.get()
72 msgs[msg.op] = msg
73 # Only fetch the last message of each type
74 while not self.queue.empty():
75 msg = self.queue.get()
76 msgs[msg.op] = msg
77 else:
78 # Only fetch the last message of each type
79 while not self.queue.empty():
80 msg = self.queue.get_nowait()
81 msgs[msg.op] = msg
82
83 if Op.STOP in msgs:
84 self.queue.task_done()
85 break
86 if Op.UPDATE in msgs:
87 env, policy = msgs[Op.UPDATE].args
88 self.queue.task_done()
89 if Op.DEMO in msgs:
90 param_values, max_length = msgs[Op.DEMO].args
91 policy.set_param_values(param_values)
92 initial_rollout = False
93 self.rollout(env,
94 policy,
95 max_path_length=max_length,
96 animated=True,
97 speedup=5)
98 self.queue.task_done()
99 else:
100 if max_length:
101 self.rollout(env,
102 policy,
103 max_path_length=max_length,
104 animated=True,
105 speedup=5)
106 except KeyboardInterrupt:
107 pass
108
109 def close(self):
110 if self.worker_thread.is_alive():
111 while not self.queue.empty():
112 self.queue.get()
113 self.queue.task_done()
114 self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))
115 self.queue.join()
116 self.worker_thread.join()
117
118 @staticmethod
119 def disable():
120 """Disable all instances of the Plotter class."""
121 Plotter.enable = False
122
123 @staticmethod
124 def get_plotters():
125 return Plotter.__plotters
126
127 def start(self):
128 if not Plotter.enable:
129 return
130 if not self.worker_thread.is_alive():
131 tf.compat.v1.get_variable_scope().reuse_variables()
132 self.worker_thread.start()
133 self.queue.put(
134 Message(op=Op.UPDATE,
135 args=(self.env, self.policy),
136 kwargs=None))
137 atexit.register(self.close)
138
139 def update_plot(self, policy, max_length=np.inf):
140 if not Plotter.enable:
141 return
142 if self.worker_thread.is_alive():
143 self.queue.put(
144 Message(op=Op.DEMO,
145 args=(policy.get_param_values(), max_length),
146 kwargs=None))
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/garage/tf/plotter/plotter.py b/src/garage/tf/plotter/plotter.py
--- a/src/garage/tf/plotter/plotter.py
+++ b/src/garage/tf/plotter/plotter.py
@@ -43,6 +43,7 @@
) if graph is None else graph
with self.sess.as_default(), self.graph.as_default():
self.policy = policy.clone('plotter_policy')
+ self.policy.build(policy.model.input)
self.rollout = rollout
self.worker_thread = Thread(target=self._start_worker, daemon=True)
self.queue = Queue()
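A short note on the fix (a hypothetical sketch based only on the diff above): a cloned garage TF policy has no TensorFlow graph until it is built, so without the added `build()` call the plotter thread cannot read or set the clone's parameters.

```python
def make_plotter_policy(policy):
    """Clone a garage TF policy for the plotter thread (sketch of the fix)."""
    clone = policy.clone('plotter_policy')
    # Without this build() the clone has no variables, and
    # get_param_values()/set_param_values() in the worker thread would fail.
    clone.build(policy.model.input)
    return clone
```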
| {"golden_diff": "diff --git a/src/garage/tf/plotter/plotter.py b/src/garage/tf/plotter/plotter.py\n--- a/src/garage/tf/plotter/plotter.py\n+++ b/src/garage/tf/plotter/plotter.py\n@@ -43,6 +43,7 @@\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n+ self.policy.build(policy.model.input)\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n", "issue": "Backport #1554\n\n", "before_files": [{"content": "import atexit\nfrom collections import namedtuple\nfrom enum import Enum\nimport platform\nfrom queue import Queue\nfrom threading import Thread\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage.sampler.utils import rollout as default_rollout\n\n__all__ = ['Plotter']\n\n\nclass Op(Enum):\n STOP = 0\n UPDATE = 1\n DEMO = 2\n\n\nMessage = namedtuple('Message', ['op', 'args', 'kwargs'])\n\n\nclass Plotter:\n\n # Static variable used to disable the plotter\n enable = True\n # List containing all plotters instantiated in the process\n __plotters = []\n\n def __init__(self,\n env,\n policy,\n sess=None,\n graph=None,\n rollout=default_rollout):\n Plotter.__plotters.append(self)\n self.env = env\n self.sess = tf.compat.v1.get_default_session(\n ) if sess is None else sess\n self.graph = tf.compat.v1.get_default_graph(\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n\n # Needed in order to draw glfw window on the main thread\n if ('Darwin' in platform.platform()):\n self.rollout(env,\n policy,\n max_path_length=np.inf,\n animated=True,\n speedup=5)\n\n def _start_worker(self):\n env = None\n policy = None\n max_length = None\n initial_rollout = True\n try:\n with self.sess.as_default(), self.sess.graph.as_default():\n # Each iteration will process ALL messages currently in the\n # queue\n while True:\n msgs = {}\n # If true, block and yield processor\n if initial_rollout:\n msg = self.queue.get()\n msgs[msg.op] = msg\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get()\n msgs[msg.op] = msg\n else:\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get_nowait()\n msgs[msg.op] = msg\n\n if Op.STOP in msgs:\n self.queue.task_done()\n break\n if Op.UPDATE in msgs:\n env, policy = msgs[Op.UPDATE].args\n self.queue.task_done()\n if Op.DEMO in msgs:\n param_values, max_length = msgs[Op.DEMO].args\n policy.set_param_values(param_values)\n initial_rollout = False\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n self.queue.task_done()\n else:\n if max_length:\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n except KeyboardInterrupt:\n pass\n\n def close(self):\n if self.worker_thread.is_alive():\n while not self.queue.empty():\n self.queue.get()\n self.queue.task_done()\n self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))\n self.queue.join()\n self.worker_thread.join()\n\n @staticmethod\n def disable():\n \"\"\"Disable all instances of the Plotter class.\"\"\"\n Plotter.enable = False\n\n @staticmethod\n def get_plotters():\n return Plotter.__plotters\n\n def start(self):\n if not Plotter.enable:\n return\n if not self.worker_thread.is_alive():\n 
tf.compat.v1.get_variable_scope().reuse_variables()\n self.worker_thread.start()\n self.queue.put(\n Message(op=Op.UPDATE,\n args=(self.env, self.policy),\n kwargs=None))\n atexit.register(self.close)\n\n def update_plot(self, policy, max_length=np.inf):\n if not Plotter.enable:\n return\n if self.worker_thread.is_alive():\n self.queue.put(\n Message(op=Op.DEMO,\n args=(policy.get_param_values(), max_length),\n kwargs=None))\n", "path": "src/garage/tf/plotter/plotter.py"}], "after_files": [{"content": "import atexit\nfrom collections import namedtuple\nfrom enum import Enum\nimport platform\nfrom queue import Queue\nfrom threading import Thread\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage.sampler.utils import rollout as default_rollout\n\n__all__ = ['Plotter']\n\n\nclass Op(Enum):\n STOP = 0\n UPDATE = 1\n DEMO = 2\n\n\nMessage = namedtuple('Message', ['op', 'args', 'kwargs'])\n\n\nclass Plotter:\n\n # Static variable used to disable the plotter\n enable = True\n # List containing all plotters instantiated in the process\n __plotters = []\n\n def __init__(self,\n env,\n policy,\n sess=None,\n graph=None,\n rollout=default_rollout):\n Plotter.__plotters.append(self)\n self.env = env\n self.sess = tf.compat.v1.get_default_session(\n ) if sess is None else sess\n self.graph = tf.compat.v1.get_default_graph(\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n self.policy.build(policy.model.input)\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n\n # Needed in order to draw glfw window on the main thread\n if ('Darwin' in platform.platform()):\n self.rollout(env,\n policy,\n max_path_length=np.inf,\n animated=True,\n speedup=5)\n\n def _start_worker(self):\n env = None\n policy = None\n max_length = None\n initial_rollout = True\n try:\n with self.sess.as_default(), self.sess.graph.as_default():\n # Each iteration will process ALL messages currently in the\n # queue\n while True:\n msgs = {}\n # If true, block and yield processor\n if initial_rollout:\n msg = self.queue.get()\n msgs[msg.op] = msg\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get()\n msgs[msg.op] = msg\n else:\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get_nowait()\n msgs[msg.op] = msg\n\n if Op.STOP in msgs:\n self.queue.task_done()\n break\n if Op.UPDATE in msgs:\n env, policy = msgs[Op.UPDATE].args\n self.queue.task_done()\n if Op.DEMO in msgs:\n param_values, max_length = msgs[Op.DEMO].args\n policy.set_param_values(param_values)\n initial_rollout = False\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n self.queue.task_done()\n else:\n if max_length:\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n except KeyboardInterrupt:\n pass\n\n def close(self):\n if self.worker_thread.is_alive():\n while not self.queue.empty():\n self.queue.get()\n self.queue.task_done()\n self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))\n self.queue.join()\n self.worker_thread.join()\n\n @staticmethod\n def disable():\n \"\"\"Disable all instances of the Plotter class.\"\"\"\n Plotter.enable = False\n\n @staticmethod\n def get_plotters():\n return Plotter.__plotters\n\n def start(self):\n if not Plotter.enable:\n return\n if not self.worker_thread.is_alive():\n 
tf.compat.v1.get_variable_scope().reuse_variables()\n self.worker_thread.start()\n self.queue.put(\n Message(op=Op.UPDATE,\n args=(self.env, self.policy),\n kwargs=None))\n atexit.register(self.close)\n\n def update_plot(self, policy, max_length=np.inf):\n if not Plotter.enable:\n return\n if self.worker_thread.is_alive():\n self.queue.put(\n Message(op=Op.DEMO,\n args=(policy.get_param_values(), max_length),\n kwargs=None))\n", "path": "src/garage/tf/plotter/plotter.py"}]} | 1,514 | 145 |
gh_patches_debug_1848 | rasdani/github-patches | git_diff | kivy__python-for-android-1163 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenSSL recipe crashes on x86 arch
p4a branch: stable
buildozer: 0.33
bootstrap: sdl2
kivy: 1.10.0
Error message I get:
```
arm_arch.h:46:6: error: #error "unsupported ARM architecture"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/openssl/__init__.py`
Content:
```
1 from functools import partial
2
3 from pythonforandroid.toolchain import Recipe, shprint, current_directory
4 import sh
5
6
7 class OpenSSLRecipe(Recipe):
8 version = '1.0.2h'
9 url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'
10
11 def should_build(self, arch):
12 return not self.has_libs(arch, 'libssl' + self.version + '.so',
13 'libcrypto' + self.version + '.so')
14
15 def check_symbol(self, env, sofile, symbol):
16 nm = env.get('NM', 'nm')
17 syms = sh.sh('-c', "{} -gp {} | cut -d' ' -f3".format(
18 nm, sofile), _env=env).splitlines()
19 if symbol in syms:
20 return True
21 print('{} missing symbol {}; rebuilding'.format(sofile, symbol))
22 return False
23
24 def get_recipe_env(self, arch=None):
25 env = super(OpenSSLRecipe, self).get_recipe_env(arch)
26 env['OPENSSL_VERSION'] = self.version
27 env['CFLAGS'] += ' ' + env['LDFLAGS']
28 env['CC'] += ' ' + env['LDFLAGS']
29 return env
30
31 def select_build_arch(self, arch):
32 aname = arch.arch
33 if 'arm64' in aname:
34 return 'linux-aarch64'
35 if 'v7a' in aname:
36 return 'android-armv7'
37 if 'arm' in aname:
38 return 'android'
39 return 'linux-armv4'
40
41 def build_arch(self, arch):
42 env = self.get_recipe_env(arch)
43 with current_directory(self.get_build_dir(arch.arch)):
44 # sh fails with code 255 trying to execute ./Configure
45 # so instead we manually run perl passing in Configure
46 perl = sh.Command('perl')
47 buildarch = self.select_build_arch(arch)
48 shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)
49 self.apply_patch('disable-sover.patch', arch.arch)
50 self.apply_patch('rename-shared-lib.patch', arch.arch)
51
52 # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')
53 check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')
54 while True:
55 shprint(sh.make, 'build_libs', _env=env)
56 if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):
57 break
58 shprint(sh.make, 'clean', _env=env)
59
60 self.install_libs(arch, 'libssl' + self.version + '.so',
61 'libcrypto' + self.version + '.so')
62
63 recipe = OpenSSLRecipe()
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py
--- a/pythonforandroid/recipes/openssl/__init__.py
+++ b/pythonforandroid/recipes/openssl/__init__.py
@@ -36,6 +36,8 @@
return 'android-armv7'
if 'arm' in aname:
return 'android'
+ if 'x86' in aname:
+ return 'android-x86'
return 'linux-armv4'
def build_arch(self, arch):
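As a quick sanity check, the arch-to-OpenSSL-target mapping after this patch can be exercised on its own. The sketch below is illustrative only: it copies the mapping from the diff into a standalone function and assumes the usual Android ABI names (`armeabi-v7a`, `x86`, ...) that python-for-android passes in.

```python
# Standalone copy of the patched mapping, for illustration only.
def select_build_arch(aname):
    if 'arm64' in aname:
        return 'linux-aarch64'
    if 'v7a' in aname:
        return 'android-armv7'
    if 'arm' in aname:
        return 'android'
    if 'x86' in aname:
        return 'android-x86'
    return 'linux-armv4'

# Before the patch, 'x86' fell through to 'linux-armv4', so OpenSSL was
# configured for ARM and arm_arch.h rejected the x86 compiler; now the
# android-x86 target is selected instead.
assert select_build_arch('armeabi-v7a') == 'android-armv7'
assert select_build_arch('x86') == 'android-x86'
```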
| {"golden_diff": "diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py\n--- a/pythonforandroid/recipes/openssl/__init__.py\n+++ b/pythonforandroid/recipes/openssl/__init__.py\n@@ -36,6 +36,8 @@\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n+ if 'x86' in aname:\n+ return 'android-x86'\n return 'linux-armv4'\n \n def build_arch(self, arch):\n", "issue": "Openssl recipe crashes on x86 arch\np4a branch: stable\r\nbuildozer: 0.33\r\nbootstrap: sdl2\r\nkivy: 1.10.0\r\n\r\nError message i get:\r\n```\r\narm_arch.h:46:6: error: #error \"unsupported ARM architecture\"\r\n```\n", "before_files": [{"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['OPENSSL_VERSION'] = self.version\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n self.apply_patch('rename-shared-lib.patch', arch.arch)\n\n # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')\n check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py"}], "after_files": [{"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, 
arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['OPENSSL_VERSION'] = self.version\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n if 'x86' in aname:\n return 'android-x86'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n self.apply_patch('rename-shared-lib.patch', arch.arch)\n\n # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')\n check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py"}]} | 1,086 | 128 |
gh_patches_debug_26117 | rasdani/github-patches | git_diff | localstack__localstack-977 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Message attributes are not delivered when publishing a message on an SNS topic and receiving it from an SQS queue when using raw message delivery
Relates to: https://github.com/localstack/localstack/issues/137
For the special case of using raw message delivery, the message attributes are not passed along to SQS. It is trivial to reproduce from https://github.com/localstack/localstack/issues/137 after enabling raw message delivery for the subscription.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/sns/sns_listener.py`
Content:
```
1 import ast
2 import json
3 import logging
4 import requests
5 import uuid
6 import xmltodict
7 from requests.models import Response
8 from six.moves.urllib import parse as urlparse
9 from localstack.utils.aws import aws_stack
10 from localstack.utils.common import short_uid, to_str
11 from localstack.services.awslambda import lambda_api
12 from localstack.services.generic_proxy import ProxyListener
13
14 # mappings for SNS topic subscriptions
15 SNS_SUBSCRIPTIONS = {}
16
17 # set up logger
18 LOGGER = logging.getLogger(__name__)
19
20
21 class ProxyListenerSNS(ProxyListener):
22
23 def forward_request(self, method, path, data, headers):
24
25 # check region
26 try:
27 aws_stack.check_valid_region(headers)
28 except Exception as e:
29 return make_error(message=str(e), code=400)
30
31 if method == 'POST' and path == '/':
32 req_data = urlparse.parse_qs(to_str(data))
33 req_action = req_data['Action'][0]
34 topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')
35
36 if topic_arn:
37 topic_arn = topic_arn[0]
38 do_create_topic(topic_arn)
39
40 if req_action == 'SetSubscriptionAttributes':
41 sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
42 if not sub:
43 return make_error(message='Unable to find subscription for given ARN', code=400)
44 attr_name = req_data['AttributeName'][0]
45 attr_value = req_data['AttributeValue'][0]
46 sub[attr_name] = attr_value
47 return make_response(req_action)
48 elif req_action == 'GetSubscriptionAttributes':
49 sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
50 if not sub:
51 return make_error(message='Unable to find subscription for given ARN', code=400)
52 content = '<Attributes>'
53 for key, value in sub.items():
54 content += '<entry><key>%s</key><value>%s</value></entry>\n' % (key, value)
55 content += '</Attributes>'
56 return make_response(req_action, content=content)
57 elif req_action == 'Subscribe':
58 if 'Endpoint' not in req_data:
59 return make_error(message='Endpoint not specified in subscription', code=400)
60 elif req_action == 'Unsubscribe':
61 if 'SubscriptionArn' not in req_data:
62 return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)
63 do_unsubscribe(req_data.get('SubscriptionArn')[0])
64 elif req_action == 'DeleteTopic':
65 do_delete_topic(topic_arn)
66
67 elif req_action == 'Publish':
68 message = req_data['Message'][0]
69 sqs_client = aws_stack.connect_to_service('sqs')
70 for subscriber in SNS_SUBSCRIPTIONS.get(topic_arn, []):
71 filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))
72 message_attributes = get_message_attributes(req_data)
73 if check_filter_policy(filter_policy, message_attributes):
74 if subscriber['Protocol'] == 'sqs':
75 endpoint = subscriber['Endpoint']
76 if 'sqs_queue_url' in subscriber:
77 queue_url = subscriber.get('sqs_queue_url')
78 elif '://' in endpoint:
79 queue_url = endpoint
80 else:
81 queue_name = endpoint.split(':')[5]
82 queue_url = aws_stack.get_sqs_queue_url(queue_name)
83 subscriber['sqs_queue_url'] = queue_url
84 try:
85 sqs_client.send_message(
86 QueueUrl=queue_url,
87 MessageBody=create_sns_message_body(subscriber, req_data)
88 )
89 except Exception as exc:
90 return make_error(message=str(exc), code=400)
91 elif subscriber['Protocol'] == 'lambda':
92 lambda_api.process_sns_notification(
93 subscriber['Endpoint'],
94 topic_arn, message, subject=req_data.get('Subject', [None])[0]
95 )
96 elif subscriber['Protocol'] in ['http', 'https']:
97 try:
98 message_body = create_sns_message_body(subscriber, req_data)
99 except Exception as exc:
100 return make_error(message=str(exc), code=400)
101 requests.post(
102 subscriber['Endpoint'],
103 headers={
104 'Content-Type': 'text/plain',
105 'x-amz-sns-message-type': 'Notification'
106 },
107 data=message_body
108 )
109 else:
110 LOGGER.warning('Unexpected protocol "%s" for SNS subscription' % subscriber['Protocol'])
111 # return response here because we do not want the request to be forwarded to SNS
112 return make_response(req_action)
113
114 return True
115
116 def return_response(self, method, path, data, headers, response):
117 # This method is executed by the proxy after we've already received a
118 # response from the backend, hence we can utilize the "response" variable here
119 if method == 'POST' and path == '/':
120 req_data = urlparse.parse_qs(to_str(data))
121 req_action = req_data['Action'][0]
122 if req_action == 'Subscribe' and response.status_code < 400:
123 response_data = xmltodict.parse(response.content)
124 topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]
125 sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']
126 do_subscribe(topic_arn, req_data['Endpoint'][0], req_data['Protocol'][0], sub_arn)
127
128
129 # instantiate listener
130 UPDATE_SNS = ProxyListenerSNS()
131
132
133 def do_create_topic(topic_arn):
134 if topic_arn not in SNS_SUBSCRIPTIONS:
135 SNS_SUBSCRIPTIONS[topic_arn] = []
136
137
138 def do_delete_topic(topic_arn):
139 if topic_arn in SNS_SUBSCRIPTIONS:
140 del SNS_SUBSCRIPTIONS[topic_arn]
141
142
143 def do_subscribe(topic_arn, endpoint, protocol, subscription_arn):
144 subscription = {
145 # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html
146 'TopicArn': topic_arn,
147 'Endpoint': endpoint,
148 'Protocol': protocol,
149 'SubscriptionArn': subscription_arn,
150 'RawMessageDelivery': 'false'
151 }
152 SNS_SUBSCRIPTIONS[topic_arn].append(subscription)
153
154
155 def do_unsubscribe(subscription_arn):
156 for topic_arn in SNS_SUBSCRIPTIONS:
157 SNS_SUBSCRIPTIONS[topic_arn] = [
158 sub for sub in SNS_SUBSCRIPTIONS[topic_arn]
159 if sub['SubscriptionArn'] != subscription_arn
160 ]
161
162
163 # ---------------
164 # HELPER METHODS
165 # ---------------
166
167 def get_topic_by_arn(topic_arn):
168 if topic_arn in SNS_SUBSCRIPTIONS:
169 return SNS_SUBSCRIPTIONS[topic_arn]
170 else:
171 return None
172
173
174 def get_subscription_by_arn(sub_arn):
175 # TODO maintain separate map instead of traversing all items
176 for key, subscriptions in SNS_SUBSCRIPTIONS.items():
177 for sub in subscriptions:
178 if sub['SubscriptionArn'] == sub_arn:
179 return sub
180
181
182 def make_response(op_name, content=''):
183 response = Response()
184 if not content:
185 content = '<MessageId>%s</MessageId>' % short_uid()
186 response._content = """<{op_name}Response xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
187 <{op_name}Result>
188 {content}
189 </{op_name}Result>
190 <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>
191 </{op_name}Response>""".format(op_name=op_name, content=content, req_id=short_uid())
192 response.status_code = 200
193 return response
194
195
196 def make_error(message, code=400, code_string='InvalidParameter'):
197 response = Response()
198 response._content = """<ErrorResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/"><Error>
199 <Type>Sender</Type>
200 <Code>{code_string}</Code>
201 <Message>{message}</Message>
202 </Error><RequestId>{req_id}</RequestId>
203 </ErrorResponse>""".format(message=message, code_string=code_string, req_id=short_uid())
204 response.status_code = code
205 return response
206
207
208 def create_sns_message_body(subscriber, req_data):
209 message = req_data['Message'][0]
210 subject = req_data.get('Subject', [None])[0]
211 protocol = subscriber['Protocol']
212
213 if subscriber['RawMessageDelivery'] == 'true':
214 return message
215
216 if req_data.get('MessageStructure') == ['json']:
217 message = json.loads(message)
218 try:
219 message = message.get(protocol, message['default'])
220 except KeyError:
221 raise Exception("Unable to find 'default' key in message payload")
222
223 data = {}
224 data['MessageId'] = str(uuid.uuid4())
225 data['Type'] = 'Notification'
226 data['Message'] = message
227 data['TopicArn'] = subscriber['TopicArn']
228 if subject is not None:
229 data['Subject'] = subject
230 attributes = get_message_attributes(req_data)
231 if attributes:
232 data['MessageAttributes'] = attributes
233 return json.dumps(data)
234
235
236 def get_message_attributes(req_data):
237 attributes = {}
238 x = 1
239 while True:
240 name = req_data.get('MessageAttributes.entry.' + str(x) + '.Name', [None])[0]
241 if name is not None:
242 attribute = {}
243 attribute['Type'] = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]
244 string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]
245 binary_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.BinaryValue', [None])[0]
246 if string_value is not None:
247 attribute['Value'] = string_value
248 elif binary_value is not None:
249 attribute['Value'] = binary_value
250
251 attributes[name] = attribute
252 x += 1
253 else:
254 break
255
256 return attributes
257
258
259 def is_number(x):
260 try:
261 float(x)
262 return True
263 except ValueError:
264 return False
265
266
267 def evaluate_numeric_condition(conditions, value):
268 if not is_number(value):
269 return False
270
271 for i in range(0, len(conditions), 2):
272 operator = conditions[i]
273 operand = conditions[i + 1]
274
275 if operator == '=':
276 if value != operand:
277 return False
278 elif operator == '>':
279 if value <= operand:
280 return False
281 elif operator == '<':
282 if value >= operand:
283 return False
284 elif operator == '>=':
285 if value < operand:
286 return False
287 elif operator == '<=':
288 if value > operand:
289 return False
290
291 return True
292
293
294 def evaluate_condition(value, condition):
295 if type(condition) is not dict:
296 return value == condition
297 elif condition.get('anything-but'):
298 return value not in condition.get('anything-but')
299 elif condition.get('prefix'):
300 prefix = condition.get('prefix')
301 return value.startswith(prefix)
302 elif condition.get('numeric'):
303 return evaluate_numeric_condition(condition.get('numeric'), value)
304
305 return False
306
307
308 def evaluate_filter_policy_conditions(conditions, attribute):
309 if type(conditions) is not list:
310 conditions = [conditions]
311
312 if attribute['Type'] == 'String.Array':
313 values = ast.literal_eval(attribute['Value'])
314 for value in values:
315 for condition in conditions:
316 if evaluate_condition(value, condition):
317 return True
318 else:
319 for condition in conditions:
320 if evaluate_condition(attribute['Value'], condition):
321 return True
322
323 return False
324
325
326 def check_filter_policy(filter_policy, message_attributes):
327 if not filter_policy:
328 return True
329
330 for criteria in filter_policy:
331 conditions = filter_policy.get(criteria)
332 attribute = message_attributes.get(criteria)
333
334 if attribute is None:
335 return False
336
337 if evaluate_filter_policy_conditions(conditions, attribute) is False:
338 return False
339
340 return True
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/sns/sns_listener.py b/localstack/services/sns/sns_listener.py
--- a/localstack/services/sns/sns_listener.py
+++ b/localstack/services/sns/sns_listener.py
@@ -84,7 +84,8 @@
try:
sqs_client.send_message(
QueueUrl=queue_url,
- MessageBody=create_sns_message_body(subscriber, req_data)
+ MessageBody=create_sns_message_body(subscriber, req_data),
+ MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes)
)
except Exception as exc:
return make_error(message=str(exc), code=400)
@@ -233,6 +234,23 @@
return json.dumps(data)
+def create_sqs_message_attributes(subscriber, attributes):
+ if subscriber.get('RawMessageDelivery') not in ('true', True):
+ return {}
+
+ message_attributes = {}
+ for key, value in attributes.items():
+ attribute = {}
+ attribute['DataType'] = value['Type']
+ if value['Type'] == 'Binary':
+ attribute['BinaryValue'] = value['Value']
+ else:
+ attribute['StringValue'] = value['Value']
+ message_attributes[key] = attribute
+
+ return message_attributes
+
+
def get_message_attributes(req_data):
attributes = {}
x = 1
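For context, the shape conversion performed by the new `create_sqs_message_attributes` helper can be illustrated in isolation. The snippet below is a sketch with a made-up attribute payload, not code from the repository; it mirrors only the translation step, leaving out the `RawMessageDelivery` check.

```python
# Sketch of the SNS -> SQS attribute translation, for illustration only.
def to_sqs_message_attributes(attributes):
    result = {}
    for key, value in attributes.items():
        entry = {'DataType': value['Type']}
        if value['Type'] == 'Binary':
            entry['BinaryValue'] = value['Value']
        else:
            entry['StringValue'] = value['Value']
        result[key] = entry
    return result

# SNS-style attributes, as parsed by get_message_attributes ...
sns_attrs = {'color': {'Type': 'String', 'Value': 'green'}}
# ... become the structure sqs_client.send_message expects:
print(to_sqs_message_attributes(sns_attrs))
# {'color': {'DataType': 'String', 'StringValue': 'green'}}
```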
| {"golden_diff": "diff --git a/localstack/services/sns/sns_listener.py b/localstack/services/sns/sns_listener.py\n--- a/localstack/services/sns/sns_listener.py\n+++ b/localstack/services/sns/sns_listener.py\n@@ -84,7 +84,8 @@\n try:\n sqs_client.send_message(\n QueueUrl=queue_url,\n- MessageBody=create_sns_message_body(subscriber, req_data)\n+ MessageBody=create_sns_message_body(subscriber, req_data),\n+ MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes)\n )\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n@@ -233,6 +234,23 @@\n return json.dumps(data)\n \n \n+def create_sqs_message_attributes(subscriber, attributes):\n+ if subscriber.get('RawMessageDelivery') not in ('true', True):\n+ return {}\n+\n+ message_attributes = {}\n+ for key, value in attributes.items():\n+ attribute = {}\n+ attribute['DataType'] = value['Type']\n+ if value['Type'] == 'Binary':\n+ attribute['BinaryValue'] = value['Value']\n+ else:\n+ attribute['StringValue'] = value['Value']\n+ message_attributes[key] = attribute\n+\n+ return message_attributes\n+\n+\n def get_message_attributes(req_data):\n attributes = {}\n x = 1\n", "issue": "Message attributes are not delivered when publishing a message on a SNS topic and receiving it from an SQS queue when using raw message delivery\nRelates to: https://github.com/localstack/localstack/issues/137\r\n\r\nFor the special case of using raw message delivery, the message attributes are not passed along to SQS. Trivial to reproduce from https://github.com/localstack/localstack/issues/137 after enabling raw message delivery for the subscription.\r\n\r\n<!-- Love localstack? Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/localstack/donate -->\n", "before_files": [{"content": "import ast\nimport json\nimport logging\nimport requests\nimport uuid\nimport xmltodict\nfrom requests.models import Response\nfrom six.moves.urllib import parse as urlparse\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import short_uid, to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# mappings for SNS topic subscriptions\nSNS_SUBSCRIPTIONS = {}\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass ProxyListenerSNS(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n\n # check region\n try:\n aws_stack.check_valid_region(headers)\n except Exception as e:\n return make_error(message=str(e), code=400)\n\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')\n\n if topic_arn:\n topic_arn = topic_arn[0]\n do_create_topic(topic_arn)\n\n if req_action == 'SetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n attr_name = req_data['AttributeName'][0]\n attr_value = req_data['AttributeValue'][0]\n sub[attr_name] = attr_value\n return make_response(req_action)\n elif req_action == 'GetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n content = '<Attributes>'\n for key, value in sub.items():\n content += '<entry><key>%s</key><value>%s</value></entry>\\n' % (key, value)\n content += 
'</Attributes>'\n return make_response(req_action, content=content)\n elif req_action == 'Subscribe':\n if 'Endpoint' not in req_data:\n return make_error(message='Endpoint not specified in subscription', code=400)\n elif req_action == 'Unsubscribe':\n if 'SubscriptionArn' not in req_data:\n return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)\n do_unsubscribe(req_data.get('SubscriptionArn')[0])\n elif req_action == 'DeleteTopic':\n do_delete_topic(topic_arn)\n\n elif req_action == 'Publish':\n message = req_data['Message'][0]\n sqs_client = aws_stack.connect_to_service('sqs')\n for subscriber in SNS_SUBSCRIPTIONS.get(topic_arn, []):\n filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))\n message_attributes = get_message_attributes(req_data)\n if check_filter_policy(filter_policy, message_attributes):\n if subscriber['Protocol'] == 'sqs':\n endpoint = subscriber['Endpoint']\n if 'sqs_queue_url' in subscriber:\n queue_url = subscriber.get('sqs_queue_url')\n elif '://' in endpoint:\n queue_url = endpoint\n else:\n queue_name = endpoint.split(':')[5]\n queue_url = aws_stack.get_sqs_queue_url(queue_name)\n subscriber['sqs_queue_url'] = queue_url\n try:\n sqs_client.send_message(\n QueueUrl=queue_url,\n MessageBody=create_sns_message_body(subscriber, req_data)\n )\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n elif subscriber['Protocol'] == 'lambda':\n lambda_api.process_sns_notification(\n subscriber['Endpoint'],\n topic_arn, message, subject=req_data.get('Subject', [None])[0]\n )\n elif subscriber['Protocol'] in ['http', 'https']:\n try:\n message_body = create_sns_message_body(subscriber, req_data)\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n requests.post(\n subscriber['Endpoint'],\n headers={\n 'Content-Type': 'text/plain',\n 'x-amz-sns-message-type': 'Notification'\n },\n data=message_body\n )\n else:\n LOGGER.warning('Unexpected protocol \"%s\" for SNS subscription' % subscriber['Protocol'])\n # return response here because we do not want the request to be forwarded to SNS\n return make_response(req_action)\n\n return True\n\n def return_response(self, method, path, data, headers, response):\n # This method is executed by the proxy after we've already received a\n # response from the backend, hence we can utilize the \"response\" variable here\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n if req_action == 'Subscribe' and response.status_code < 400:\n response_data = xmltodict.parse(response.content)\n topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]\n sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']\n do_subscribe(topic_arn, req_data['Endpoint'][0], req_data['Protocol'][0], sub_arn)\n\n\n# instantiate listener\nUPDATE_SNS = ProxyListenerSNS()\n\n\ndef do_create_topic(topic_arn):\n if topic_arn not in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = []\n\n\ndef do_delete_topic(topic_arn):\n if topic_arn in SNS_SUBSCRIPTIONS:\n del SNS_SUBSCRIPTIONS[topic_arn]\n\n\ndef do_subscribe(topic_arn, endpoint, protocol, subscription_arn):\n subscription = {\n # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html\n 'TopicArn': topic_arn,\n 'Endpoint': endpoint,\n 'Protocol': protocol,\n 'SubscriptionArn': subscription_arn,\n 'RawMessageDelivery': 'false'\n }\n SNS_SUBSCRIPTIONS[topic_arn].append(subscription)\n\n\ndef 
do_unsubscribe(subscription_arn):\n for topic_arn in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = [\n sub for sub in SNS_SUBSCRIPTIONS[topic_arn]\n if sub['SubscriptionArn'] != subscription_arn\n ]\n\n\n# ---------------\n# HELPER METHODS\n# ---------------\n\ndef get_topic_by_arn(topic_arn):\n if topic_arn in SNS_SUBSCRIPTIONS:\n return SNS_SUBSCRIPTIONS[topic_arn]\n else:\n return None\n\n\ndef get_subscription_by_arn(sub_arn):\n # TODO maintain separate map instead of traversing all items\n for key, subscriptions in SNS_SUBSCRIPTIONS.items():\n for sub in subscriptions:\n if sub['SubscriptionArn'] == sub_arn:\n return sub\n\n\ndef make_response(op_name, content=''):\n response = Response()\n if not content:\n content = '<MessageId>%s</MessageId>' % short_uid()\n response._content = \"\"\"<{op_name}Response xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\">\n <{op_name}Result>\n {content}\n </{op_name}Result>\n <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>\n </{op_name}Response>\"\"\".format(op_name=op_name, content=content, req_id=short_uid())\n response.status_code = 200\n return response\n\n\ndef make_error(message, code=400, code_string='InvalidParameter'):\n response = Response()\n response._content = \"\"\"<ErrorResponse xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\"><Error>\n <Type>Sender</Type>\n <Code>{code_string}</Code>\n <Message>{message}</Message>\n </Error><RequestId>{req_id}</RequestId>\n </ErrorResponse>\"\"\".format(message=message, code_string=code_string, req_id=short_uid())\n response.status_code = code\n return response\n\n\ndef create_sns_message_body(subscriber, req_data):\n message = req_data['Message'][0]\n subject = req_data.get('Subject', [None])[0]\n protocol = subscriber['Protocol']\n\n if subscriber['RawMessageDelivery'] == 'true':\n return message\n\n if req_data.get('MessageStructure') == ['json']:\n message = json.loads(message)\n try:\n message = message.get(protocol, message['default'])\n except KeyError:\n raise Exception(\"Unable to find 'default' key in message payload\")\n\n data = {}\n data['MessageId'] = str(uuid.uuid4())\n data['Type'] = 'Notification'\n data['Message'] = message\n data['TopicArn'] = subscriber['TopicArn']\n if subject is not None:\n data['Subject'] = subject\n attributes = get_message_attributes(req_data)\n if attributes:\n data['MessageAttributes'] = attributes\n return json.dumps(data)\n\n\ndef get_message_attributes(req_data):\n attributes = {}\n x = 1\n while True:\n name = req_data.get('MessageAttributes.entry.' + str(x) + '.Name', [None])[0]\n if name is not None:\n attribute = {}\n attribute['Type'] = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]\n string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]\n binary_value = req_data.get('MessageAttributes.entry.' 
+ str(x) + '.Value.BinaryValue', [None])[0]\n if string_value is not None:\n attribute['Value'] = string_value\n elif binary_value is not None:\n attribute['Value'] = binary_value\n\n attributes[name] = attribute\n x += 1\n else:\n break\n\n return attributes\n\n\ndef is_number(x):\n try:\n float(x)\n return True\n except ValueError:\n return False\n\n\ndef evaluate_numeric_condition(conditions, value):\n if not is_number(value):\n return False\n\n for i in range(0, len(conditions), 2):\n operator = conditions[i]\n operand = conditions[i + 1]\n\n if operator == '=':\n if value != operand:\n return False\n elif operator == '>':\n if value <= operand:\n return False\n elif operator == '<':\n if value >= operand:\n return False\n elif operator == '>=':\n if value < operand:\n return False\n elif operator == '<=':\n if value > operand:\n return False\n\n return True\n\n\ndef evaluate_condition(value, condition):\n if type(condition) is not dict:\n return value == condition\n elif condition.get('anything-but'):\n return value not in condition.get('anything-but')\n elif condition.get('prefix'):\n prefix = condition.get('prefix')\n return value.startswith(prefix)\n elif condition.get('numeric'):\n return evaluate_numeric_condition(condition.get('numeric'), value)\n\n return False\n\n\ndef evaluate_filter_policy_conditions(conditions, attribute):\n if type(conditions) is not list:\n conditions = [conditions]\n\n if attribute['Type'] == 'String.Array':\n values = ast.literal_eval(attribute['Value'])\n for value in values:\n for condition in conditions:\n if evaluate_condition(value, condition):\n return True\n else:\n for condition in conditions:\n if evaluate_condition(attribute['Value'], condition):\n return True\n\n return False\n\n\ndef check_filter_policy(filter_policy, message_attributes):\n if not filter_policy:\n return True\n\n for criteria in filter_policy:\n conditions = filter_policy.get(criteria)\n attribute = message_attributes.get(criteria)\n\n if attribute is None:\n return False\n\n if evaluate_filter_policy_conditions(conditions, attribute) is False:\n return False\n\n return True\n", "path": "localstack/services/sns/sns_listener.py"}], "after_files": [{"content": "import ast\nimport json\nimport logging\nimport requests\nimport uuid\nimport xmltodict\nfrom requests.models import Response\nfrom six.moves.urllib import parse as urlparse\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import short_uid, to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# mappings for SNS topic subscriptions\nSNS_SUBSCRIPTIONS = {}\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass ProxyListenerSNS(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n\n # check region\n try:\n aws_stack.check_valid_region(headers)\n except Exception as e:\n return make_error(message=str(e), code=400)\n\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')\n\n if topic_arn:\n topic_arn = topic_arn[0]\n do_create_topic(topic_arn)\n\n if req_action == 'SetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n attr_name = req_data['AttributeName'][0]\n attr_value = req_data['AttributeValue'][0]\n sub[attr_name] = 
attr_value\n return make_response(req_action)\n elif req_action == 'GetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n content = '<Attributes>'\n for key, value in sub.items():\n content += '<entry><key>%s</key><value>%s</value></entry>\\n' % (key, value)\n content += '</Attributes>'\n return make_response(req_action, content=content)\n elif req_action == 'Subscribe':\n if 'Endpoint' not in req_data:\n return make_error(message='Endpoint not specified in subscription', code=400)\n elif req_action == 'Unsubscribe':\n if 'SubscriptionArn' not in req_data:\n return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)\n do_unsubscribe(req_data.get('SubscriptionArn')[0])\n elif req_action == 'DeleteTopic':\n do_delete_topic(topic_arn)\n\n elif req_action == 'Publish':\n message = req_data['Message'][0]\n sqs_client = aws_stack.connect_to_service('sqs')\n for subscriber in SNS_SUBSCRIPTIONS.get(topic_arn, []):\n filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))\n message_attributes = get_message_attributes(req_data)\n if check_filter_policy(filter_policy, message_attributes):\n if subscriber['Protocol'] == 'sqs':\n endpoint = subscriber['Endpoint']\n if 'sqs_queue_url' in subscriber:\n queue_url = subscriber.get('sqs_queue_url')\n elif '://' in endpoint:\n queue_url = endpoint\n else:\n queue_name = endpoint.split(':')[5]\n queue_url = aws_stack.get_sqs_queue_url(queue_name)\n subscriber['sqs_queue_url'] = queue_url\n try:\n sqs_client.send_message(\n QueueUrl=queue_url,\n MessageBody=create_sns_message_body(subscriber, req_data),\n MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes)\n )\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n elif subscriber['Protocol'] == 'lambda':\n lambda_api.process_sns_notification(\n subscriber['Endpoint'],\n topic_arn, message, subject=req_data.get('Subject', [None])[0]\n )\n elif subscriber['Protocol'] in ['http', 'https']:\n try:\n message_body = create_sns_message_body(subscriber, req_data)\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n requests.post(\n subscriber['Endpoint'],\n headers={\n 'Content-Type': 'text/plain',\n 'x-amz-sns-message-type': 'Notification'\n },\n data=message_body\n )\n else:\n LOGGER.warning('Unexpected protocol \"%s\" for SNS subscription' % subscriber['Protocol'])\n # return response here because we do not want the request to be forwarded to SNS\n return make_response(req_action)\n\n return True\n\n def return_response(self, method, path, data, headers, response):\n # This method is executed by the proxy after we've already received a\n # response from the backend, hence we can utilize the \"response\" variable here\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n if req_action == 'Subscribe' and response.status_code < 400:\n response_data = xmltodict.parse(response.content)\n topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]\n sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']\n do_subscribe(topic_arn, req_data['Endpoint'][0], req_data['Protocol'][0], sub_arn)\n\n\n# instantiate listener\nUPDATE_SNS = ProxyListenerSNS()\n\n\ndef do_create_topic(topic_arn):\n if topic_arn not in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = []\n\n\ndef 
do_delete_topic(topic_arn):\n if topic_arn in SNS_SUBSCRIPTIONS:\n del SNS_SUBSCRIPTIONS[topic_arn]\n\n\ndef do_subscribe(topic_arn, endpoint, protocol, subscription_arn):\n subscription = {\n # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html\n 'TopicArn': topic_arn,\n 'Endpoint': endpoint,\n 'Protocol': protocol,\n 'SubscriptionArn': subscription_arn,\n 'RawMessageDelivery': 'false'\n }\n SNS_SUBSCRIPTIONS[topic_arn].append(subscription)\n\n\ndef do_unsubscribe(subscription_arn):\n for topic_arn in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = [\n sub for sub in SNS_SUBSCRIPTIONS[topic_arn]\n if sub['SubscriptionArn'] != subscription_arn\n ]\n\n\n# ---------------\n# HELPER METHODS\n# ---------------\n\ndef get_topic_by_arn(topic_arn):\n if topic_arn in SNS_SUBSCRIPTIONS:\n return SNS_SUBSCRIPTIONS[topic_arn]\n else:\n return None\n\n\ndef get_subscription_by_arn(sub_arn):\n # TODO maintain separate map instead of traversing all items\n for key, subscriptions in SNS_SUBSCRIPTIONS.items():\n for sub in subscriptions:\n if sub['SubscriptionArn'] == sub_arn:\n return sub\n\n\ndef make_response(op_name, content=''):\n response = Response()\n if not content:\n content = '<MessageId>%s</MessageId>' % short_uid()\n response._content = \"\"\"<{op_name}Response xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\">\n <{op_name}Result>\n {content}\n </{op_name}Result>\n <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>\n </{op_name}Response>\"\"\".format(op_name=op_name, content=content, req_id=short_uid())\n response.status_code = 200\n return response\n\n\ndef make_error(message, code=400, code_string='InvalidParameter'):\n response = Response()\n response._content = \"\"\"<ErrorResponse xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\"><Error>\n <Type>Sender</Type>\n <Code>{code_string}</Code>\n <Message>{message}</Message>\n </Error><RequestId>{req_id}</RequestId>\n </ErrorResponse>\"\"\".format(message=message, code_string=code_string, req_id=short_uid())\n response.status_code = code\n return response\n\n\ndef create_sns_message_body(subscriber, req_data):\n message = req_data['Message'][0]\n subject = req_data.get('Subject', [None])[0]\n protocol = subscriber['Protocol']\n\n if subscriber['RawMessageDelivery'] == 'true':\n return message\n\n if req_data.get('MessageStructure') == ['json']:\n message = json.loads(message)\n try:\n message = message.get(protocol, message['default'])\n except KeyError:\n raise Exception(\"Unable to find 'default' key in message payload\")\n\n data = {}\n data['MessageId'] = str(uuid.uuid4())\n data['Type'] = 'Notification'\n data['Message'] = message\n data['TopicArn'] = subscriber['TopicArn']\n if subject is not None:\n data['Subject'] = subject\n attributes = get_message_attributes(req_data)\n if attributes:\n data['MessageAttributes'] = attributes\n return json.dumps(data)\n\n\ndef create_sqs_message_attributes(subscriber, attributes):\n if subscriber.get('RawMessageDelivery') not in ('true', True):\n return {}\n\n message_attributes = {}\n for key, value in attributes.items():\n attribute = {}\n attribute['DataType'] = value['Type']\n if value['Type'] == 'Binary':\n attribute['BinaryValue'] = value['Value']\n else:\n attribute['StringValue'] = value['Value']\n message_attributes[key] = attribute\n\n return message_attributes\n\n\ndef get_message_attributes(req_data):\n attributes = {}\n x = 1\n while True:\n name = req_data.get('MessageAttributes.entry.' 
+ str(x) + '.Name', [None])[0]\n if name is not None:\n attribute = {}\n attribute['Type'] = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]\n string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]\n binary_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.BinaryValue', [None])[0]\n if string_value is not None:\n attribute['Value'] = string_value\n elif binary_value is not None:\n attribute['Value'] = binary_value\n\n attributes[name] = attribute\n x += 1\n else:\n break\n\n return attributes\n\n\ndef is_number(x):\n try:\n float(x)\n return True\n except ValueError:\n return False\n\n\ndef evaluate_numeric_condition(conditions, value):\n if not is_number(value):\n return False\n\n for i in range(0, len(conditions), 2):\n operator = conditions[i]\n operand = conditions[i + 1]\n\n if operator == '=':\n if value != operand:\n return False\n elif operator == '>':\n if value <= operand:\n return False\n elif operator == '<':\n if value >= operand:\n return False\n elif operator == '>=':\n if value < operand:\n return False\n elif operator == '<=':\n if value > operand:\n return False\n\n return True\n\n\ndef evaluate_condition(value, condition):\n if type(condition) is not dict:\n return value == condition\n elif condition.get('anything-but'):\n return value not in condition.get('anything-but')\n elif condition.get('prefix'):\n prefix = condition.get('prefix')\n return value.startswith(prefix)\n elif condition.get('numeric'):\n return evaluate_numeric_condition(condition.get('numeric'), value)\n\n return False\n\n\ndef evaluate_filter_policy_conditions(conditions, attribute):\n if type(conditions) is not list:\n conditions = [conditions]\n\n if attribute['Type'] == 'String.Array':\n values = ast.literal_eval(attribute['Value'])\n for value in values:\n for condition in conditions:\n if evaluate_condition(value, condition):\n return True\n else:\n for condition in conditions:\n if evaluate_condition(attribute['Value'], condition):\n return True\n\n return False\n\n\ndef check_filter_policy(filter_policy, message_attributes):\n if not filter_policy:\n return True\n\n for criteria in filter_policy:\n conditions = filter_policy.get(criteria)\n attribute = message_attributes.get(criteria)\n\n if attribute is None:\n return False\n\n if evaluate_filter_policy_conditions(conditions, attribute) is False:\n return False\n\n return True\n", "path": "localstack/services/sns/sns_listener.py"}]} | 3,929 | 305 |
gh_patches_debug_34210 | rasdani/github-patches | git_diff | matrix-org__synapse-7663 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash on startup (1.14.0)
### Description
Restarting synapse runs the risk of crashing.
I run a daily stop-backup-restart routine, but since upgrading to 1.14.0, there have been infrequent occurrences of crashes after attempting to restart ([logs attached in gist](https://gist.github.com/karthanistyr/749a6f35fd8e13e15fc9571fd304dfe4)).
### Steps to reproduce
- stop the server (e.g. stop the docker compose stack, in my case)
- wait for some time (my backup routine lasts for 45 mins)
- restart server (restart the docker compose stack)
It's worth noting that, with docker-compose, the containers themselves are scrapped when stopped and new ones are spun up on restart, reattaching the volumes that contain the persistent data.
It had been running like this for more than two years (dating back to the 0.27.x series), and this is the first time something like this has happened to my server.
It looks like some form of race condition, as it is infrequent: I can usually restart the container the same way the daily backup script does, and it starts up fine.
### Version information
- **Homeserver**: draak.fr
- **Version**: 1.14.0
- **Install method**: Docker image
- **Platform**: debian linux 10.3
- **Hardware**: small 4-core 8 GB VPS
--- END ISSUE ---
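One way a restart can crash on a race like this is when one thread iterates a shared, module-level dict while another thread is still registering entries in it. The toy script below uses invented names (it is not Synapse code) and simply demonstrates that failure mode.

```python
import threading
import time

registry = {}  # stand-in for a process-wide registry shared across threads

def register(name):
    # simulates entries being added from a background/startup thread
    registry[name] = lambda factor: None

def resize_all():
    # iterating while another thread inserts typically raises
    # "RuntimeError: dictionary changed size during iteration"
    for _name, callback in registry.items():
        callback(1.0)
        time.sleep(0.0001)  # widen the window so the race triggers reliably

register("seed")
writer = threading.Thread(target=lambda: [register(str(i)) for i in range(50000)])
writer.start()
try:
    resize_all()
except RuntimeError as exc:
    print("crashed:", exc)
writer.join()
```

Serialising the two sides with a lock (or snapshotting the dict before iterating) avoids this failure mode.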
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/config/cache.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2019 Matrix.org Foundation C.I.C.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import re
18 from typing import Callable, Dict
19
20 from ._base import Config, ConfigError
21
22 # The prefix for all cache factor-related environment variables
23 _CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
24
25 # Map from canonicalised cache name to cache.
26 _CACHES = {}
27
28 _DEFAULT_FACTOR_SIZE = 0.5
29 _DEFAULT_EVENT_CACHE_SIZE = "10K"
30
31
32 class CacheProperties(object):
33 def __init__(self):
34 # The default factor size for all caches
35 self.default_factor_size = float(
36 os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
37 )
38 self.resize_all_caches_func = None
39
40
41 properties = CacheProperties()
42
43
44 def _canonicalise_cache_name(cache_name: str) -> str:
45 """Gets the canonical form of the cache name.
46
47 Since we specify cache names in config and environment variables we need to
48 ignore case and special characters. For example, some caches have asterisks
49 in their name to denote that they're not attached to a particular database
50 function, and these asterisks need to be stripped out
51 """
52
53 cache_name = re.sub(r"[^A-Za-z_1-9]", "", cache_name)
54
55 return cache_name.lower()
56
57
58 def add_resizable_cache(cache_name: str, cache_resize_callback: Callable):
59 """Register a cache that's size can dynamically change
60
61 Args:
62 cache_name: A reference to the cache
63 cache_resize_callback: A callback function that will be ran whenever
64 the cache needs to be resized
65 """
66 # Some caches have '*' in them which we strip out.
67 cache_name = _canonicalise_cache_name(cache_name)
68
69 _CACHES[cache_name] = cache_resize_callback
70
71 # Ensure all loaded caches are sized appropriately
72 #
73 # This method should only run once the config has been read,
74 # as it uses values read from it
75 if properties.resize_all_caches_func:
76 properties.resize_all_caches_func()
77
78
79 class CacheConfig(Config):
80 section = "caches"
81 _environ = os.environ
82
83 @staticmethod
84 def reset():
85 """Resets the caches to their defaults. Used for tests."""
86 properties.default_factor_size = float(
87 os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
88 )
89 properties.resize_all_caches_func = None
90 _CACHES.clear()
91
92 def generate_config_section(self, **kwargs):
93 return """\
94 ## Caching ##
95
96 # Caching can be configured through the following options.
97 #
98 # A cache 'factor' is a multiplier that can be applied to each of
99 # Synapse's caches in order to increase or decrease the maximum
100 # number of entries that can be stored.
101
102 # The number of events to cache in memory. Not affected by
103 # caches.global_factor.
104 #
105 #event_cache_size: 10K
106
107 caches:
108 # Controls the global cache factor, which is the default cache factor
109 # for all caches if a specific factor for that cache is not otherwise
110 # set.
111 #
112 # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
113 # variable. Setting by environment variable takes priority over
114 # setting through the config file.
115 #
116 # Defaults to 0.5, which will half the size of all caches.
117 #
118 #global_factor: 1.0
119
120 # A dictionary of cache name to cache factor for that individual
121 # cache. Overrides the global cache factor for a given cache.
122 #
123 # These can also be set through environment variables comprised
124 # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
125 # letters and underscores. Setting by environment variable
126 # takes priority over setting through the config file.
127 # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
128 #
129 # Some caches have '*' and other characters that are not
130 # alphanumeric or underscores. These caches can be named with or
131 # without the special characters stripped. For example, to specify
132 # the cache factor for `*stateGroupCache*` via an environment
133 # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
134 #
135 per_cache_factors:
136 #get_users_who_share_room_with_user: 2.0
137 """
138
139 def read_config(self, config, **kwargs):
140 self.event_cache_size = self.parse_size(
141 config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
142 )
143 self.cache_factors = {} # type: Dict[str, float]
144
145 cache_config = config.get("caches") or {}
146 self.global_factor = cache_config.get(
147 "global_factor", properties.default_factor_size
148 )
149 if not isinstance(self.global_factor, (int, float)):
150 raise ConfigError("caches.global_factor must be a number.")
151
152 # Set the global one so that it's reflected in new caches
153 properties.default_factor_size = self.global_factor
154
155 # Load cache factors from the config
156 individual_factors = cache_config.get("per_cache_factors") or {}
157 if not isinstance(individual_factors, dict):
158 raise ConfigError("caches.per_cache_factors must be a dictionary")
159
160 # Canonicalise the cache names *before* updating with the environment
161 # variables.
162 individual_factors = {
163 _canonicalise_cache_name(key): val
164 for key, val in individual_factors.items()
165 }
166
167 # Override factors from environment if necessary
168 individual_factors.update(
169 {
170 _canonicalise_cache_name(key[len(_CACHE_PREFIX) + 1 :]): float(val)
171 for key, val in self._environ.items()
172 if key.startswith(_CACHE_PREFIX + "_")
173 }
174 )
175
176 for cache, factor in individual_factors.items():
177 if not isinstance(factor, (int, float)):
178 raise ConfigError(
179 "caches.per_cache_factors.%s must be a number" % (cache,)
180 )
181 self.cache_factors[cache] = factor
182
183 # Resize all caches (if necessary) with the new factors we've loaded
184 self.resize_all_caches()
185
186 # Store this function so that it can be called from other classes without
187 # needing an instance of Config
188 properties.resize_all_caches_func = self.resize_all_caches
189
190 def resize_all_caches(self):
191 """Ensure all cache sizes are up to date
192
193 For each cache, run the mapped callback function with either
194 a specific cache factor or the default, global one.
195 """
196 for cache_name, callback in _CACHES.items():
197 new_factor = self.cache_factors.get(cache_name, self.global_factor)
198 callback(new_factor)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synapse/config/cache.py b/synapse/config/cache.py
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -15,6 +15,7 @@
import os
import re
+import threading
from typing import Callable, Dict
from ._base import Config, ConfigError
@@ -25,6 +26,9 @@
# Map from canonicalised cache name to cache.
_CACHES = {}
+# a lock on the contents of _CACHES
+_CACHES_LOCK = threading.Lock()
+
_DEFAULT_FACTOR_SIZE = 0.5
_DEFAULT_EVENT_CACHE_SIZE = "10K"
@@ -66,7 +70,10 @@
# Some caches have '*' in them which we strip out.
cache_name = _canonicalise_cache_name(cache_name)
- _CACHES[cache_name] = cache_resize_callback
+ # sometimes caches are initialised from background threads, so we need to make
+ # sure we don't conflict with another thread running a resize operation
+ with _CACHES_LOCK:
+ _CACHES[cache_name] = cache_resize_callback
# Ensure all loaded caches are sized appropriately
#
@@ -87,7 +94,8 @@
os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
)
properties.resize_all_caches_func = None
- _CACHES.clear()
+ with _CACHES_LOCK:
+ _CACHES.clear()
def generate_config_section(self, **kwargs):
return """\
@@ -193,6 +201,8 @@
For each cache, run the mapped callback function with either
a specific cache factor or the default, global one.
"""
- for cache_name, callback in _CACHES.items():
- new_factor = self.cache_factors.get(cache_name, self.global_factor)
- callback(new_factor)
+ # block other threads from modifying _CACHES while we iterate it.
+ with _CACHES_LOCK:
+ for cache_name, callback in _CACHES.items():
+ new_factor = self.cache_factors.get(cache_name, self.global_factor)
+ callback(new_factor)
| {"golden_diff": "diff --git a/synapse/config/cache.py b/synapse/config/cache.py\n--- a/synapse/config/cache.py\n+++ b/synapse/config/cache.py\n@@ -15,6 +15,7 @@\n \n import os\n import re\n+import threading\n from typing import Callable, Dict\n \n from ._base import Config, ConfigError\n@@ -25,6 +26,9 @@\n # Map from canonicalised cache name to cache.\n _CACHES = {}\n \n+# a lock on the contents of _CACHES\n+_CACHES_LOCK = threading.Lock()\n+\n _DEFAULT_FACTOR_SIZE = 0.5\n _DEFAULT_EVENT_CACHE_SIZE = \"10K\"\n \n@@ -66,7 +70,10 @@\n # Some caches have '*' in them which we strip out.\n cache_name = _canonicalise_cache_name(cache_name)\n \n- _CACHES[cache_name] = cache_resize_callback\n+ # sometimes caches are initialised from background threads, so we need to make\n+ # sure we don't conflict with another thread running a resize operation\n+ with _CACHES_LOCK:\n+ _CACHES[cache_name] = cache_resize_callback\n \n # Ensure all loaded caches are sized appropriately\n #\n@@ -87,7 +94,8 @@\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n properties.resize_all_caches_func = None\n- _CACHES.clear()\n+ with _CACHES_LOCK:\n+ _CACHES.clear()\n \n def generate_config_section(self, **kwargs):\n return \"\"\"\\\n@@ -193,6 +201,8 @@\n For each cache, run the mapped callback function with either\n a specific cache factor or the default, global one.\n \"\"\"\n- for cache_name, callback in _CACHES.items():\n- new_factor = self.cache_factors.get(cache_name, self.global_factor)\n- callback(new_factor)\n+ # block other threads from modifying _CACHES while we iterate it.\n+ with _CACHES_LOCK:\n+ for cache_name, callback in _CACHES.items():\n+ new_factor = self.cache_factors.get(cache_name, self.global_factor)\n+ callback(new_factor)\n", "issue": "Crash on startup (1.14.0)\n### Description\r\nRestarting synapse runs the risk of crashing.\r\n\r\nI run a daily stop-backup-restart routine, but since upgrading to 1.14.0, there have been infrequent occurrences of crashes after attempting to restart ([logs attached in gist](https://gist.github.com/karthanistyr/749a6f35fd8e13e15fc9571fd304dfe4)).\r\n\r\n### Steps to reproduce\r\n\r\n- stop the server (e.g. 
stop the docker compose stack, in my case)\r\n- wait for some time (my backup routine lasts for 45 mins)\r\n- restart server (restart the docker compose stack)\r\n\r\nIt's worth noting that using docker-compose, the containers themselves are scrapped when stopped and new ones are spun up on restart, reattaching volumes containing persistent data.\r\n\r\nIt had been running like this for more than two years (dating back to the 0.27.x series), and it's the first time something like this happens to my server.\r\n\r\nIt looks like some form of race condition as it is infrequent and I can usually restart the container as would the daily backup script and it starts up fine.\r\n\r\n### Version information\r\n- **Homeserver**: draak.fr\r\n- **Version**: 1.14.0\r\n- **Install method**: Docker image\r\n- **Platform**: debian linux 10.3\r\n- **Hardware**: small 4-core 8 GB VPS\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom typing import Callable, Dict\n\nfrom ._base import Config, ConfigError\n\n# The prefix for all cache factor-related environment variables\n_CACHE_PREFIX = \"SYNAPSE_CACHE_FACTOR\"\n\n# Map from canonicalised cache name to cache.\n_CACHES = {}\n\n_DEFAULT_FACTOR_SIZE = 0.5\n_DEFAULT_EVENT_CACHE_SIZE = \"10K\"\n\n\nclass CacheProperties(object):\n def __init__(self):\n # The default factor size for all caches\n self.default_factor_size = float(\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n self.resize_all_caches_func = None\n\n\nproperties = CacheProperties()\n\n\ndef _canonicalise_cache_name(cache_name: str) -> str:\n \"\"\"Gets the canonical form of the cache name.\n\n Since we specify cache names in config and environment variables we need to\n ignore case and special characters. For example, some caches have asterisks\n in their name to denote that they're not attached to a particular database\n function, and these asterisks need to be stripped out\n \"\"\"\n\n cache_name = re.sub(r\"[^A-Za-z_1-9]\", \"\", cache_name)\n\n return cache_name.lower()\n\n\ndef add_resizable_cache(cache_name: str, cache_resize_callback: Callable):\n \"\"\"Register a cache that's size can dynamically change\n\n Args:\n cache_name: A reference to the cache\n cache_resize_callback: A callback function that will be ran whenever\n the cache needs to be resized\n \"\"\"\n # Some caches have '*' in them which we strip out.\n cache_name = _canonicalise_cache_name(cache_name)\n\n _CACHES[cache_name] = cache_resize_callback\n\n # Ensure all loaded caches are sized appropriately\n #\n # This method should only run once the config has been read,\n # as it uses values read from it\n if properties.resize_all_caches_func:\n properties.resize_all_caches_func()\n\n\nclass CacheConfig(Config):\n section = \"caches\"\n _environ = os.environ\n\n @staticmethod\n def reset():\n \"\"\"Resets the caches to their defaults. 
Used for tests.\"\"\"\n properties.default_factor_size = float(\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n properties.resize_all_caches_func = None\n _CACHES.clear()\n\n def generate_config_section(self, **kwargs):\n return \"\"\"\\\n ## Caching ##\n\n # Caching can be configured through the following options.\n #\n # A cache 'factor' is a multiplier that can be applied to each of\n # Synapse's caches in order to increase or decrease the maximum\n # number of entries that can be stored.\n\n # The number of events to cache in memory. Not affected by\n # caches.global_factor.\n #\n #event_cache_size: 10K\n\n caches:\n # Controls the global cache factor, which is the default cache factor\n # for all caches if a specific factor for that cache is not otherwise\n # set.\n #\n # This can also be set by the \"SYNAPSE_CACHE_FACTOR\" environment\n # variable. Setting by environment variable takes priority over\n # setting through the config file.\n #\n # Defaults to 0.5, which will half the size of all caches.\n #\n #global_factor: 1.0\n\n # A dictionary of cache name to cache factor for that individual\n # cache. Overrides the global cache factor for a given cache.\n #\n # These can also be set through environment variables comprised\n # of \"SYNAPSE_CACHE_FACTOR_\" + the name of the cache in capital\n # letters and underscores. Setting by environment variable\n # takes priority over setting through the config file.\n # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0\n #\n # Some caches have '*' and other characters that are not\n # alphanumeric or underscores. These caches can be named with or\n # without the special characters stripped. For example, to specify\n # the cache factor for `*stateGroupCache*` via an environment\n # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.\n #\n per_cache_factors:\n #get_users_who_share_room_with_user: 2.0\n \"\"\"\n\n def read_config(self, config, **kwargs):\n self.event_cache_size = self.parse_size(\n config.get(\"event_cache_size\", _DEFAULT_EVENT_CACHE_SIZE)\n )\n self.cache_factors = {} # type: Dict[str, float]\n\n cache_config = config.get(\"caches\") or {}\n self.global_factor = cache_config.get(\n \"global_factor\", properties.default_factor_size\n )\n if not isinstance(self.global_factor, (int, float)):\n raise ConfigError(\"caches.global_factor must be a number.\")\n\n # Set the global one so that it's reflected in new caches\n properties.default_factor_size = self.global_factor\n\n # Load cache factors from the config\n individual_factors = cache_config.get(\"per_cache_factors\") or {}\n if not isinstance(individual_factors, dict):\n raise ConfigError(\"caches.per_cache_factors must be a dictionary\")\n\n # Canonicalise the cache names *before* updating with the environment\n # variables.\n individual_factors = {\n _canonicalise_cache_name(key): val\n for key, val in individual_factors.items()\n }\n\n # Override factors from environment if necessary\n individual_factors.update(\n {\n _canonicalise_cache_name(key[len(_CACHE_PREFIX) + 1 :]): float(val)\n for key, val in self._environ.items()\n if key.startswith(_CACHE_PREFIX + \"_\")\n }\n )\n\n for cache, factor in individual_factors.items():\n if not isinstance(factor, (int, float)):\n raise ConfigError(\n \"caches.per_cache_factors.%s must be a number\" % (cache,)\n )\n self.cache_factors[cache] = factor\n\n # Resize all caches (if necessary) with the new factors we've loaded\n self.resize_all_caches()\n\n # Store this function so that it can be called 
from other classes without\n # needing an instance of Config\n properties.resize_all_caches_func = self.resize_all_caches\n\n def resize_all_caches(self):\n \"\"\"Ensure all cache sizes are up to date\n\n For each cache, run the mapped callback function with either\n a specific cache factor or the default, global one.\n \"\"\"\n for cache_name, callback in _CACHES.items():\n new_factor = self.cache_factors.get(cache_name, self.global_factor)\n callback(new_factor)\n", "path": "synapse/config/cache.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport threading\nfrom typing import Callable, Dict\n\nfrom ._base import Config, ConfigError\n\n# The prefix for all cache factor-related environment variables\n_CACHE_PREFIX = \"SYNAPSE_CACHE_FACTOR\"\n\n# Map from canonicalised cache name to cache.\n_CACHES = {}\n\n# a lock on the contents of _CACHES\n_CACHES_LOCK = threading.Lock()\n\n_DEFAULT_FACTOR_SIZE = 0.5\n_DEFAULT_EVENT_CACHE_SIZE = \"10K\"\n\n\nclass CacheProperties(object):\n def __init__(self):\n # The default factor size for all caches\n self.default_factor_size = float(\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n self.resize_all_caches_func = None\n\n\nproperties = CacheProperties()\n\n\ndef _canonicalise_cache_name(cache_name: str) -> str:\n \"\"\"Gets the canonical form of the cache name.\n\n Since we specify cache names in config and environment variables we need to\n ignore case and special characters. For example, some caches have asterisks\n in their name to denote that they're not attached to a particular database\n function, and these asterisks need to be stripped out\n \"\"\"\n\n cache_name = re.sub(r\"[^A-Za-z_1-9]\", \"\", cache_name)\n\n return cache_name.lower()\n\n\ndef add_resizable_cache(cache_name: str, cache_resize_callback: Callable):\n \"\"\"Register a cache that's size can dynamically change\n\n Args:\n cache_name: A reference to the cache\n cache_resize_callback: A callback function that will be ran whenever\n the cache needs to be resized\n \"\"\"\n # Some caches have '*' in them which we strip out.\n cache_name = _canonicalise_cache_name(cache_name)\n\n # sometimes caches are initialised from background threads, so we need to make\n # sure we don't conflict with another thread running a resize operation\n with _CACHES_LOCK:\n _CACHES[cache_name] = cache_resize_callback\n\n # Ensure all loaded caches are sized appropriately\n #\n # This method should only run once the config has been read,\n # as it uses values read from it\n if properties.resize_all_caches_func:\n properties.resize_all_caches_func()\n\n\nclass CacheConfig(Config):\n section = \"caches\"\n _environ = os.environ\n\n @staticmethod\n def reset():\n \"\"\"Resets the caches to their defaults. 
Used for tests.\"\"\"\n properties.default_factor_size = float(\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n properties.resize_all_caches_func = None\n with _CACHES_LOCK:\n _CACHES.clear()\n\n def generate_config_section(self, **kwargs):\n return \"\"\"\\\n ## Caching ##\n\n # Caching can be configured through the following options.\n #\n # A cache 'factor' is a multiplier that can be applied to each of\n # Synapse's caches in order to increase or decrease the maximum\n # number of entries that can be stored.\n\n # The number of events to cache in memory. Not affected by\n # caches.global_factor.\n #\n #event_cache_size: 10K\n\n caches:\n # Controls the global cache factor, which is the default cache factor\n # for all caches if a specific factor for that cache is not otherwise\n # set.\n #\n # This can also be set by the \"SYNAPSE_CACHE_FACTOR\" environment\n # variable. Setting by environment variable takes priority over\n # setting through the config file.\n #\n # Defaults to 0.5, which will half the size of all caches.\n #\n #global_factor: 1.0\n\n # A dictionary of cache name to cache factor for that individual\n # cache. Overrides the global cache factor for a given cache.\n #\n # These can also be set through environment variables comprised\n # of \"SYNAPSE_CACHE_FACTOR_\" + the name of the cache in capital\n # letters and underscores. Setting by environment variable\n # takes priority over setting through the config file.\n # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0\n #\n # Some caches have '*' and other characters that are not\n # alphanumeric or underscores. These caches can be named with or\n # without the special characters stripped. For example, to specify\n # the cache factor for `*stateGroupCache*` via an environment\n # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.\n #\n per_cache_factors:\n #get_users_who_share_room_with_user: 2.0\n \"\"\"\n\n def read_config(self, config, **kwargs):\n self.event_cache_size = self.parse_size(\n config.get(\"event_cache_size\", _DEFAULT_EVENT_CACHE_SIZE)\n )\n self.cache_factors = {} # type: Dict[str, float]\n\n cache_config = config.get(\"caches\") or {}\n self.global_factor = cache_config.get(\n \"global_factor\", properties.default_factor_size\n )\n if not isinstance(self.global_factor, (int, float)):\n raise ConfigError(\"caches.global_factor must be a number.\")\n\n # Set the global one so that it's reflected in new caches\n properties.default_factor_size = self.global_factor\n\n # Load cache factors from the config\n individual_factors = cache_config.get(\"per_cache_factors\") or {}\n if not isinstance(individual_factors, dict):\n raise ConfigError(\"caches.per_cache_factors must be a dictionary\")\n\n # Canonicalise the cache names *before* updating with the environment\n # variables.\n individual_factors = {\n _canonicalise_cache_name(key): val\n for key, val in individual_factors.items()\n }\n\n # Override factors from environment if necessary\n individual_factors.update(\n {\n _canonicalise_cache_name(key[len(_CACHE_PREFIX) + 1 :]): float(val)\n for key, val in self._environ.items()\n if key.startswith(_CACHE_PREFIX + \"_\")\n }\n )\n\n for cache, factor in individual_factors.items():\n if not isinstance(factor, (int, float)):\n raise ConfigError(\n \"caches.per_cache_factors.%s must be a number\" % (cache,)\n )\n self.cache_factors[cache] = factor\n\n # Resize all caches (if necessary) with the new factors we've loaded\n self.resize_all_caches()\n\n # Store this function so 
that it can be called from other classes without\n # needing an instance of Config\n properties.resize_all_caches_func = self.resize_all_caches\n\n def resize_all_caches(self):\n \"\"\"Ensure all cache sizes are up to date\n\n For each cache, run the mapped callback function with either\n a specific cache factor or the default, global one.\n \"\"\"\n # block other threads from modifying _CACHES while we iterate it.\n with _CACHES_LOCK:\n for cache_name, callback in _CACHES.items():\n new_factor = self.cache_factors.get(cache_name, self.global_factor)\n callback(new_factor)\n", "path": "synapse/config/cache.py"}]} | 2,666 | 487 |
gh_patches_debug_41566 | rasdani/github-patches | git_diff | vaexio__vaex-312 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
vaex.groupby type casting
In the current implementation of `groupby`, if a column is of type `int`, calculating the mean will also be of type `int`, which is numerically not accurate. This should probably be cast to `float` somewhere behind the scenes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/agg.py`
Content:
```
1 import numpy as np
2
3 from .stat import _Statistic
4 import vaex.superagg
5
6 aggregates = {}
7
8
9 def register(f, name=None):
10 name = name or f.__name__
11 aggregates[name] = f
12 return f
13
14
15 class AggregatorDescriptor(object):
16 def __repr__(self):
17 return 'vaex.agg.{}({!r})'.format(self.short_name, str(self.expression))
18
19 def finish(self, value):
20 return value
21
22
23 class AggregatorDescriptorBasic(AggregatorDescriptor):
24 def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):
25 self.name = name
26 self.short_name = short_name
27 self.expression = expression
28 self.agg_args = agg_args
29 if not multi_args:
30 if self.expression == '*':
31 self.expressions = []
32 else:
33 self.expressions = [self.expression]
34 else:
35 self.expressions = expression
36
37 def pretty_name(self, id=None):
38 id = id or "_".join(map(str, self.expression))
39 return '{0}_{1}'.format(id, self.short_name)
40
41 def add_operations(self, agg_task, edges=True, **kwargs):
42 value = agg_task.add_aggregation_operation(self, edges=edges, **kwargs)
43 @vaex.delayed
44 def finish(value):
45 return self.finish(value)
46 return finish(value)
47
48 def _create_operation(self, df, grid):
49 if self.expression == '*':
50 self.dtype_in = np.dtype('int64')
51 self.dtype_out = np.dtype('int64')
52 else:
53 self.dtype_in = df[str(self.expressions[0])].dtype
54 self.dtype_out = self.dtype_in
55 if self.short_name == "count":
56 self.dtype_out = np.dtype('int64')
57 agg_op_type = vaex.utils.find_type_from_dtype(vaex.superagg, self.name + "_", self.dtype_in)
58 agg_op = agg_op_type(grid, *self.agg_args)
59 return agg_op
60
61
62 class AggregatorDescriptorMulti(AggregatorDescriptor):
63 """Uses multiple operations/aggregation to calculate the final aggretation"""
64 def __init__(self, name, expression, short_name):
65 self.name = name
66 self.short_name = short_name
67 self.expression = expression
68 self.expressions = [self.expression]
69 self._add_sub_agg()
70
71 def _add_sub_agg(self):
72 pass
73
74 def pretty_name(self, id=None):
75 id = id or "_".join(map(str, self.expression))
76 return '{0}_{1}'.format(id, self.short_name)
77
78
79 class AggregatorDescriptorMean(AggregatorDescriptorMulti):
80 def __init__(self, name, expression, short_name="mean"):
81 super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)
82
83 def _add_sub_agg(self):
84 self.sum = sum(self.expression)
85 self.count = count(self.expression)
86
87 def add_operations(self, agg_task, **kwargs):
88 task_sum = self.sum.add_operations(agg_task, **kwargs)
89 task_count = self.count.add_operations(agg_task, **kwargs)
90 self.dtype_in = self.sum.dtype_in
91 self.dtype_out = self.sum.dtype_out
92 @vaex.delayed
93 def finish(sum, count):
94 dtype = sum.dtype
95 if sum.dtype.kind == 'M':
96 sum = sum.view('uint64')
97 count = count.view('uint64')
98 with np.errstate(divide='ignore', invalid='ignore'):
99 mean = sum / count
100 if dtype.kind != mean.dtype.kind:
101 # TODO: not sure why view does not work
102 mean = mean.astype(dtype)
103 return mean
104 return finish(task_sum, task_count)
105
106
107 class AggregatorDescriptorVar(AggregatorDescriptorMulti):
108 def __init__(self, name, expression, short_name="var", ddof=0):
109 super(AggregatorDescriptorVar, self).__init__(name, expression, short_name)
110 self.ddof = ddof
111
112 def add_operations(self, agg_task, **kwargs):
113 expression_sum = expression = agg_task.df[str(self.expression)]
114 expression = expression_sum = expression.astype('float64')
115 sum_moment = _sum_moment(str(expression_sum), 2)
116 sum_ = sum(str(expression_sum))
117 count_ = count(str(expression))
118
119 task_sum_moment = sum_moment.add_operations(agg_task, **kwargs)
120 task_sum = sum_.add_operations(agg_task, **kwargs)
121 task_count = count_.add_operations(agg_task, **kwargs)
122 self.dtype_in = sum_.dtype_in
123 self.dtype_out = sum_.dtype_out
124 @vaex.delayed
125 def finish(sum_moment, sum, count):
126 # print(self.sum, sum, task_sum)
127 dtype = sum.dtype
128 if sum.dtype.kind == 'M':
129 sum = sum.view('uint64')
130 sum_moment = sum_moment.view('uint64')
131 count = count.view('uint64')
132 with np.errstate(divide='ignore', invalid='ignore'):
133 mean = sum / count
134 print(sum, sum_moment)
135 raw_moments2 = sum_moment/count
136 variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)
137 if dtype.kind != mean.dtype.kind:
138 # TODO: not sure why view does not work
139 variance = variance.astype(dtype)
140 return self.finish(variance)
141 return finish(task_sum_moment, task_sum, task_count)
142
143
144 class AggregatorDescriptorStd(AggregatorDescriptorVar):
145 def finish(self, value):
146 return value**0.5
147
148 @register
149 def count(expression='*'):
150 '''Creates a count aggregation'''
151 return AggregatorDescriptorBasic('AggCount', expression, 'count')
152
153 @register
154 def sum(expression):
155 '''Creates a sum aggregation'''
156 return AggregatorDescriptorBasic('AggSum', expression, 'sum')
157
158 @register
159 def mean(expression):
160 '''Creates a mean aggregation'''
161 return AggregatorDescriptorMean('mean', expression, 'mean')
162
163 @register
164 def min(expression):
165 '''Creates a min aggregation'''
166 return AggregatorDescriptorBasic('AggMin', expression, 'min')
167
168 @register
169 def _sum_moment(expression, moment):
170 '''Creates a sum of moment aggregator'''
171 return AggregatorDescriptorBasic('AggSumMoment', expression, 'summoment', agg_args=[moment])
172
173 @register
174 def max(expression):
175 '''Creates a max aggregation'''
176 return AggregatorDescriptorBasic('AggMax', expression, 'max')
177
178 @register
179 def first(expression, order_expression):
180 '''Creates a max aggregation'''
181 return AggregatorDescriptorBasic('AggFirst', [expression, order_expression], 'first', multi_args=True)
182
183 @register
184 def std(expression, ddof=0):
185 '''Creates a standard deviation aggregation'''
186 return AggregatorDescriptorStd('std', expression, 'std', ddof=ddof)
187
188 @register
189 def var(expression, ddof=0):
190 '''Creates a variance aggregation'''
191 return AggregatorDescriptorVar('var', expression, 'var', ddof=ddof)
192
193 # @register
194 # def covar(x, y):
195 # '''Creates a standard deviation aggregation'''
196 # return _Statistic('covar', x, y)
197
198 # @register
199 # def correlation(x, y):
200 # '''Creates a standard deviation aggregation'''
201 # return _Statistic('correlation', x, y)
202
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-core/vaex/agg.py b/packages/vaex-core/vaex/agg.py
--- a/packages/vaex-core/vaex/agg.py
+++ b/packages/vaex-core/vaex/agg.py
@@ -24,7 +24,7 @@
def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):
self.name = name
self.short_name = short_name
- self.expression = expression
+ self.expression = str(expression)
self.agg_args = agg_args
if not multi_args:
if self.expression == '*':
@@ -66,10 +66,6 @@
self.short_name = short_name
self.expression = expression
self.expressions = [self.expression]
- self._add_sub_agg()
-
- def _add_sub_agg(self):
- pass
def pretty_name(self, id=None):
id = id or "_".join(map(str, self.expression))
@@ -80,15 +76,20 @@
def __init__(self, name, expression, short_name="mean"):
super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)
- def _add_sub_agg(self):
- self.sum = sum(self.expression)
- self.count = count(self.expression)
-
def add_operations(self, agg_task, **kwargs):
- task_sum = self.sum.add_operations(agg_task, **kwargs)
- task_count = self.count.add_operations(agg_task, **kwargs)
- self.dtype_in = self.sum.dtype_in
- self.dtype_out = self.sum.dtype_out
+ expression = expression_sum = expression = agg_task.df[str(self.expression)]
+ # ints, floats and bools are upcasted
+ if expression_sum.dtype.kind in "buif":
+ expression = expression_sum = expression_sum.astype('float64')
+
+ sum_agg = sum(expression_sum)
+ count_agg = count(expression)
+
+ task_sum = sum_agg.add_operations(agg_task, **kwargs)
+ task_count = count_agg.add_operations(agg_task, **kwargs)
+ self.dtype_in = sum_agg.dtype_in
+ self.dtype_out = sum_agg.dtype_out
+
@vaex.delayed
def finish(sum, count):
dtype = sum.dtype
@@ -101,6 +102,7 @@
# TODO: not sure why view does not work
mean = mean.astype(dtype)
return mean
+
return finish(task_sum, task_count)
@@ -123,7 +125,6 @@
self.dtype_out = sum_.dtype_out
@vaex.delayed
def finish(sum_moment, sum, count):
- # print(self.sum, sum, task_sum)
dtype = sum.dtype
if sum.dtype.kind == 'M':
sum = sum.view('uint64')
@@ -131,7 +132,6 @@
count = count.view('uint64')
with np.errstate(divide='ignore', invalid='ignore'):
mean = sum / count
- print(sum, sum_moment)
raw_moments2 = sum_moment/count
variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)
if dtype.kind != mean.dtype.kind:
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/agg.py b/packages/vaex-core/vaex/agg.py\n--- a/packages/vaex-core/vaex/agg.py\n+++ b/packages/vaex-core/vaex/agg.py\n@@ -24,7 +24,7 @@\n def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):\n self.name = name\n self.short_name = short_name\n- self.expression = expression\n+ self.expression = str(expression)\n self.agg_args = agg_args\n if not multi_args:\n if self.expression == '*':\n@@ -66,10 +66,6 @@\n self.short_name = short_name\n self.expression = expression\n self.expressions = [self.expression]\n- self._add_sub_agg()\n-\n- def _add_sub_agg(self):\n- pass\n \n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n@@ -80,15 +76,20 @@\n def __init__(self, name, expression, short_name=\"mean\"):\n super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)\n \n- def _add_sub_agg(self):\n- self.sum = sum(self.expression)\n- self.count = count(self.expression)\n-\n def add_operations(self, agg_task, **kwargs):\n- task_sum = self.sum.add_operations(agg_task, **kwargs)\n- task_count = self.count.add_operations(agg_task, **kwargs)\n- self.dtype_in = self.sum.dtype_in\n- self.dtype_out = self.sum.dtype_out\n+ expression = expression_sum = expression = agg_task.df[str(self.expression)]\n+ # ints, floats and bools are upcasted\n+ if expression_sum.dtype.kind in \"buif\":\n+ expression = expression_sum = expression_sum.astype('float64')\n+\n+ sum_agg = sum(expression_sum)\n+ count_agg = count(expression)\n+\n+ task_sum = sum_agg.add_operations(agg_task, **kwargs)\n+ task_count = count_agg.add_operations(agg_task, **kwargs)\n+ self.dtype_in = sum_agg.dtype_in\n+ self.dtype_out = sum_agg.dtype_out\n+\n @vaex.delayed\n def finish(sum, count):\n dtype = sum.dtype\n@@ -101,6 +102,7 @@\n # TODO: not sure why view does not work\n mean = mean.astype(dtype)\n return mean\n+\n return finish(task_sum, task_count)\n \n \n@@ -123,7 +125,6 @@\n self.dtype_out = sum_.dtype_out\n @vaex.delayed\n def finish(sum_moment, sum, count):\n- # print(self.sum, sum, task_sum)\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n@@ -131,7 +132,6 @@\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n- print(sum, sum_moment)\n raw_moments2 = sum_moment/count\n variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)\n if dtype.kind != mean.dtype.kind:\n", "issue": "vaex.groupby type casting\nIn the current implementation of `groupby`, if a column is on of type `int`, calculating the mean will also be of type `int`, which is numerically not accurate. This should probably be cased to `float` somewhere behind the scenes. 
\n", "before_files": [{"content": "import numpy as np\n\nfrom .stat import _Statistic\nimport vaex.superagg\n\naggregates = {}\n\n\ndef register(f, name=None):\n name = name or f.__name__\n aggregates[name] = f\n return f\n\n\nclass AggregatorDescriptor(object):\n def __repr__(self):\n return 'vaex.agg.{}({!r})'.format(self.short_name, str(self.expression))\n\n def finish(self, value):\n return value\n\n\nclass AggregatorDescriptorBasic(AggregatorDescriptor):\n def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):\n self.name = name\n self.short_name = short_name\n self.expression = expression\n self.agg_args = agg_args\n if not multi_args:\n if self.expression == '*':\n self.expressions = []\n else:\n self.expressions = [self.expression]\n else:\n self.expressions = expression\n\n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n return '{0}_{1}'.format(id, self.short_name)\n\n def add_operations(self, agg_task, edges=True, **kwargs):\n value = agg_task.add_aggregation_operation(self, edges=edges, **kwargs)\n @vaex.delayed\n def finish(value):\n return self.finish(value)\n return finish(value)\n\n def _create_operation(self, df, grid):\n if self.expression == '*':\n self.dtype_in = np.dtype('int64')\n self.dtype_out = np.dtype('int64')\n else:\n self.dtype_in = df[str(self.expressions[0])].dtype\n self.dtype_out = self.dtype_in\n if self.short_name == \"count\":\n self.dtype_out = np.dtype('int64')\n agg_op_type = vaex.utils.find_type_from_dtype(vaex.superagg, self.name + \"_\", self.dtype_in)\n agg_op = agg_op_type(grid, *self.agg_args)\n return agg_op\n\n\nclass AggregatorDescriptorMulti(AggregatorDescriptor):\n \"\"\"Uses multiple operations/aggregation to calculate the final aggretation\"\"\"\n def __init__(self, name, expression, short_name):\n self.name = name\n self.short_name = short_name\n self.expression = expression\n self.expressions = [self.expression]\n self._add_sub_agg()\n\n def _add_sub_agg(self):\n pass\n\n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n return '{0}_{1}'.format(id, self.short_name)\n\n\nclass AggregatorDescriptorMean(AggregatorDescriptorMulti):\n def __init__(self, name, expression, short_name=\"mean\"):\n super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)\n\n def _add_sub_agg(self):\n self.sum = sum(self.expression)\n self.count = count(self.expression)\n\n def add_operations(self, agg_task, **kwargs):\n task_sum = self.sum.add_operations(agg_task, **kwargs)\n task_count = self.count.add_operations(agg_task, **kwargs)\n self.dtype_in = self.sum.dtype_in\n self.dtype_out = self.sum.dtype_out\n @vaex.delayed\n def finish(sum, count):\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n if dtype.kind != mean.dtype.kind:\n # TODO: not sure why view does not work\n mean = mean.astype(dtype)\n return mean\n return finish(task_sum, task_count)\n\n\nclass AggregatorDescriptorVar(AggregatorDescriptorMulti):\n def __init__(self, name, expression, short_name=\"var\", ddof=0):\n super(AggregatorDescriptorVar, self).__init__(name, expression, short_name)\n self.ddof = ddof\n\n def add_operations(self, agg_task, **kwargs):\n expression_sum = expression = agg_task.df[str(self.expression)]\n expression = expression_sum = expression.astype('float64')\n sum_moment = _sum_moment(str(expression_sum), 2)\n sum_ = 
sum(str(expression_sum))\n count_ = count(str(expression))\n\n task_sum_moment = sum_moment.add_operations(agg_task, **kwargs)\n task_sum = sum_.add_operations(agg_task, **kwargs)\n task_count = count_.add_operations(agg_task, **kwargs)\n self.dtype_in = sum_.dtype_in\n self.dtype_out = sum_.dtype_out\n @vaex.delayed\n def finish(sum_moment, sum, count):\n # print(self.sum, sum, task_sum)\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n sum_moment = sum_moment.view('uint64')\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n print(sum, sum_moment)\n raw_moments2 = sum_moment/count\n variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)\n if dtype.kind != mean.dtype.kind:\n # TODO: not sure why view does not work\n variance = variance.astype(dtype)\n return self.finish(variance)\n return finish(task_sum_moment, task_sum, task_count)\n\n\nclass AggregatorDescriptorStd(AggregatorDescriptorVar):\n def finish(self, value):\n return value**0.5\n\n@register\ndef count(expression='*'):\n '''Creates a count aggregation'''\n return AggregatorDescriptorBasic('AggCount', expression, 'count')\n\n@register\ndef sum(expression):\n '''Creates a sum aggregation'''\n return AggregatorDescriptorBasic('AggSum', expression, 'sum')\n\n@register\ndef mean(expression):\n '''Creates a mean aggregation'''\n return AggregatorDescriptorMean('mean', expression, 'mean')\n\n@register\ndef min(expression):\n '''Creates a min aggregation'''\n return AggregatorDescriptorBasic('AggMin', expression, 'min')\n\n@register\ndef _sum_moment(expression, moment):\n '''Creates a sum of moment aggregator'''\n return AggregatorDescriptorBasic('AggSumMoment', expression, 'summoment', agg_args=[moment])\n\n@register\ndef max(expression):\n '''Creates a max aggregation'''\n return AggregatorDescriptorBasic('AggMax', expression, 'max')\n\n@register\ndef first(expression, order_expression):\n '''Creates a max aggregation'''\n return AggregatorDescriptorBasic('AggFirst', [expression, order_expression], 'first', multi_args=True)\n\n@register\ndef std(expression, ddof=0):\n '''Creates a standard deviation aggregation'''\n return AggregatorDescriptorStd('std', expression, 'std', ddof=ddof)\n\n@register\ndef var(expression, ddof=0):\n '''Creates a variance aggregation'''\n return AggregatorDescriptorVar('var', expression, 'var', ddof=ddof)\n\n# @register\n# def covar(x, y):\n# '''Creates a standard deviation aggregation'''\n# return _Statistic('covar', x, y)\n\n# @register\n# def correlation(x, y):\n# '''Creates a standard deviation aggregation'''\n# return _Statistic('correlation', x, y)\n\n", "path": "packages/vaex-core/vaex/agg.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .stat import _Statistic\nimport vaex.superagg\n\naggregates = {}\n\n\ndef register(f, name=None):\n name = name or f.__name__\n aggregates[name] = f\n return f\n\n\nclass AggregatorDescriptor(object):\n def __repr__(self):\n return 'vaex.agg.{}({!r})'.format(self.short_name, str(self.expression))\n\n def finish(self, value):\n return value\n\n\nclass AggregatorDescriptorBasic(AggregatorDescriptor):\n def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):\n self.name = name\n self.short_name = short_name\n self.expression = str(expression)\n self.agg_args = agg_args\n if not multi_args:\n if self.expression == '*':\n self.expressions = []\n else:\n self.expressions = [self.expression]\n else:\n self.expressions = expression\n\n def 
pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n return '{0}_{1}'.format(id, self.short_name)\n\n def add_operations(self, agg_task, edges=True, **kwargs):\n value = agg_task.add_aggregation_operation(self, edges=edges, **kwargs)\n @vaex.delayed\n def finish(value):\n return self.finish(value)\n return finish(value)\n\n def _create_operation(self, df, grid):\n if self.expression == '*':\n self.dtype_in = np.dtype('int64')\n self.dtype_out = np.dtype('int64')\n else:\n self.dtype_in = df[str(self.expressions[0])].dtype\n self.dtype_out = self.dtype_in\n if self.short_name == \"count\":\n self.dtype_out = np.dtype('int64')\n agg_op_type = vaex.utils.find_type_from_dtype(vaex.superagg, self.name + \"_\", self.dtype_in)\n agg_op = agg_op_type(grid, *self.agg_args)\n return agg_op\n\n\nclass AggregatorDescriptorMulti(AggregatorDescriptor):\n \"\"\"Uses multiple operations/aggregation to calculate the final aggretation\"\"\"\n def __init__(self, name, expression, short_name):\n self.name = name\n self.short_name = short_name\n self.expression = expression\n self.expressions = [self.expression]\n\n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n return '{0}_{1}'.format(id, self.short_name)\n\n\nclass AggregatorDescriptorMean(AggregatorDescriptorMulti):\n def __init__(self, name, expression, short_name=\"mean\"):\n super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)\n\n def add_operations(self, agg_task, **kwargs):\n expression = expression_sum = expression = agg_task.df[str(self.expression)]\n # ints, floats and bools are upcasted\n if expression_sum.dtype.kind in \"buif\":\n expression = expression_sum = expression_sum.astype('float64')\n\n sum_agg = sum(expression_sum)\n count_agg = count(expression)\n\n task_sum = sum_agg.add_operations(agg_task, **kwargs)\n task_count = count_agg.add_operations(agg_task, **kwargs)\n self.dtype_in = sum_agg.dtype_in\n self.dtype_out = sum_agg.dtype_out\n\n @vaex.delayed\n def finish(sum, count):\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n if dtype.kind != mean.dtype.kind:\n # TODO: not sure why view does not work\n mean = mean.astype(dtype)\n return mean\n\n return finish(task_sum, task_count)\n\n\nclass AggregatorDescriptorVar(AggregatorDescriptorMulti):\n def __init__(self, name, expression, short_name=\"var\", ddof=0):\n super(AggregatorDescriptorVar, self).__init__(name, expression, short_name)\n self.ddof = ddof\n\n def add_operations(self, agg_task, **kwargs):\n expression_sum = expression = agg_task.df[str(self.expression)]\n expression = expression_sum = expression.astype('float64')\n sum_moment = _sum_moment(str(expression_sum), 2)\n sum_ = sum(str(expression_sum))\n count_ = count(str(expression))\n\n task_sum_moment = sum_moment.add_operations(agg_task, **kwargs)\n task_sum = sum_.add_operations(agg_task, **kwargs)\n task_count = count_.add_operations(agg_task, **kwargs)\n self.dtype_in = sum_.dtype_in\n self.dtype_out = sum_.dtype_out\n @vaex.delayed\n def finish(sum_moment, sum, count):\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n sum_moment = sum_moment.view('uint64')\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n raw_moments2 = sum_moment/count\n variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)\n if dtype.kind 
!= mean.dtype.kind:\n # TODO: not sure why view does not work\n variance = variance.astype(dtype)\n return self.finish(variance)\n return finish(task_sum_moment, task_sum, task_count)\n\n\nclass AggregatorDescriptorStd(AggregatorDescriptorVar):\n def finish(self, value):\n return value**0.5\n\n@register\ndef count(expression='*'):\n '''Creates a count aggregation'''\n return AggregatorDescriptorBasic('AggCount', expression, 'count')\n\n@register\ndef sum(expression):\n '''Creates a sum aggregation'''\n return AggregatorDescriptorBasic('AggSum', expression, 'sum')\n\n@register\ndef mean(expression):\n '''Creates a mean aggregation'''\n return AggregatorDescriptorMean('mean', expression, 'mean')\n\n@register\ndef min(expression):\n '''Creates a min aggregation'''\n return AggregatorDescriptorBasic('AggMin', expression, 'min')\n\n@register\ndef _sum_moment(expression, moment):\n '''Creates a sum of moment aggregator'''\n return AggregatorDescriptorBasic('AggSumMoment', expression, 'summoment', agg_args=[moment])\n\n@register\ndef max(expression):\n '''Creates a max aggregation'''\n return AggregatorDescriptorBasic('AggMax', expression, 'max')\n\n@register\ndef first(expression, order_expression):\n '''Creates a max aggregation'''\n return AggregatorDescriptorBasic('AggFirst', [expression, order_expression], 'first', multi_args=True)\n\n@register\ndef std(expression, ddof=0):\n '''Creates a standard deviation aggregation'''\n return AggregatorDescriptorStd('std', expression, 'std', ddof=ddof)\n\n@register\ndef var(expression, ddof=0):\n '''Creates a variance aggregation'''\n return AggregatorDescriptorVar('var', expression, 'var', ddof=ddof)\n\n# @register\n# def covar(x, y):\n# '''Creates a standard deviation aggregation'''\n# return _Statistic('covar', x, y)\n\n# @register\n# def correlation(x, y):\n# '''Creates a standard deviation aggregation'''\n# return _Statistic('correlation', x, y)\n\n", "path": "packages/vaex-core/vaex/agg.py"}]} | 2,452 | 742 |
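As context for the row above: the vaex patch upcasts integer/boolean columns to `float64` before the sum/count aggregations so the resulting mean is not forced back into an integer dtype. The sketch below shows the same idea on a plain NumPy grouped mean; `grouped_mean`, its arguments, and the dense-index grouping are illustrative assumptions, not vaex's actual aggregation machinery.

```python
import numpy as np

def grouped_mean(values, group_ids, n_groups):
    # group_ids: integer group index per row, in [0, n_groups)
    values = np.asarray(values)
    # ints, bools and floats are upcast, mirroring the patch above
    if values.dtype.kind in "buif":
        values = values.astype("float64")
    sums = np.zeros(n_groups, dtype=values.dtype)
    counts = np.zeros(n_groups, dtype="int64")
    np.add.at(sums, group_ids, values)   # per-group sums
    np.add.at(counts, group_ids, 1)      # per-group counts
    with np.errstate(divide="ignore", invalid="ignore"):
        return sums / counts             # float64 means, even for int input

# e.g. grouped_mean([1, 2, 2], [0, 0, 1], 2) -> array([1.5, 2. ])
```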
gh_patches_debug_25109 | rasdani/github-patches | git_diff | jazzband__pip-tools-737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip-sync uses first pip script on PATH instead of the one from the Python version it is run with
Describe the issue briefly here.
##### Environment Versions
1. OS Type: Android 8.1 aarch64/Latest Termux
1. Python version: 3.7.2
1. pip version: `pip 18.1 from /data/data/com.termux/files/usr/lib/python2.7/site-packages/pip (python 2.7)` (pip3.7 --version is `pip 19.0.2 from /data/data/com.termux/files/usr/lib/python3.7/site-packages/pip (python 3.7)
1. pip-tools version: `pip-compile, version 3.3.2`
##### Steps to replicate
1. Get Termux
2. Run `pkg up && pkg install python python2`
3. Install pip on Python 3 (get-pip.py)
4. Install pip on Python 2 (get-pip.py)
5. Install piptools on Python 3
6. Use `pip-sync` with a requirements.txt file and watch it installing to Python 2
##### Expected result
Installs into Python 3 if ran on Python 3 and to Python 2 if ran on Python2
##### Actual result
Installs to whichever Python version `pip` script on `PATH` is from
##### Extra information
If using Python 2 and 3 together, an extra `pip-compile` and `pip-sync` script per Python major and per Python minor version to be able to select Python version to run piptools on would be very useful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/sync.py`
Content:
```
1 import collections
2 import os
3 import sys
4 import tempfile
5 from subprocess import check_call
6
7 from piptools._compat import stdlib_pkgs, DEV_PKGS
8 from . import click
9 from .exceptions import IncompatibleRequirements, UnsupportedConstraint
10 from .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq
11
12 PACKAGES_TO_IGNORE = [
13 '-markerlib',
14 'pip',
15 'pip-tools',
16 'pip-review',
17 'pkg-resources',
18 ] + list(stdlib_pkgs) + list(DEV_PKGS)
19
20
21 def dependency_tree(installed_keys, root_key):
22 """
23 Calculate the dependency tree for the package `root_key` and return
24 a collection of all its dependencies. Uses a DFS traversal algorithm.
25
26 `installed_keys` should be a {key: requirement} mapping, e.g.
27 {'django': from_line('django==1.8')}
28 `root_key` should be the key to return the dependency tree for.
29 """
30 dependencies = set()
31 queue = collections.deque()
32
33 if root_key in installed_keys:
34 dep = installed_keys[root_key]
35 queue.append(dep)
36
37 while queue:
38 v = queue.popleft()
39 key = key_from_req(v)
40 if key in dependencies:
41 continue
42
43 dependencies.add(key)
44
45 for dep_specifier in v.requires():
46 dep_name = key_from_req(dep_specifier)
47 if dep_name in installed_keys:
48 dep = installed_keys[dep_name]
49
50 if dep_specifier.specifier.contains(dep.version):
51 queue.append(dep)
52
53 return dependencies
54
55
56 def get_dists_to_ignore(installed):
57 """
58 Returns a collection of package names to ignore when performing pip-sync,
59 based on the currently installed environment. For example, when pip-tools
60 is installed in the local environment, it should be ignored, including all
61 of its dependencies (e.g. click). When pip-tools is not installed
62 locally, click should also be installed/uninstalled depending on the given
63 requirements.
64 """
65 installed_keys = {key_from_req(r): r for r in installed}
66 return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))
67
68
69 def merge(requirements, ignore_conflicts):
70 by_key = {}
71
72 for ireq in requirements:
73 if ireq.link is not None and not ireq.editable:
74 msg = ('pip-compile does not support URLs as packages, unless they are editable. '
75 'Perhaps add -e option?')
76 raise UnsupportedConstraint(msg, ireq)
77
78 key = ireq.link or key_from_req(ireq.req)
79
80 if not ignore_conflicts:
81 existing_ireq = by_key.get(key)
82 if existing_ireq:
83 # NOTE: We check equality here since we can assume that the
84 # requirements are all pinned
85 if ireq.specifier != existing_ireq.specifier:
86 raise IncompatibleRequirements(ireq, existing_ireq)
87
88 # TODO: Always pick the largest specifier in case of a conflict
89 by_key[key] = ireq
90
91 return by_key.values()
92
93
94 def diff(compiled_requirements, installed_dists):
95 """
96 Calculate which packages should be installed or uninstalled, given a set
97 of compiled requirements and a list of currently installed modules.
98 """
99 requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}
100
101 satisfied = set() # holds keys
102 to_install = set() # holds InstallRequirement objects
103 to_uninstall = set() # holds keys
104
105 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
106 for dist in installed_dists:
107 key = key_from_req(dist)
108 if key not in requirements_lut or not requirements_lut[key].match_markers():
109 to_uninstall.add(key)
110 elif requirements_lut[key].specifier.contains(dist.version):
111 satisfied.add(key)
112
113 for key, requirement in requirements_lut.items():
114 if key not in satisfied and requirement.match_markers():
115 to_install.add(requirement)
116
117 # Make sure to not uninstall any packages that should be ignored
118 to_uninstall -= set(pkgs_to_ignore)
119
120 return (to_install, to_uninstall)
121
122
123 def sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):
124 """
125 Install and uninstalls the given sets of modules.
126 """
127 if not to_uninstall and not to_install:
128 click.echo("Everything up-to-date")
129
130 if pip_flags is None:
131 pip_flags = []
132
133 if not verbose:
134 pip_flags += ['-q']
135
136 if os.environ.get('VIRTUAL_ENV'):
137 # find pip via PATH
138 pip = 'pip'
139 else:
140 # find pip in same directory as pip-sync entry-point script
141 pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')
142
143 if to_uninstall:
144 if dry_run:
145 click.echo("Would uninstall:")
146 for pkg in to_uninstall:
147 click.echo(" {}".format(pkg))
148 else:
149 check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
150
151 if to_install:
152 if install_flags is None:
153 install_flags = []
154 if dry_run:
155 click.echo("Would install:")
156 for ireq in to_install:
157 click.echo(" {}".format(format_requirement(ireq)))
158 else:
159 # prepare requirement lines
160 req_lines = []
161 for ireq in sorted(to_install, key=key_from_ireq):
162 ireq_hashes = get_hashes_from_ireq(ireq)
163 req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
164
165 # save requirement lines to a temporary file
166 tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
167 tmp_req_file.write('\n'.join(req_lines))
168 tmp_req_file.close()
169
170 try:
171 check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)
172 finally:
173 os.unlink(tmp_req_file.name)
174
175 return 0
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -133,20 +133,13 @@
if not verbose:
pip_flags += ['-q']
- if os.environ.get('VIRTUAL_ENV'):
- # find pip via PATH
- pip = 'pip'
- else:
- # find pip in same directory as pip-sync entry-point script
- pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')
-
if to_uninstall:
if dry_run:
click.echo("Would uninstall:")
for pkg in to_uninstall:
click.echo(" {}".format(pkg))
else:
- check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
+ check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
if to_install:
if install_flags is None:
@@ -168,7 +161,9 @@
tmp_req_file.close()
try:
- check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)
+ check_call(
+ [sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags
+ )
finally:
os.unlink(tmp_req_file.name)
| {"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -133,20 +133,13 @@\n if not verbose:\n pip_flags += ['-q']\n \n- if os.environ.get('VIRTUAL_ENV'):\n- # find pip via PATH\n- pip = 'pip'\n- else:\n- # find pip in same directory as pip-sync entry-point script\n- pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')\n-\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n- check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n+ check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n \n if to_install:\n if install_flags is None:\n@@ -168,7 +161,9 @@\n tmp_req_file.close()\n \n try:\n- check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)\n+ check_call(\n+ [sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags\n+ )\n finally:\n os.unlink(tmp_req_file.name)\n", "issue": "pip-sync uses first pip script on PATH instead of the one from the Python version it is run with\nDescribe the issue briefly here.\r\n\r\n##### Environment Versions\r\n\r\n1. OS Type: Android 8.1 aarch64/Latest Termux\r\n1. Python version: 3.7.2\r\n1. pip version: `pip 18.1 from /data/data/com.termux/files/usr/lib/python2.7/site-packages/pip (python 2.7)` (pip3.7 --version is `pip 19.0.2 from /data/data/com.termux/files/usr/lib/python3.7/site-packages/pip (python 3.7)\r\n1. pip-tools version: `pip-compile, version 3.3.2`\r\n\r\n##### Steps to replicate\r\n\r\n1. Get Termux\r\n2. Run `pkg up && pkg install python python2`\r\n3. Install pip on Python 3 (get-pip.py)\r\n4. Install pip on Python 2 (get-pip.py)\r\n5. Install piptools on Python 3\r\n6. Use `pip-sync` with a requirements.txt file and watch it installing to Python 2\r\n\r\n\r\n##### Expected result\r\n\r\nInstalls into Python 3 if ran on Python 3 and to Python 2 if ran on Python2\r\n\r\n##### Actual result\r\n\r\nInstalls to whichever Python version `pip` script on `PATH` is from\r\n\r\n##### Extra information\r\n\r\nIf using Python 2 and 3 together, an extra `pip-compile` and `pip-sync` script per Python major and per Python minor version to be able to select Python version to run piptools on would be very useful.\r\n\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call\n\nfrom piptools._compat import stdlib_pkgs, DEV_PKGS\nfrom . import click\nfrom .exceptions import IncompatibleRequirements, UnsupportedConstraint\nfrom .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq\n\nPACKAGES_TO_IGNORE = [\n '-markerlib',\n 'pip',\n 'pip-tools',\n 'pip-review',\n 'pkg-resources',\n] + list(stdlib_pkgs) + list(DEV_PKGS)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. 
Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n if ireq.link is not None and not ireq.editable:\n msg = ('pip-compile does not support URLs as packages, unless they are editable. '\n 'Perhaps add -e option?')\n raise UnsupportedConstraint(msg, ireq)\n\n key = ireq.link or key_from_req(ireq.req)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n\n return by_key.values()\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n click.echo(\"Everything up-to-date\")\n\n if pip_flags is None:\n pip_flags = []\n\n if not verbose:\n pip_flags += ['-q']\n\n if os.environ.get('VIRTUAL_ENV'):\n # find pip via PATH\n pip = 'pip'\n else:\n # find pip in same directory as pip-sync 
entry-point script\n pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')\n\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n\n if to_install:\n if install_flags is None:\n install_flags = []\n if dry_run:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n else:\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n tmp_req_file.write('\\n'.join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}], "after_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call\n\nfrom piptools._compat import stdlib_pkgs, DEV_PKGS\nfrom . import click\nfrom .exceptions import IncompatibleRequirements, UnsupportedConstraint\nfrom .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq\n\nPACKAGES_TO_IGNORE = [\n '-markerlib',\n 'pip',\n 'pip-tools',\n 'pip-review',\n 'pkg-resources',\n] + list(stdlib_pkgs) + list(DEV_PKGS)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n if ireq.link is not None and not ireq.editable:\n msg = ('pip-compile does not support URLs as packages, unless they are editable. 
'\n 'Perhaps add -e option?')\n raise UnsupportedConstraint(msg, ireq)\n\n key = ireq.link or key_from_req(ireq.req)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n\n return by_key.values()\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n click.echo(\"Everything up-to-date\")\n\n if pip_flags is None:\n pip_flags = []\n\n if not verbose:\n pip_flags += ['-q']\n\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n\n if to_install:\n if install_flags is None:\n install_flags = []\n if dry_run:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n else:\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n tmp_req_file.write('\\n'.join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call(\n [sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}]} | 2,369 | 335 |
gh_patches_debug_24939 | rasdani/github-patches | git_diff | unionai-oss__pandera-958 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: mypy complains about datetime.date / pandera.Date type
Hi!
I tried the following to represent a column of `datetime.date` objects (with no time):
```python
expiration_date: pt.Series[datetime.date] = ...
```
and
```python
expiration_date: pt.Series[pandas_engine.Date] = ...
```
Either annotation raises the error "Value of type variable "GenericDtype" of "Series" cannot be "...".
I looked in the tests, and it's still unclear to me how to do this.
Using a DataFrameSchema I was able to set `dtype=datetime.date` and it appears to work, but what about with SchemaModel?
What's the right declaration?
Thank you,
--- END ISSUE ---
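For reference, the DataFrameSchema workaround mentioned in the issue looks roughly like this; only the column name comes from the report, while the sample data and variable names are illustrative:

```python
import datetime

import pandas as pd
import pandera as pa

# Object-based API: the reporter notes that passing datetime.date as the dtype works here.
schema = pa.DataFrameSchema({"expiration_date": pa.Column(datetime.date)})

df = pd.DataFrame({"expiration_date": [datetime.date(2023, 1, 31)]})
schema.validate(df)
```

The open question about the `SchemaModel` spelling is what the patch further below addresses by exposing a `Date` alias in `pandera.typing`.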
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandera/typing/common.py`
Content:
```
1 """Common typing functionality."""
2 # pylint:disable=abstract-method,disable=too-many-ancestors
3
4 import inspect
5 from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar
6
7 import pandas as pd
8 import typing_inspect
9
10 from .. import dtypes
11 from ..engines import numpy_engine, pandas_engine
12
13 Bool = dtypes.Bool #: ``"bool"`` numpy dtype
14 DateTime = dtypes.DateTime #: ``"datetime64[ns]"`` numpy dtype
15 Timedelta = dtypes.Timedelta #: ``"timedelta64[ns]"`` numpy dtype
16 Category = dtypes.Category #: pandas ``"categorical"`` datatype
17 Float = dtypes.Float #: ``"float"`` numpy dtype
18 Float16 = dtypes.Float16 #: ``"float16"`` numpy dtype
19 Float32 = dtypes.Float32 #: ``"float32"`` numpy dtype
20 Float64 = dtypes.Float64 #: ``"float64"`` numpy dtype
21 Int = dtypes.Int #: ``"int"`` numpy dtype
22 Int8 = dtypes.Int8 #: ``"int8"`` numpy dtype
23 Int16 = dtypes.Int16 #: ``"int16"`` numpy dtype
24 Int32 = dtypes.Int32 #: ``"int32"`` numpy dtype
25 Int64 = dtypes.Int64 #: ``"int64"`` numpy dtype
26 UInt8 = dtypes.UInt8 #: ``"uint8"`` numpy dtype
27 UInt16 = dtypes.UInt16 #: ``"uint16"`` numpy dtype
28 UInt32 = dtypes.UInt32 #: ``"uint32"`` numpy dtype
29 UInt64 = dtypes.UInt64 #: ``"uint64"`` numpy dtype
30 INT8 = pandas_engine.INT8 #: ``"Int8"`` pandas dtype:: pandas 0.24.0+
31 INT16 = pandas_engine.INT16 #: ``"Int16"`` pandas dtype: pandas 0.24.0+
32 INT32 = pandas_engine.INT32 #: ``"Int32"`` pandas dtype: pandas 0.24.0+
33 INT64 = pandas_engine.INT64 #: ``"Int64"`` pandas dtype: pandas 0.24.0+
34 UINT8 = pandas_engine.UINT8 #: ``"UInt8"`` pandas dtype:: pandas 0.24.0+
35 UINT16 = pandas_engine.UINT16 #: ``"UInt16"`` pandas dtype: pandas 0.24.0+
36 UINT32 = pandas_engine.UINT32 #: ``"UInt32"`` pandas dtype: pandas 0.24.0+
37 UINT64 = pandas_engine.UINT64 #: ``"UInt64"`` pandas dtype: pandas 0.24.0+
38 Object = numpy_engine.Object #: ``"object"`` numpy dtype
39 String = dtypes.String #: ``"str"`` numpy dtype
40 #: ``"string"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will
41 #: fall back on the str-as-object-array representation.
42 STRING = pandas_engine.STRING #: ``"str"`` numpy dtype
43 BOOL = pandas_engine.BOOL #: ``"str"`` numpy dtype
44
45 try:
46 Geometry = pandas_engine.Geometry # : ``"geometry"`` geopandas dtype
47 GEOPANDAS_INSTALLED = True
48 except AttributeError:
49 GEOPANDAS_INSTALLED = False
50
51 if GEOPANDAS_INSTALLED:
52 GenericDtype = TypeVar( # type: ignore
53 "GenericDtype",
54 bool,
55 int,
56 str,
57 float,
58 pd.core.dtypes.base.ExtensionDtype,
59 Bool,
60 DateTime,
61 Timedelta,
62 Category,
63 Float,
64 Float16,
65 Float32,
66 Float64,
67 Int,
68 Int8,
69 Int16,
70 Int32,
71 Int64,
72 UInt8,
73 UInt16,
74 UInt32,
75 UInt64,
76 INT8,
77 INT16,
78 INT32,
79 INT64,
80 UINT8,
81 UINT16,
82 UINT32,
83 UINT64,
84 Object,
85 String,
86 STRING,
87 Geometry,
88 covariant=True,
89 )
90 else:
91 GenericDtype = TypeVar( # type: ignore
92 "GenericDtype",
93 bool,
94 int,
95 str,
96 float,
97 pd.core.dtypes.base.ExtensionDtype,
98 Bool,
99 DateTime,
100 Timedelta,
101 Category,
102 Float,
103 Float16,
104 Float32,
105 Float64,
106 Int,
107 Int8,
108 Int16,
109 Int32,
110 Int64,
111 UInt8,
112 UInt16,
113 UInt32,
114 UInt64,
115 INT8,
116 INT16,
117 INT32,
118 INT64,
119 UINT8,
120 UINT16,
121 UINT32,
122 UINT64,
123 Object,
124 String,
125 STRING,
126 covariant=True,
127 )
128
129 Schema = TypeVar("Schema", bound="SchemaModel") # type: ignore
130
131
132 # pylint:disable=invalid-name
133 if TYPE_CHECKING:
134 T = TypeVar("T") # pragma: no cover
135 else:
136 T = Schema
137
138
139 class DataFrameBase(Generic[T]):
140 # pylint: disable=too-few-public-methods
141 """
142 Pandera Dataframe base class for validating dataframes on
143 initialization.
144 """
145
146 default_dtype: Optional[Type] = None
147
148 def __setattr__(self, name: str, value: Any) -> None:
149 # pylint: disable=no-member
150 object.__setattr__(self, name, value)
151 if name == "__orig_class__":
152 orig_class = getattr(self, "__orig_class__")
153 class_args = getattr(orig_class, "__args__", None)
154 if class_args is not None and any(
155 x.__name__ == "SchemaModel"
156 for x in inspect.getmro(class_args[0])
157 ):
158 schema_model = value.__args__[0]
159
160 # prevent the double validation problem by preventing checks for
161 # dataframes with a defined pandera.schema
162 pandera_accessor = getattr(self, "pandera")
163 if (
164 pandera_accessor.schema is None
165 or pandera_accessor.schema != schema_model.to_schema()
166 ):
167 pandera_accessor.add_schema(schema_model.to_schema())
168 self.__dict__ = schema_model.validate(self).__dict__
169
170
171 # pylint:disable=too-few-public-methods
172 class SeriesBase(Generic[GenericDtype]):
173 """Pandera Series base class to use for all pandas-like APIs."""
174
175 default_dtype: Optional[Type] = None
176
177 def __get__(
178 self, instance: object, owner: Type
179 ) -> str: # pragma: no cover
180 raise AttributeError("Series should resolve to Field-s")
181
182
183 # pylint:disable=too-few-public-methods
184 class IndexBase(Generic[GenericDtype]):
185 """Representation of pandas.Index, only used for type annotation.
186
187 *new in 0.5.0*
188 """
189
190 default_dtype: Optional[Type] = None
191
192 def __get__(
193 self, instance: object, owner: Type
194 ) -> str: # pragma: no cover
195 raise AttributeError("Indexes should resolve to pa.Index-s")
196
197
198 class AnnotationInfo: # pylint:disable=too-few-public-methods
199 """Captures extra information about an annotation.
200
201 Attributes:
202 origin: The non-parameterized generic class.
203 arg: The first generic type (SchemaModel does not support more than
204 1 argument).
205 literal: Whether the annotation is a literal.
206 optional: Whether the annotation is optional.
207 raw_annotation: The raw annotation.
208 metadata: Extra arguments passed to :data:`typing.Annotated`.
209 """
210
211 def __init__(self, raw_annotation: Type) -> None:
212 self._parse_annotation(raw_annotation)
213
214 @property
215 def is_generic_df(self) -> bool:
216 """True if the annotation is a DataFrameBase subclass."""
217 try:
218 if self.origin is None:
219 return False
220 return issubclass(self.origin, DataFrameBase)
221 except TypeError:
222 return False
223
224 def _parse_annotation(self, raw_annotation: Type) -> None:
225 """Parse key information from annotation.
226
227 :param annotation: A subscripted type.
228 :returns: Annotation
229 """
230 self.raw_annotation = raw_annotation
231 self.origin = self.arg = None
232
233 self.optional = typing_inspect.is_optional_type(raw_annotation)
234 if self.optional and typing_inspect.is_union_type(raw_annotation):
235 # Annotated with Optional or Union[..., NoneType]
236 # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)
237 raw_annotation = typing_inspect.get_args(raw_annotation)[0]
238
239 self.origin = typing_inspect.get_origin(raw_annotation)
240 # Replace empty tuple returned from get_args by None
241 args = typing_inspect.get_args(raw_annotation) or None
242 self.arg = args[0] if args else args
243
244 self.metadata = getattr(self.arg, "__metadata__", None)
245 if self.metadata:
246 self.arg = typing_inspect.get_args(self.arg)[0]
247
248 self.literal = typing_inspect.is_literal_type(self.arg)
249 if self.literal:
250 self.arg = typing_inspect.get_args(self.arg)[0]
251
252 self.default_dtype = getattr(raw_annotation, "default_dtype", None)
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandera/typing/common.py b/pandera/typing/common.py
--- a/pandera/typing/common.py
+++ b/pandera/typing/common.py
@@ -11,7 +11,9 @@
from ..engines import numpy_engine, pandas_engine
Bool = dtypes.Bool #: ``"bool"`` numpy dtype
+Date = dtypes.Date #: ``datetime.date`` object dtype
DateTime = dtypes.DateTime #: ``"datetime64[ns]"`` numpy dtype
+Decimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype
Timedelta = dtypes.Timedelta #: ``"timedelta64[ns]"`` numpy dtype
Category = dtypes.Category #: pandas ``"categorical"`` datatype
Float = dtypes.Float #: ``"float"`` numpy dtype
@@ -57,7 +59,9 @@
float,
pd.core.dtypes.base.ExtensionDtype,
Bool,
+ Date,
DateTime,
+ Decimal,
Timedelta,
Category,
Float,
@@ -96,7 +100,9 @@
float,
pd.core.dtypes.base.ExtensionDtype,
Bool,
+ Date,
DateTime,
+ Decimal,
Timedelta,
Category,
Float,
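Assuming the change excerpted above is applied (it defines `Date` and `Decimal` in `pandera.typing.common` and adds them to the `GenericDtype` TypeVar), the annotation from the issue can be written roughly as follows. The schema class, column name and sample data are illustrative; depending on the installed release, `Date` may also be importable directly from `pandera.typing`:

```python
import datetime

import pandas as pd
import pandera as pa
from pandera.typing import Series
from pandera.typing.common import Date  # exposed as pandera.typing.Date in later releases

class ContractSchema(pa.SchemaModel):
    expiration_date: Series[Date]

df = pd.DataFrame({"expiration_date": [datetime.date(2023, 1, 31)]})
ContractSchema.validate(df)
```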
| {"golden_diff": "diff --git a/pandera/typing/common.py b/pandera/typing/common.py\n--- a/pandera/typing/common.py\n+++ b/pandera/typing/common.py\n@@ -11,7 +11,9 @@\n from ..engines import numpy_engine, pandas_engine\n \n Bool = dtypes.Bool #: ``\"bool\"`` numpy dtype\n+Date = dtypes.Date #: ``datetime.date`` object dtype\n DateTime = dtypes.DateTime #: ``\"datetime64[ns]\"`` numpy dtype\n+Decimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype\n Timedelta = dtypes.Timedelta #: ``\"timedelta64[ns]\"`` numpy dtype\n Category = dtypes.Category #: pandas ``\"categorical\"`` datatype\n Float = dtypes.Float #: ``\"float\"`` numpy dtype\n@@ -57,7 +59,9 @@\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n+ Date,\n DateTime,\n+ Decimal,\n Timedelta,\n Category,\n Float,\n@@ -96,7 +100,9 @@\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n+ Date,\n DateTime,\n+ Decimal,\n Timedelta,\n Category,\n Float,\n", "issue": "Bug: mypy complains about datetime.date / pandera.Date type\nHi! \r\nI tried the following to represent a column of `datetime.date` objects (with no time):\r\n```python\r\n expiration_date: pt.Series[datetime.date] = ...\r\n```\r\nand\r\n```python\r\n expiration_date: pt.Series[pandas_engine.Date] = ...\r\n```\r\n\r\nEither raises an error \"Value of type variable \"GenericDtype\" of \"Series\" cannot be \"...\".\r\nI looked in the tests, it's still unclear to me how to do this.\r\n\r\nUsing a DataframeSchema I was able to set `dtype=datetime.date` and it appears to work, but what about with SchemaModel?\r\nWhat's the right declaration?\r\n\r\nThank you,\r\n\r\n\n", "before_files": [{"content": "\"\"\"Common typing functionality.\"\"\"\n# pylint:disable=abstract-method,disable=too-many-ancestors\n\nimport inspect\nfrom typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar\n\nimport pandas as pd\nimport typing_inspect\n\nfrom .. 
import dtypes\nfrom ..engines import numpy_engine, pandas_engine\n\nBool = dtypes.Bool #: ``\"bool\"`` numpy dtype\nDateTime = dtypes.DateTime #: ``\"datetime64[ns]\"`` numpy dtype\nTimedelta = dtypes.Timedelta #: ``\"timedelta64[ns]\"`` numpy dtype\nCategory = dtypes.Category #: pandas ``\"categorical\"`` datatype\nFloat = dtypes.Float #: ``\"float\"`` numpy dtype\nFloat16 = dtypes.Float16 #: ``\"float16\"`` numpy dtype\nFloat32 = dtypes.Float32 #: ``\"float32\"`` numpy dtype\nFloat64 = dtypes.Float64 #: ``\"float64\"`` numpy dtype\nInt = dtypes.Int #: ``\"int\"`` numpy dtype\nInt8 = dtypes.Int8 #: ``\"int8\"`` numpy dtype\nInt16 = dtypes.Int16 #: ``\"int16\"`` numpy dtype\nInt32 = dtypes.Int32 #: ``\"int32\"`` numpy dtype\nInt64 = dtypes.Int64 #: ``\"int64\"`` numpy dtype\nUInt8 = dtypes.UInt8 #: ``\"uint8\"`` numpy dtype\nUInt16 = dtypes.UInt16 #: ``\"uint16\"`` numpy dtype\nUInt32 = dtypes.UInt32 #: ``\"uint32\"`` numpy dtype\nUInt64 = dtypes.UInt64 #: ``\"uint64\"`` numpy dtype\nINT8 = pandas_engine.INT8 #: ``\"Int8\"`` pandas dtype:: pandas 0.24.0+\nINT16 = pandas_engine.INT16 #: ``\"Int16\"`` pandas dtype: pandas 0.24.0+\nINT32 = pandas_engine.INT32 #: ``\"Int32\"`` pandas dtype: pandas 0.24.0+\nINT64 = pandas_engine.INT64 #: ``\"Int64\"`` pandas dtype: pandas 0.24.0+\nUINT8 = pandas_engine.UINT8 #: ``\"UInt8\"`` pandas dtype:: pandas 0.24.0+\nUINT16 = pandas_engine.UINT16 #: ``\"UInt16\"`` pandas dtype: pandas 0.24.0+\nUINT32 = pandas_engine.UINT32 #: ``\"UInt32\"`` pandas dtype: pandas 0.24.0+\nUINT64 = pandas_engine.UINT64 #: ``\"UInt64\"`` pandas dtype: pandas 0.24.0+\nObject = numpy_engine.Object #: ``\"object\"`` numpy dtype\nString = dtypes.String #: ``\"str\"`` numpy dtype\n#: ``\"string\"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will\n#: fall back on the str-as-object-array representation.\nSTRING = pandas_engine.STRING #: ``\"str\"`` numpy dtype\nBOOL = pandas_engine.BOOL #: ``\"str\"`` numpy dtype\n\ntry:\n Geometry = pandas_engine.Geometry # : ``\"geometry\"`` geopandas dtype\n GEOPANDAS_INSTALLED = True\nexcept AttributeError:\n GEOPANDAS_INSTALLED = False\n\nif GEOPANDAS_INSTALLED:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n DateTime,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n Geometry,\n covariant=True,\n )\nelse:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n DateTime,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n covariant=True,\n )\n\nSchema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n\n\n# pylint:disable=invalid-name\nif TYPE_CHECKING:\n T = TypeVar(\"T\") # pragma: no cover\nelse:\n T = Schema\n\n\nclass DataFrameBase(Generic[T]):\n # pylint: disable=too-few-public-methods\n \"\"\"\n Pandera Dataframe base class for validating dataframes on\n initialization.\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __setattr__(self, name: str, value: Any) -> None:\n # pylint: disable=no-member\n object.__setattr__(self, name, value)\n if 
name == \"__orig_class__\":\n orig_class = getattr(self, \"__orig_class__\")\n class_args = getattr(orig_class, \"__args__\", None)\n if class_args is not None and any(\n x.__name__ == \"SchemaModel\"\n for x in inspect.getmro(class_args[0])\n ):\n schema_model = value.__args__[0]\n\n # prevent the double validation problem by preventing checks for\n # dataframes with a defined pandera.schema\n pandera_accessor = getattr(self, \"pandera\")\n if (\n pandera_accessor.schema is None\n or pandera_accessor.schema != schema_model.to_schema()\n ):\n pandera_accessor.add_schema(schema_model.to_schema())\n self.__dict__ = schema_model.validate(self).__dict__\n\n\n# pylint:disable=too-few-public-methods\nclass SeriesBase(Generic[GenericDtype]):\n \"\"\"Pandera Series base class to use for all pandas-like APIs.\"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Series should resolve to Field-s\")\n\n\n# pylint:disable=too-few-public-methods\nclass IndexBase(Generic[GenericDtype]):\n \"\"\"Representation of pandas.Index, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Indexes should resolve to pa.Index-s\")\n\n\nclass AnnotationInfo: # pylint:disable=too-few-public-methods\n \"\"\"Captures extra information about an annotation.\n\n Attributes:\n origin: The non-parameterized generic class.\n arg: The first generic type (SchemaModel does not support more than\n 1 argument).\n literal: Whether the annotation is a literal.\n optional: Whether the annotation is optional.\n raw_annotation: The raw annotation.\n metadata: Extra arguments passed to :data:`typing.Annotated`.\n \"\"\"\n\n def __init__(self, raw_annotation: Type) -> None:\n self._parse_annotation(raw_annotation)\n\n @property\n def is_generic_df(self) -> bool:\n \"\"\"True if the annotation is a DataFrameBase subclass.\"\"\"\n try:\n if self.origin is None:\n return False\n return issubclass(self.origin, DataFrameBase)\n except TypeError:\n return False\n\n def _parse_annotation(self, raw_annotation: Type) -> None:\n \"\"\"Parse key information from annotation.\n\n :param annotation: A subscripted type.\n :returns: Annotation\n \"\"\"\n self.raw_annotation = raw_annotation\n self.origin = self.arg = None\n\n self.optional = typing_inspect.is_optional_type(raw_annotation)\n if self.optional and typing_inspect.is_union_type(raw_annotation):\n # Annotated with Optional or Union[..., NoneType]\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n\n self.origin = typing_inspect.get_origin(raw_annotation)\n # Replace empty tuple returned from get_args by None\n args = typing_inspect.get_args(raw_annotation) or None\n self.arg = args[0] if args else args\n\n self.metadata = getattr(self.arg, \"__metadata__\", None)\n if self.metadata:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.default_dtype = getattr(raw_annotation, \"default_dtype\", None)\n", "path": "pandera/typing/common.py"}], "after_files": [{"content": "\"\"\"Common typing functionality.\"\"\"\n# pylint:disable=abstract-method,disable=too-many-ancestors\n\nimport inspect\nfrom typing import TYPE_CHECKING, Any, 
Generic, Optional, Type, TypeVar\n\nimport pandas as pd\nimport typing_inspect\n\nfrom .. import dtypes\nfrom ..engines import numpy_engine, pandas_engine\n\nBool = dtypes.Bool #: ``\"bool\"`` numpy dtype\nDate = dtypes.Date #: ``datetime.date`` object dtype\nDateTime = dtypes.DateTime #: ``\"datetime64[ns]\"`` numpy dtype\nDecimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype\nTimedelta = dtypes.Timedelta #: ``\"timedelta64[ns]\"`` numpy dtype\nCategory = dtypes.Category #: pandas ``\"categorical\"`` datatype\nFloat = dtypes.Float #: ``\"float\"`` numpy dtype\nFloat16 = dtypes.Float16 #: ``\"float16\"`` numpy dtype\nFloat32 = dtypes.Float32 #: ``\"float32\"`` numpy dtype\nFloat64 = dtypes.Float64 #: ``\"float64\"`` numpy dtype\nInt = dtypes.Int #: ``\"int\"`` numpy dtype\nInt8 = dtypes.Int8 #: ``\"int8\"`` numpy dtype\nInt16 = dtypes.Int16 #: ``\"int16\"`` numpy dtype\nInt32 = dtypes.Int32 #: ``\"int32\"`` numpy dtype\nInt64 = dtypes.Int64 #: ``\"int64\"`` numpy dtype\nUInt8 = dtypes.UInt8 #: ``\"uint8\"`` numpy dtype\nUInt16 = dtypes.UInt16 #: ``\"uint16\"`` numpy dtype\nUInt32 = dtypes.UInt32 #: ``\"uint32\"`` numpy dtype\nUInt64 = dtypes.UInt64 #: ``\"uint64\"`` numpy dtype\nINT8 = pandas_engine.INT8 #: ``\"Int8\"`` pandas dtype:: pandas 0.24.0+\nINT16 = pandas_engine.INT16 #: ``\"Int16\"`` pandas dtype: pandas 0.24.0+\nINT32 = pandas_engine.INT32 #: ``\"Int32\"`` pandas dtype: pandas 0.24.0+\nINT64 = pandas_engine.INT64 #: ``\"Int64\"`` pandas dtype: pandas 0.24.0+\nUINT8 = pandas_engine.UINT8 #: ``\"UInt8\"`` pandas dtype:: pandas 0.24.0+\nUINT16 = pandas_engine.UINT16 #: ``\"UInt16\"`` pandas dtype: pandas 0.24.0+\nUINT32 = pandas_engine.UINT32 #: ``\"UInt32\"`` pandas dtype: pandas 0.24.0+\nUINT64 = pandas_engine.UINT64 #: ``\"UInt64\"`` pandas dtype: pandas 0.24.0+\nObject = numpy_engine.Object #: ``\"object\"`` numpy dtype\nString = dtypes.String #: ``\"str\"`` numpy dtype\n#: ``\"string\"`` pandas dtypes: pandas 1.0.0+. 
For <1.0.0, this enum will\n#: fall back on the str-as-object-array representation.\nSTRING = pandas_engine.STRING #: ``\"str\"`` numpy dtype\nBOOL = pandas_engine.BOOL #: ``\"str\"`` numpy dtype\n\ntry:\n Geometry = pandas_engine.Geometry # : ``\"geometry\"`` geopandas dtype\n GEOPANDAS_INSTALLED = True\nexcept AttributeError:\n GEOPANDAS_INSTALLED = False\n\nif GEOPANDAS_INSTALLED:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n Date,\n DateTime,\n Decimal,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n Geometry,\n covariant=True,\n )\nelse:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n Date,\n DateTime,\n Decimal,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n covariant=True,\n )\n\nSchema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n\n\n# pylint:disable=invalid-name\nif TYPE_CHECKING:\n T = TypeVar(\"T\") # pragma: no cover\nelse:\n T = Schema\n\n\nclass DataFrameBase(Generic[T]):\n # pylint: disable=too-few-public-methods\n \"\"\"\n Pandera Dataframe base class for validating dataframes on\n initialization.\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __setattr__(self, name: str, value: Any) -> None:\n # pylint: disable=no-member\n object.__setattr__(self, name, value)\n if name == \"__orig_class__\":\n orig_class = getattr(self, \"__orig_class__\")\n class_args = getattr(orig_class, \"__args__\", None)\n if class_args is not None and any(\n x.__name__ == \"SchemaModel\"\n for x in inspect.getmro(class_args[0])\n ):\n schema_model = value.__args__[0]\n\n # prevent the double validation problem by preventing checks for\n # dataframes with a defined pandera.schema\n pandera_accessor = getattr(self, \"pandera\")\n if (\n pandera_accessor.schema is None\n or pandera_accessor.schema != schema_model.to_schema()\n ):\n pandera_accessor.add_schema(schema_model.to_schema())\n self.__dict__ = schema_model.validate(self).__dict__\n\n\n# pylint:disable=too-few-public-methods\nclass SeriesBase(Generic[GenericDtype]):\n \"\"\"Pandera Series base class to use for all pandas-like APIs.\"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Series should resolve to Field-s\")\n\n\n# pylint:disable=too-few-public-methods\nclass IndexBase(Generic[GenericDtype]):\n \"\"\"Representation of pandas.Index, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Indexes should resolve to pa.Index-s\")\n\n\nclass AnnotationInfo: # pylint:disable=too-few-public-methods\n \"\"\"Captures extra information about an annotation.\n\n Attributes:\n origin: The non-parameterized generic class.\n arg: The first generic type (SchemaModel does not support more than\n 1 argument).\n literal: Whether the annotation is a literal.\n optional: Whether the 
annotation is optional.\n raw_annotation: The raw annotation.\n metadata: Extra arguments passed to :data:`typing.Annotated`.\n \"\"\"\n\n def __init__(self, raw_annotation: Type) -> None:\n self._parse_annotation(raw_annotation)\n\n @property\n def is_generic_df(self) -> bool:\n \"\"\"True if the annotation is a DataFrameBase subclass.\"\"\"\n try:\n if self.origin is None:\n return False\n return issubclass(self.origin, DataFrameBase)\n except TypeError:\n return False\n\n def _parse_annotation(self, raw_annotation: Type) -> None:\n \"\"\"Parse key information from annotation.\n\n :param annotation: A subscripted type.\n :returns: Annotation\n \"\"\"\n self.raw_annotation = raw_annotation\n self.origin = self.arg = None\n\n self.optional = typing_inspect.is_optional_type(raw_annotation)\n if self.optional and typing_inspect.is_union_type(raw_annotation):\n # Annotated with Optional or Union[..., NoneType]\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n\n self.origin = typing_inspect.get_origin(raw_annotation)\n # Replace empty tuple returned from get_args by None\n args = typing_inspect.get_args(raw_annotation) or None\n self.arg = args[0] if args else args\n\n self.metadata = getattr(self.arg, \"__metadata__\", None)\n if self.metadata:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.default_dtype = getattr(raw_annotation, \"default_dtype\", None)\n", "path": "pandera/typing/common.py"}]} | 3,166 | 280 |
gh_patches_debug_66775 | rasdani/github-patches | git_diff | apache__airflow-1296 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent 'owner' field in examples
Dear Airflow Maintainers,
### Environment
- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet): **1.7.0**
- Screen shots of your DAG's graph and tree views:

- Operating System: (Windows Version or `$ uname -a`) **Ubuntu 14.04**
- Python Version: `$ python --version` **2.7**
### Description of Issue
- What did you expect to happen? **All of the examples have a consistent owner, probably 'airflow'**
- What happened instead? **[Some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_python_operator.py) examples have `airflow`, [some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_passing_params_via_test_command.py) have `me`**
### Reproduction Steps
1. install airflow 1.7.0 via pip
2. start the webserver
3. look at the web UI, probably http://localhost:8080
**Inconsistent hard-coding in the examples will likely lead to confusion for new users.**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/example_dags/example_trigger_controller_dag.py`
Content:
```
1
2 """This example illustrates the use of the TriggerDagRunOperator. There are 2
3 entities at work in this scenario:
4 1. The Controller DAG - the DAG that conditionally executes the trigger
5 2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)
6
7 This example illustrates the following features :
8 1. A TriggerDagRunOperator that takes:
9 a. A python callable that decides whether or not to trigger the Target DAG
10 b. An optional params dict passed to the python callable to help in
11 evaluating whether or not to trigger the Target DAG
12 c. The id (name) of the Target DAG
13 d. The python callable can add contextual info to the DagRun created by
14 way of adding a Pickleable payload (e.g. dictionary of primitives). This
15 state is then made available to the TargetDag
16 2. A Target DAG : c.f. example_trigger_target_dag.py
17 """
18
19 from airflow import DAG
20 from airflow.operators import TriggerDagRunOperator
21 from datetime import datetime
22
23 import pprint
24
25 pp = pprint.PrettyPrinter(indent=4)
26
27
28 def conditionally_trigger(context, dag_run_obj):
29 """This function decides whether or not to Trigger the remote DAG"""
30 c_p =context['params']['condition_param']
31 print("Controller DAG : conditionally_trigger = {}".format(c_p))
32 if context['params']['condition_param']:
33 dag_run_obj.payload = {'message': context['params']['message']}
34 pp.pprint(dag_run_obj.payload)
35 return dag_run_obj
36
37
38 # Define the DAG
39 dag = DAG(dag_id='example_trigger_controller_dag',
40 default_args={"owner": "me",
41 "start_date": datetime.now()},
42 schedule_interval='@once')
43
44
45 # Define the single task in this controller example DAG
46 trigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',
47 trigger_dag_id="example_trigger_target_dag",
48 python_callable=conditionally_trigger,
49 params={'condition_param': True,
50 'message': 'Hello World'},
51 dag=dag)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/example_dags/example_trigger_controller_dag.py b/airflow/example_dags/example_trigger_controller_dag.py
--- a/airflow/example_dags/example_trigger_controller_dag.py
+++ b/airflow/example_dags/example_trigger_controller_dag.py
@@ -37,7 +37,7 @@
# Define the DAG
dag = DAG(dag_id='example_trigger_controller_dag',
- default_args={"owner": "me",
+ default_args={"owner": "airflow",
"start_date": datetime.now()},
schedule_interval='@once')
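A hedged sketch of a regression check for the convention this patch enforces, namely that every task in the bundled example DAGs reports `airflow` as its owner. The `DagBag(include_examples=True)` call and the `owner` attribute follow the Airflow API of that era and may differ in later releases:

```python
from airflow.models import DagBag

def test_example_dags_use_airflow_owner():
    # Load the example DAGs that ship with Airflow and check each task's owner.
    dagbag = DagBag(include_examples=True)
    for dag_id, dag in dagbag.dags.items():
        for task in dag.tasks:
            assert task.owner == "airflow", \
                "{}.{} has owner {!r}".format(dag_id, task.task_id, task.owner)
```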
| {"golden_diff": "diff --git a/airflow/example_dags/example_trigger_controller_dag.py b/airflow/example_dags/example_trigger_controller_dag.py\n--- a/airflow/example_dags/example_trigger_controller_dag.py\n+++ b/airflow/example_dags/example_trigger_controller_dag.py\n@@ -37,7 +37,7 @@\n \n # Define the DAG\n dag = DAG(dag_id='example_trigger_controller_dag',\n- default_args={\"owner\": \"me\",\n+ default_args={\"owner\": \"airflow\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n", "issue": "Inconsistent 'owner' field in examples\nDear Airflow Maintainers,\n### Environment\n- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet): **1.7.0**\n- Screen shots of your DAG's graph and tree views:\n \n- Operating System: (Windows Version or `$ uname -a`) **Ubuntu 14.04**\n- Python Version: `$ python --version` **2.7**\n### Description of Issue\n- What did you expect to happen? **All of the examples have a consistent owner, probably 'airflow'**\n- What happened instead? **[Some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_python_operator.py) examples have `airflow`, [some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_passing_params_via_test_command.py) have `me`**\n### Reproduction Steps\n1. install airflow 1.7.0 via pip\n2. start the webserver\n3. look at the web UI, probably http://localhost:8080\n\n**Inconsistent hard-coding in the examples will likely lead to confusion for new users.**\n\n", "before_files": [{"content": "\n\"\"\"This example illustrates the use of the TriggerDagRunOperator. There are 2\nentities at work in this scenario:\n1. The Controller DAG - the DAG that conditionally executes the trigger\n2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)\n\nThis example illustrates the following features :\n1. A TriggerDagRunOperator that takes:\n a. A python callable that decides whether or not to trigger the Target DAG\n b. An optional params dict passed to the python callable to help in\n evaluating whether or not to trigger the Target DAG\n c. The id (name) of the Target DAG\n d. The python callable can add contextual info to the DagRun created by\n way of adding a Pickleable payload (e.g. dictionary of primitives). This\n state is then made available to the TargetDag\n2. A Target DAG : c.f. 
example_trigger_target_dag.py\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.operators import TriggerDagRunOperator\nfrom datetime import datetime\n\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n\ndef conditionally_trigger(context, dag_run_obj):\n \"\"\"This function decides whether or not to Trigger the remote DAG\"\"\"\n c_p =context['params']['condition_param']\n print(\"Controller DAG : conditionally_trigger = {}\".format(c_p))\n if context['params']['condition_param']:\n dag_run_obj.payload = {'message': context['params']['message']}\n pp.pprint(dag_run_obj.payload)\n return dag_run_obj\n\n\n# Define the DAG\ndag = DAG(dag_id='example_trigger_controller_dag',\n default_args={\"owner\": \"me\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n\n\n# Define the single task in this controller example DAG\ntrigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',\n trigger_dag_id=\"example_trigger_target_dag\",\n python_callable=conditionally_trigger,\n params={'condition_param': True,\n 'message': 'Hello World'},\n dag=dag)\n", "path": "airflow/example_dags/example_trigger_controller_dag.py"}], "after_files": [{"content": "\n\"\"\"This example illustrates the use of the TriggerDagRunOperator. There are 2\nentities at work in this scenario:\n1. The Controller DAG - the DAG that conditionally executes the trigger\n2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)\n\nThis example illustrates the following features :\n1. A TriggerDagRunOperator that takes:\n a. A python callable that decides whether or not to trigger the Target DAG\n b. An optional params dict passed to the python callable to help in\n evaluating whether or not to trigger the Target DAG\n c. The id (name) of the Target DAG\n d. The python callable can add contextual info to the DagRun created by\n way of adding a Pickleable payload (e.g. dictionary of primitives). This\n state is then made available to the TargetDag\n2. A Target DAG : c.f. example_trigger_target_dag.py\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.operators import TriggerDagRunOperator\nfrom datetime import datetime\n\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n\ndef conditionally_trigger(context, dag_run_obj):\n \"\"\"This function decides whether or not to Trigger the remote DAG\"\"\"\n c_p =context['params']['condition_param']\n print(\"Controller DAG : conditionally_trigger = {}\".format(c_p))\n if context['params']['condition_param']:\n dag_run_obj.payload = {'message': context['params']['message']}\n pp.pprint(dag_run_obj.payload)\n return dag_run_obj\n\n\n# Define the DAG\ndag = DAG(dag_id='example_trigger_controller_dag',\n default_args={\"owner\": \"airflow\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n\n\n# Define the single task in this controller example DAG\ntrigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',\n trigger_dag_id=\"example_trigger_target_dag\",\n python_callable=conditionally_trigger,\n params={'condition_param': True,\n 'message': 'Hello World'},\n dag=dag)\n", "path": "airflow/example_dags/example_trigger_controller_dag.py"}]} | 1,091 | 127 |
gh_patches_debug_37830 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2969 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: Running the ChatGPT inference example raises an error
### 🐛 Describe the bug
Running inference.py under (https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT)/examples/ throws an OSError:

### Environment
_No response_
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/ChatGPT/examples/inference.py`
Content:
```
1 import argparse
2 import torch
3
4 from chatgpt.nn import BLOOMActor, GPTActor, OPTActor
5 from transformers import AutoTokenizer
6 from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
7
8
9 def eval(args):
10 # configure model
11 if args.model == 'gpt2':
12 model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
13 elif args.model == 'bloom':
14 model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())
15 elif args.model == 'opt':
16 model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
17 else:
18 raise ValueError(f'Unsupported model "{args.model}"')
19
20 # configure tokenizer
21 if args.model == 'gpt2':
22 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
23 tokenizer.pad_token = tokenizer.eos_token
24 elif args.model == 'bloom':
25 tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
26 tokenizer.pad_token = tokenizer.eos_token
27 elif args.model == 'opt':
28 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
29 else:
30 raise ValueError(f'Unsupported model "{args.model}"')
31
32 model.eval()
33 input = args.input
34 input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())
35 outputs = model.generate(input_ids,
36 max_length=args.max_length,
37 do_sample=True,
38 top_k=50,
39 top_p=0.95,
40 num_return_sequences=1)
41 output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)
42 print(output)
43
44
45 if __name__ == '__main__':
46 parser = argparse.ArgumentParser()
47 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])
48 parser.add_argument('--pretrain', type=str, default=None)
49 parser.add_argument('--input', type=str, default='Q: How are you ? A:')
50 parser.add_argument('--max_length', type=int, default=100)
51 args = parser.parse_args()
52 eval(args)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/applications/ChatGPT/examples/inference.py b/applications/ChatGPT/examples/inference.py
--- a/applications/ChatGPT/examples/inference.py
+++ b/applications/ChatGPT/examples/inference.py
@@ -9,30 +9,34 @@
def eval(args):
# configure model
if args.model == 'gpt2':
- model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = GPTActor().to(torch.cuda.current_device())
elif args.model == 'bloom':
- model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = BLOOMActor().to(torch.cuda.current_device())
elif args.model == 'opt':
- model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = OPTActor().to(torch.cuda.current_device())
else:
raise ValueError(f'Unsupported model "{args.model}"')
+ state_dict = torch.load(args.pretrain)
+ actor.model.load_state_dict(state_dict)
+
+
# configure tokenizer
if args.model == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'bloom':
- tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
+ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'opt':
- tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+ tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')
else:
raise ValueError(f'Unsupported model "{args.model}"')
- model.eval()
+ actor.eval()
input = args.input
input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())
- outputs = model.generate(input_ids,
+ outputs = actor.generate(input_ids,
max_length=args.max_length,
do_sample=True,
top_k=50,
@@ -46,7 +50,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])
parser.add_argument('--pretrain', type=str, default=None)
- parser.add_argument('--input', type=str, default='Q: How are you ? A:')
+ parser.add_argument('--input', type=str, default='Question: How are you ? Answer:')
parser.add_argument('--max_length', type=int, default=100)
args = parser.parse_args()
eval(args)
| {"golden_diff": "diff --git a/applications/ChatGPT/examples/inference.py b/applications/ChatGPT/examples/inference.py\n--- a/applications/ChatGPT/examples/inference.py\n+++ b/applications/ChatGPT/examples/inference.py\n@@ -9,30 +9,34 @@\n def eval(args):\n # configure model\n if args.model == 'gpt2':\n- model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = GPTActor().to(torch.cuda.current_device())\n elif args.model == 'bloom':\n- model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = BLOOMActor().to(torch.cuda.current_device())\n elif args.model == 'opt':\n- model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = OPTActor().to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n \n+ state_dict = torch.load(args.pretrain)\n+ actor.model.load_state_dict(state_dict)\n+ \n+ \n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n- tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n+ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n- tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n+ tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n \n- model.eval()\n+ actor.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n- outputs = model.generate(input_ids,\n+ outputs = actor.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n@@ -46,7 +50,7 @@\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n- parser.add_argument('--input', type=str, default='Q: How are you ? A:')\n+ parser.add_argument('--input', type=str, default='Question: How are you ? 
Answer:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "issue": "[BUG]: \u8fd0\u884cchatgpt\u63a8\u7406\u793a\u4f8b\u62a5\u9519\n### \ud83d\udc1b Describe the bug\n\n(https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT)/examples/ \u8fd0\u884cinference.py \u629b\u51faOSError:\r\n\r\n\n\n### Environment\n\n_No response_\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport torch\n\nfrom chatgpt.nn import BLOOMActor, GPTActor, OPTActor\nfrom transformers import AutoTokenizer\nfrom transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer\n\n\ndef eval(args):\n # configure model\n if args.model == 'gpt2':\n model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n elif args.model == 'bloom':\n model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n elif args.model == 'opt':\n model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n model.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n outputs = model.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n top_p=0.95,\n num_return_sequences=1)\n output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)\n print(output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--input', type=str, default='Q: How are you ? 
A:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "path": "applications/ChatGPT/examples/inference.py"}], "after_files": [{"content": "import argparse\nimport torch\n\nfrom chatgpt.nn import BLOOMActor, GPTActor, OPTActor\nfrom transformers import AutoTokenizer\nfrom transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer\n\n\ndef eval(args):\n # configure model\n if args.model == 'gpt2':\n actor = GPTActor().to(torch.cuda.current_device())\n elif args.model == 'bloom':\n actor = BLOOMActor().to(torch.cuda.current_device())\n elif args.model == 'opt':\n actor = OPTActor().to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n state_dict = torch.load(args.pretrain)\n actor.model.load_state_dict(state_dict)\n \n \n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n actor.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n outputs = actor.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n top_p=0.95,\n num_return_sequences=1)\n output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)\n print(output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--input', type=str, default='Question: How are you ? Answer:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "path": "applications/ChatGPT/examples/inference.py"}]} | 970 | 592 |
gh_patches_debug_15935 | rasdani/github-patches | git_diff | vispy__vispy-305 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The first emitted Timer event has `None` as `dt` property
``` python
def on_timer(self, event):
print event.dt
```
displays `None` the first time, and the correct dt then (a float). The first dt should probably be `0.0`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/app/timer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from __future__ import division
6
7 from ..util.event import Event, EmitterGroup
8 from ..util.ptime import time as precision_time
9 from ..ext.six import string_types
10 from .base import BaseTimerBackend as TimerBackend # noqa
11 from . import use_app, Application
12
13
14 class Timer(object):
15
16 """Timer used to schedule events in the future or on a repeating schedule
17
18 Parameters
19 ----------
20 interval : float
21 Time between events.
22 connect : function | None
23 The function to call.
24 iterations : int
25 Number of iterations. Can be -1 for infinite.
26 start : bool
27 Whether to start the timer.
28 app : instance of vispy.app.Application
29 The application to attach the timer to.
30 """
31
32 def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,
33 app=None):
34 self.events = EmitterGroup(source=self,
35 start=Event,
36 stop=Event,
37 timeout=Event)
38 #self.connect = self.events.timeout.connect
39 #self.disconnect = self.events.timeout.disconnect
40
41 # Get app instance
42 if app is None:
43 self._app = use_app()
44 elif isinstance(app, Application):
45 self._app = app
46 elif isinstance(app, string_types):
47 self._app = Application(app)
48 else:
49 raise ValueError('Invalid value for app %r' % app)
50
51 # Ensure app has backend app object
52 self._app.native
53
54 # Instantiate the backed with the right class
55 self._backend = self._app.backend_module.TimerBackend(self)
56
57 self._interval = interval
58 self._running = False
59 self._last_emit_time = None
60 self.iter_count = 0
61 self.max_iterations = iterations
62 if connect is not None:
63 self.connect(connect)
64 if start:
65 self.start()
66
67 @property
68 def app(self):
69 """ The vispy Application instance on which this Timer is based.
70 """
71 return self._app
72
73 @property
74 def interval(self):
75 return self._interval
76
77 @interval.setter
78 def interval(self, val):
79 self._interval = val
80 if self.running:
81 self.stop()
82 self.start()
83
84 @property
85 def running(self):
86 return self._running
87
88 def start(self, interval=None, iterations=None):
89 """Start the timer.
90
91 A timeout event will be generated every *interval* seconds.
92 If *interval* is None, then self.interval will be used.
93
94 If *iterations* is specified, the timer will stop after
95 emitting that number of events. If unspecified, then
96 the previous value of self.iterations will be used. If the value is
97 negative, then the timer will continue running until stop() is called.
98 """
99 self.iter_count = 0
100 if interval is not None:
101 self.interval = interval
102 if iterations is not None:
103 self.max_iterations = iterations
104 self._backend._vispy_start(self.interval)
105 self._running = True
106 self._last_emit_time = None
107 self.events.start(type='timer_start')
108
109 def stop(self):
110 """Stop the timer."""
111 self._backend._vispy_stop()
112 self._running = False
113 self.events.stop(type='timer_stop')
114
115 # use timer.app.run() and .quit() instead.
116 # def run_event_loop(self):
117 #"""Execute the event loop for this Timer's backend.
118 #"""
119 # return self._backend._vispy_run()
120
121 # def quit_event_loop(self):
122 #"""Exit the event loop for this Timer's backend.
123 #"""
124 # return self._backend._vispy_quit()
125
126 @property
127 def native(self):
128 """ The native timer on which this Timer is based.
129 """
130 return self._backend._vispy_get_native_timer()
131
132 def _timeout(self, *args):
133 # called when the backend timer has triggered.
134 if not self.running:
135 return
136 if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:
137 self.stop()
138 return
139
140 # compute dt since last event
141 now = precision_time()
142 if self._last_emit_time is None:
143 dt = None
144 else:
145 dt = now - self._last_emit_time
146 self._last_emit_time = now
147
148 self.events.timeout(
149 type='timer_timeout',
150 iteration=self.iter_count,
151 dt=dt)
152 self.iter_count += 1
153
154 def connect(self, callback):
155 """ Alias for self.events.timeout.connect() """
156 return self.events.timeout.connect(callback)
157
158 def disconnect(self, callback=None):
159 """ Alias for self.events.timeout.disconnect() """
160 return self.events.timeout.disconnect(callback)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/app/timer.py b/vispy/app/timer.py
--- a/vispy/app/timer.py
+++ b/vispy/app/timer.py
@@ -103,7 +103,7 @@
self.max_iterations = iterations
self._backend._vispy_start(self.interval)
self._running = True
- self._last_emit_time = None
+ self._last_emit_time = precision_time()
self.events.start(type='timer_start')
def stop(self):
@@ -139,10 +139,7 @@
# compute dt since last event
now = precision_time()
- if self._last_emit_time is None:
- dt = None
- else:
- dt = now - self._last_emit_time
+ dt = now - self._last_emit_time
self._last_emit_time = now
self.events.timeout(
| {"golden_diff": "diff --git a/vispy/app/timer.py b/vispy/app/timer.py\n--- a/vispy/app/timer.py\n+++ b/vispy/app/timer.py\n@@ -103,7 +103,7 @@\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n- self._last_emit_time = None\n+ self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n \n def stop(self):\n@@ -139,10 +139,7 @@\n \n # compute dt since last event\n now = precision_time()\n- if self._last_emit_time is None:\n- dt = None\n- else:\n- dt = now - self._last_emit_time\n+ dt = now - self._last_emit_time\n self._last_emit_time = now\n \n self.events.timeout(\n", "issue": "The first emitted Timer event has `None` as `dt` property\n``` python\ndef on_timer(self, event):\n print event.dt\n```\n\ndisplays `None` the first time, and the correct dt then (a float). The first dt should probably be `0.0`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend # noqa\nfrom . import use_app, Application\n\n\nclass Timer(object):\n\n \"\"\"Timer used to schedule events in the future or on a repeating schedule\n\n Parameters\n ----------\n interval : float\n Time between events.\n connect : function | None\n The function to call.\n iterations : int\n Number of iterations. Can be -1 for infinite.\n start : bool\n Whether to start the timer.\n app : instance of vispy.app.Application\n The application to attach the timer to.\n \"\"\"\n\n def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,\n app=None):\n self.events = EmitterGroup(source=self,\n start=Event,\n stop=Event,\n timeout=Event)\n #self.connect = self.events.timeout.connect\n #self.disconnect = self.events.timeout.disconnect\n\n # Get app instance\n if app is None:\n self._app = use_app()\n elif isinstance(app, Application):\n self._app = app\n elif isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n \n # Ensure app has backend app object\n self._app.native\n \n # Instantiate the backed with the right class\n self._backend = self._app.backend_module.TimerBackend(self)\n\n self._interval = interval\n self._running = False\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n \"\"\"\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._last_emit_time = None\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n # use timer.app.run() and .quit() instead.\n # def run_event_loop(self):\n #\"\"\"Execute the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_run()\n\n # def quit_event_loop(self):\n #\"\"\"Exit the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_quit()\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n # called when the backend timer has triggered.\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n\n # compute dt since last event\n now = precision_time()\n if self._last_emit_time is None:\n dt = None\n else:\n dt = now - self._last_emit_time\n self._last_emit_time = now\n\n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n dt=dt)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)\n", "path": "vispy/app/timer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend # noqa\nfrom . import use_app, Application\n\n\nclass Timer(object):\n\n \"\"\"Timer used to schedule events in the future or on a repeating schedule\n\n Parameters\n ----------\n interval : float\n Time between events.\n connect : function | None\n The function to call.\n iterations : int\n Number of iterations. 
Can be -1 for infinite.\n start : bool\n Whether to start the timer.\n app : instance of vispy.app.Application\n The application to attach the timer to.\n \"\"\"\n\n def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,\n app=None):\n self.events = EmitterGroup(source=self,\n start=Event,\n stop=Event,\n timeout=Event)\n #self.connect = self.events.timeout.connect\n #self.disconnect = self.events.timeout.disconnect\n\n # Get app instance\n if app is None:\n self._app = use_app()\n elif isinstance(app, Application):\n self._app = app\n elif isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n \n # Ensure app has backend app object\n self._app.native\n \n # Instantiate the backed with the right class\n self._backend = self._app.backend_module.TimerBackend(self)\n\n self._interval = interval\n self._running = False\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n \"\"\"\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n # use timer.app.run() and .quit() instead.\n # def run_event_loop(self):\n #\"\"\"Execute the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_run()\n\n # def quit_event_loop(self):\n #\"\"\"Exit the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_quit()\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n # called when the backend timer has triggered.\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n\n # compute dt since last event\n now = precision_time()\n dt = now - self._last_emit_time\n self._last_emit_time = now\n\n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n dt=dt)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)\n", "path": "vispy/app/timer.py"}]} | 1,767 | 204 |
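The patch in this record seeds `_last_emit_time` with `precision_time()` when the timer starts, so even the first timeout event carries a float `dt` instead of `None`. The sketch below reproduces just that bookkeeping with `time.monotonic()`; the class and the tick loop are illustrative, not vispy's actual backend machinery.

```python
# Sketch: record the start time when the timer starts and compute dt on every
# tick, so even the first event reports a float dt (close to the interval).
import time


class TickTimer:
    def __init__(self, interval=0.05):
        self.interval = interval
        self._last_emit_time = None

    def start(self):
        # Seeding the timestamp here is the essence of the fix: without it,
        # the first tick has no previous time to diff against and dt is None.
        self._last_emit_time = time.monotonic()

    def tick(self):
        now = time.monotonic()
        dt = now - self._last_emit_time
        self._last_emit_time = now
        return dt


timer = TickTimer()
timer.start()
for i in range(3):
    time.sleep(timer.interval)
    print(f"iteration {i}: dt = {timer.tick():.3f}s")  # always a float, never None
```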
gh_patches_debug_24108 | rasdani/github-patches | git_diff | pypa__pip-11264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checking out Bazaar branch makes full clone
When checking out a Bazaar branch, pip currently makes a full clone of the branch history. This is unnecessary and much slower than just fetching the latest revision:
For example, performance on my system for 'bzr co --lightweight lp:bzr':
0.60s user 0.11s system 5% cpu 12.234 total
Performance on my system for 'bzr branch lp:bzr':
65.41s user 1.48s system 39% cpu 2:47.91 total
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/vcs/bazaar.py`
Content:
```
1 import logging
2 from typing import List, Optional, Tuple
3
4 from pip._internal.utils.misc import HiddenText, display_path
5 from pip._internal.utils.subprocess import make_command
6 from pip._internal.utils.urls import path_to_url
7 from pip._internal.vcs.versioncontrol import (
8 AuthInfo,
9 RemoteNotFoundError,
10 RevOptions,
11 VersionControl,
12 vcs,
13 )
14
15 logger = logging.getLogger(__name__)
16
17
18 class Bazaar(VersionControl):
19 name = "bzr"
20 dirname = ".bzr"
21 repo_name = "branch"
22 schemes = (
23 "bzr+http",
24 "bzr+https",
25 "bzr+ssh",
26 "bzr+sftp",
27 "bzr+ftp",
28 "bzr+lp",
29 "bzr+file",
30 )
31
32 @staticmethod
33 def get_base_rev_args(rev: str) -> List[str]:
34 return ["-r", rev]
35
36 def fetch_new(
37 self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
38 ) -> None:
39 rev_display = rev_options.to_display()
40 logger.info(
41 "Checking out %s%s to %s",
42 url,
43 rev_display,
44 display_path(dest),
45 )
46 if verbosity <= 0:
47 flag = "--quiet"
48 elif verbosity == 1:
49 flag = ""
50 else:
51 flag = f"-{'v'*verbosity}"
52 cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
53 self.run_command(cmd_args)
54
55 def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
56 self.run_command(make_command("switch", url), cwd=dest)
57
58 def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
59 cmd_args = make_command("pull", "-q", rev_options.to_args())
60 self.run_command(cmd_args, cwd=dest)
61
62 @classmethod
63 def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
64 # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
65 url, rev, user_pass = super().get_url_rev_and_auth(url)
66 if url.startswith("ssh://"):
67 url = "bzr+" + url
68 return url, rev, user_pass
69
70 @classmethod
71 def get_remote_url(cls, location: str) -> str:
72 urls = cls.run_command(
73 ["info"], show_stdout=False, stdout_only=True, cwd=location
74 )
75 for line in urls.splitlines():
76 line = line.strip()
77 for x in ("checkout of branch: ", "parent branch: "):
78 if line.startswith(x):
79 repo = line.split(x)[1]
80 if cls._is_local_repository(repo):
81 return path_to_url(repo)
82 return repo
83 raise RemoteNotFoundError
84
85 @classmethod
86 def get_revision(cls, location: str) -> str:
87 revision = cls.run_command(
88 ["revno"],
89 show_stdout=False,
90 stdout_only=True,
91 cwd=location,
92 )
93 return revision.splitlines()[-1]
94
95 @classmethod
96 def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
97 """Always assume the versions don't match"""
98 return False
99
100
101 vcs.register(Bazaar)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py
--- a/src/pip/_internal/vcs/bazaar.py
+++ b/src/pip/_internal/vcs/bazaar.py
@@ -49,14 +49,25 @@
flag = ""
else:
flag = f"-{'v'*verbosity}"
- cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
+ cmd_args = make_command(
+ "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
+ )
self.run_command(cmd_args)
def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
self.run_command(make_command("switch", url), cwd=dest)
def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
- cmd_args = make_command("pull", "-q", rev_options.to_args())
+ output = self.run_command(
+ make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
+ )
+ if output.startswith("Standalone "):
+ # Older versions of pip used to create standalone branches.
+ # Convert the standalone branch to a checkout by calling "bzr bind".
+ cmd_args = make_command("bind", "-q", url)
+ self.run_command(cmd_args, cwd=dest)
+
+ cmd_args = make_command("update", "-q", rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
| {"golden_diff": "diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py\n--- a/src/pip/_internal/vcs/bazaar.py\n+++ b/src/pip/_internal/vcs/bazaar.py\n@@ -49,14 +49,25 @@\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n- cmd_args = make_command(\"branch\", flag, rev_options.to_args(), url, dest)\n+ cmd_args = make_command(\n+ \"checkout\", \"--lightweight\", flag, rev_options.to_args(), url, dest\n+ )\n self.run_command(cmd_args)\n \n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n \n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n- cmd_args = make_command(\"pull\", \"-q\", rev_options.to_args())\n+ output = self.run_command(\n+ make_command(\"info\"), show_stdout=False, stdout_only=True, cwd=dest\n+ )\n+ if output.startswith(\"Standalone \"):\n+ # Older versions of pip used to create standalone branches.\n+ # Convert the standalone branch to a checkout by calling \"bzr bind\".\n+ cmd_args = make_command(\"bind\", \"-q\", url)\n+ self.run_command(cmd_args, cwd=dest)\n+\n+ cmd_args = make_command(\"update\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n \n @classmethod\n", "issue": "Checking out Bazaar branch makes full clone\nWhen checking out a Bazaar branch, pip currently makes a full clone of the branch history. This is unnecessary and much slower than just fetching the latest revision:\r\n\r\nFor example, performance on my system for 'bzr co --lightweight lp:bzr':\r\n\r\n0.60s user 0.11s system 5% cpu 12.234 total\r\n\r\nPerformance on my system for 'bzr branch lp:bzr':\r\n\r\n65.41s user 1.48s system 39% cpu 2:47.91 total\r\n\n", "before_files": [{"content": "import logging\nfrom typing import List, Optional, Tuple\n\nfrom pip._internal.utils.misc import HiddenText, display_path\nfrom pip._internal.utils.subprocess import make_command\nfrom pip._internal.utils.urls import path_to_url\nfrom pip._internal.vcs.versioncontrol import (\n AuthInfo,\n RemoteNotFoundError,\n RevOptions,\n VersionControl,\n vcs,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Bazaar(VersionControl):\n name = \"bzr\"\n dirname = \".bzr\"\n repo_name = \"branch\"\n schemes = (\n \"bzr+http\",\n \"bzr+https\",\n \"bzr+ssh\",\n \"bzr+sftp\",\n \"bzr+ftp\",\n \"bzr+lp\",\n \"bzr+file\",\n )\n\n @staticmethod\n def get_base_rev_args(rev: str) -> List[str]:\n return [\"-r\", rev]\n\n def fetch_new(\n self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int\n ) -> None:\n rev_display = rev_options.to_display()\n logger.info(\n \"Checking out %s%s to %s\",\n url,\n rev_display,\n display_path(dest),\n )\n if verbosity <= 0:\n flag = \"--quiet\"\n elif verbosity == 1:\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n cmd_args = make_command(\"branch\", flag, rev_options.to_args(), url, dest)\n self.run_command(cmd_args)\n\n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n\n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n cmd_args = make_command(\"pull\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n\n @classmethod\n def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:\n # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it\n url, rev, user_pass = super().get_url_rev_and_auth(url)\n if url.startswith(\"ssh://\"):\n url = 
\"bzr+\" + url\n return url, rev, user_pass\n\n @classmethod\n def get_remote_url(cls, location: str) -> str:\n urls = cls.run_command(\n [\"info\"], show_stdout=False, stdout_only=True, cwd=location\n )\n for line in urls.splitlines():\n line = line.strip()\n for x in (\"checkout of branch: \", \"parent branch: \"):\n if line.startswith(x):\n repo = line.split(x)[1]\n if cls._is_local_repository(repo):\n return path_to_url(repo)\n return repo\n raise RemoteNotFoundError\n\n @classmethod\n def get_revision(cls, location: str) -> str:\n revision = cls.run_command(\n [\"revno\"],\n show_stdout=False,\n stdout_only=True,\n cwd=location,\n )\n return revision.splitlines()[-1]\n\n @classmethod\n def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Bazaar)\n", "path": "src/pip/_internal/vcs/bazaar.py"}], "after_files": [{"content": "import logging\nfrom typing import List, Optional, Tuple\n\nfrom pip._internal.utils.misc import HiddenText, display_path\nfrom pip._internal.utils.subprocess import make_command\nfrom pip._internal.utils.urls import path_to_url\nfrom pip._internal.vcs.versioncontrol import (\n AuthInfo,\n RemoteNotFoundError,\n RevOptions,\n VersionControl,\n vcs,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Bazaar(VersionControl):\n name = \"bzr\"\n dirname = \".bzr\"\n repo_name = \"branch\"\n schemes = (\n \"bzr+http\",\n \"bzr+https\",\n \"bzr+ssh\",\n \"bzr+sftp\",\n \"bzr+ftp\",\n \"bzr+lp\",\n \"bzr+file\",\n )\n\n @staticmethod\n def get_base_rev_args(rev: str) -> List[str]:\n return [\"-r\", rev]\n\n def fetch_new(\n self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int\n ) -> None:\n rev_display = rev_options.to_display()\n logger.info(\n \"Checking out %s%s to %s\",\n url,\n rev_display,\n display_path(dest),\n )\n if verbosity <= 0:\n flag = \"--quiet\"\n elif verbosity == 1:\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n cmd_args = make_command(\n \"checkout\", \"--lightweight\", flag, rev_options.to_args(), url, dest\n )\n self.run_command(cmd_args)\n\n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n\n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n output = self.run_command(\n make_command(\"info\"), show_stdout=False, stdout_only=True, cwd=dest\n )\n if output.startswith(\"Standalone \"):\n # Older versions of pip used to create standalone branches.\n # Convert the standalone branch to a checkout by calling \"bzr bind\".\n cmd_args = make_command(\"bind\", \"-q\", url)\n self.run_command(cmd_args, cwd=dest)\n\n cmd_args = make_command(\"update\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n\n @classmethod\n def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:\n # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it\n url, rev, user_pass = super().get_url_rev_and_auth(url)\n if url.startswith(\"ssh://\"):\n url = \"bzr+\" + url\n return url, rev, user_pass\n\n @classmethod\n def get_remote_url(cls, location: str) -> str:\n urls = cls.run_command(\n [\"info\"], show_stdout=False, stdout_only=True, cwd=location\n )\n for line in urls.splitlines():\n line = line.strip()\n for x in (\"checkout of branch: \", \"parent branch: \"):\n if line.startswith(x):\n repo = line.split(x)[1]\n if cls._is_local_repository(repo):\n return path_to_url(repo)\n 
return repo\n raise RemoteNotFoundError\n\n @classmethod\n def get_revision(cls, location: str) -> str:\n revision = cls.run_command(\n [\"revno\"],\n show_stdout=False,\n stdout_only=True,\n cwd=location,\n )\n return revision.splitlines()[-1]\n\n @classmethod\n def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Bazaar)\n", "path": "src/pip/_internal/vcs/bazaar.py"}]} | 1,332 | 355 |
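The patch in this record replaces the full `bzr branch` clone with `bzr checkout --lightweight`, and updates existing standalone branches by binding them before running `bzr update`. The sketch below drives the same commands through `subprocess`; it assumes a Bazaar/Breezy `bzr` client on `PATH`, and the URL and destination path are placeholders.

```python
# Sketch: fetch only the latest revision with a lightweight checkout, and update
# an existing checkout, mirroring the commands used in the patched bazaar.py.
import os
import subprocess


def fetch_new(url: str, dest: str) -> None:
    # Lightweight checkout: working tree only, no local copy of the history.
    subprocess.run(["bzr", "checkout", "--lightweight", "--quiet", url, dest], check=True)


def update(dest: str, url: str) -> None:
    info = subprocess.run(
        ["bzr", "info"], cwd=dest, capture_output=True, text=True, check=True
    ).stdout
    if info.startswith("Standalone "):
        # Convert an old-style standalone branch into a bound checkout first.
        subprocess.run(["bzr", "bind", "-q", url], cwd=dest, check=True)
    subprocess.run(["bzr", "update", "-q"], cwd=dest, check=True)


if __name__ == "__main__":
    url, dest = "lp:bzr", "/tmp/bzr-checkout"   # placeholder values
    if not os.path.exists(dest):
        fetch_new(url, dest)
    else:
        update(dest, url)
```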
gh_patches_debug_30789 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-2368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support setuptools/distribute extras dependencies
For example, I have in one of my projects:
```
extras_require={
'test': ['webtest', 'nose', 'coverage'],
'develop': ['bpython', 'z3c.checkversions [buildout]'],
},
```
For API documentation, 'test' packages need to be installed, would be nice if rtd supported this feature :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/doc_builder/config.py`
Content:
```
1 from readthedocs_build.config import (ConfigError, BuildConfig, InvalidConfig,
2 load as load_config)
3
4
5 from readthedocs.projects.exceptions import ProjectImportError
6
7
8 class ConfigWrapper(object):
9
10 """
11 A config object that wraps the Project & YAML based configs.
12
13 Gives precedence to YAML, falling back to project if it isn't defined.
14
15 We only currently implement a subset of the existing YAML config.
16 This should be the canonical source for our usage of the YAML files,
17 never accessing the config object directly.
18
19 """
20
21 def __init__(self, version, yaml_config):
22 self._version = version
23 self._project = version.project
24 self._yaml_config = yaml_config
25
26 @property
27 def pip_install(self):
28 if 'pip_install' in self._yaml_config.get('python', {}):
29 return self._yaml_config['python']['pip_install']
30 else:
31 return False
32
33 @property
34 def install_project(self):
35 if self.pip_install:
36 return True
37 if 'setup_py_install' in self._yaml_config.get('python', {}):
38 return self._yaml_config['python']['setup_py_install']
39 else:
40 return self._project.install_project
41
42 @property
43 def python_interpreter(self):
44 if 'version' in self._yaml_config.get('python', {}):
45 ver = self._yaml_config['python']['version']
46 if str(ver).startswith('2'):
47 return 'python'
48 else:
49 return 'python3'
50 else:
51 return self._project.python_interpreter
52
53 @property
54 def python_version(self):
55 if 'version' in self._yaml_config.get('python', {}):
56 ver = self._yaml_config['python']['version']
57 return ver
58 else:
59 if self._project.python_interpreter == 'python':
60 return 2
61 else:
62 return 3
63
64 @property
65 def use_system_site_packages(self):
66 if 'use_system_site_packages' in self._yaml_config.get('python', {}):
67 return self._yaml_config['python']['use_system_site_packages']
68 else:
69 return self._project.use_system_packages
70
71 @property
72 def use_conda(self):
73 return 'conda' in self._yaml_config
74
75 @property
76 def conda_file(self):
77 if 'file' in self._yaml_config.get('conda', {}):
78 return self._yaml_config['conda']['file']
79 else:
80 return None
81
82 @property
83 def requirements_file(self):
84 if 'requirements_file' in self._yaml_config:
85 return self._yaml_config['requirements_file']
86 else:
87 return self._project.requirements_file
88
89 @property
90 def formats(self):
91 if 'formats' in self._yaml_config:
92 return self._yaml_config['formats']
93 else:
94 formats = ['htmlzip']
95 if self._project.enable_epub_build:
96 formats += ['epub']
97 if self._project.enable_pdf_build:
98 formats += ['pdf']
99 return formats
100
101 # Not implemented until we figure out how to keep in sync with the webs.
102 # Probably needs to be version-specific as well, not project.
103 # @property
104 # def documentation_type(self):
105 # if 'type' in self._yaml_config:
106 # return self._yaml_config['type']
107 # else:
108 # return self._project.documentation_type
109
110
111 def load_yaml_config(version):
112 """
113 Load a configuration from `readthedocs.yml` file.
114
115 This uses the configuration logic from `readthedocs-build`,
116 which will keep parsing consistent between projects.
117 """
118
119 checkout_path = version.project.checkout_path(version.slug)
120 try:
121 config = load_config(
122 path=checkout_path,
123 env_config={
124 'output_base': '',
125 'type': 'sphinx',
126 'name': version.slug,
127 },
128 )[0]
129 except InvalidConfig: # This is a subclass of ConfigError, so has to come first
130 raise
131 except ConfigError:
132 config = BuildConfig(
133 env_config={},
134 raw_config={},
135 source_file='empty',
136 source_position=0,
137 )
138 return ConfigWrapper(version=version, yaml_config=config)
139
```
Path: `readthedocs/doc_builder/python_environments.py`
Content:
```
1 import logging
2 import os
3 import shutil
4
5 from django.conf import settings
6
7 from readthedocs.doc_builder.config import ConfigWrapper
8 from readthedocs.doc_builder.loader import get_builder_class
9 from readthedocs.projects.constants import LOG_TEMPLATE
10
11 log = logging.getLogger(__name__)
12
13
14 class PythonEnvironment(object):
15
16 def __init__(self, version, build_env, config=None):
17 self.version = version
18 self.project = version.project
19 self.build_env = build_env
20 if config:
21 self.config = config
22 else:
23 self.config = ConfigWrapper(version=version, yaml_config={})
24 # Compute here, since it's used a lot
25 self.checkout_path = self.project.checkout_path(self.version.slug)
26
27 def _log(self, msg):
28 log.info(LOG_TEMPLATE
29 .format(project=self.project.slug,
30 version=self.version.slug,
31 msg=msg))
32
33 def delete_existing_build_dir(self):
34
35 # Handle deleting old build dir
36 build_dir = os.path.join(
37 self.venv_path(),
38 'build')
39 if os.path.exists(build_dir):
40 self._log('Removing existing build directory')
41 shutil.rmtree(build_dir)
42
43 def install_package(self):
44 setup_path = os.path.join(self.checkout_path, 'setup.py')
45 if os.path.isfile(setup_path) and self.config.install_project:
46 if self.config.pip_install or getattr(settings, 'USE_PIP_INSTALL', False):
47 self.build_env.run(
48 'python',
49 self.venv_bin(filename='pip'),
50 'install',
51 '--ignore-installed',
52 '--cache-dir',
53 self.project.pip_cache_path,
54 '.',
55 cwd=self.checkout_path,
56 bin_path=self.venv_bin()
57 )
58 else:
59 self.build_env.run(
60 'python',
61 'setup.py',
62 'install',
63 '--force',
64 cwd=self.checkout_path,
65 bin_path=self.venv_bin()
66 )
67
68 def venv_bin(self, filename=None):
69 """Return path to the virtualenv bin path, or a specific binary
70
71 :param filename: If specified, add this filename to the path return
72 :returns: Path to virtualenv bin or filename in virtualenv bin
73 """
74 parts = [self.venv_path(), 'bin']
75 if filename is not None:
76 parts.append(filename)
77 return os.path.join(*parts)
78
79
80 class Virtualenv(PythonEnvironment):
81
82 def venv_path(self):
83 return os.path.join(self.project.doc_path, 'envs', self.version.slug)
84
85 def setup_base(self):
86 site_packages = '--no-site-packages'
87 if self.config.use_system_site_packages:
88 site_packages = '--system-site-packages'
89 env_path = self.venv_path()
90 self.build_env.run(
91 self.config.python_interpreter,
92 '-mvirtualenv',
93 site_packages,
94 '--no-download',
95 env_path,
96 bin_path=None, # Don't use virtualenv bin that doesn't exist yet
97 )
98
99 def install_core_requirements(self):
100 requirements = [
101 'sphinx==1.3.5',
102 'Pygments==2.1.3',
103 'setuptools==20.1.1',
104 'docutils==0.12',
105 'mkdocs==0.15.0',
106 'mock==1.0.1',
107 'pillow==2.6.1',
108 ('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'
109 '@0.6-alpha#egg=readthedocs-sphinx-ext'),
110 'sphinx-rtd-theme==0.1.9',
111 'alabaster>=0.7,<0.8,!=0.7.5',
112 'commonmark==0.5.4',
113 'recommonmark==0.1.1',
114 ]
115
116 cmd = [
117 'python',
118 self.venv_bin(filename='pip'),
119 'install',
120 '--use-wheel',
121 '-U',
122 '--cache-dir',
123 self.project.pip_cache_path,
124 ]
125 if self.config.use_system_site_packages:
126 # Other code expects sphinx-build to be installed inside the
127 # virtualenv. Using the -I option makes sure it gets installed
128 # even if it is already installed system-wide (and
129 # --system-site-packages is used)
130 cmd.append('-I')
131 cmd.extend(requirements)
132 self.build_env.run(
133 *cmd,
134 bin_path=self.venv_bin()
135 )
136
137 def install_user_requirements(self):
138 requirements_file_path = self.config.requirements_file
139 if not requirements_file_path:
140 builder_class = get_builder_class(self.project.documentation_type)
141 docs_dir = (builder_class(build_env=self.build_env, python_env=self)
142 .docs_dir())
143 for path in [docs_dir, '']:
144 for req_file in ['pip_requirements.txt', 'requirements.txt']:
145 test_path = os.path.join(self.checkout_path, path, req_file)
146 if os.path.exists(test_path):
147 requirements_file_path = test_path
148 break
149
150 if requirements_file_path:
151 self.build_env.run(
152 'python',
153 self.venv_bin(filename='pip'),
154 'install',
155 '--exists-action=w',
156 '--cache-dir',
157 self.project.pip_cache_path,
158 '-r{0}'.format(requirements_file_path),
159 cwd=self.checkout_path,
160 bin_path=self.venv_bin()
161 )
162
163
164 class Conda(PythonEnvironment):
165
166 def venv_path(self):
167 return os.path.join(self.project.doc_path, 'conda', self.version.slug)
168
169 def setup_base(self):
170 conda_env_path = os.path.join(self.project.doc_path, 'conda')
171 version_path = os.path.join(conda_env_path, self.version.slug)
172
173 if os.path.exists(version_path):
174 # Re-create conda directory each time to keep fresh state
175 self._log('Removing existing conda directory')
176 shutil.rmtree(version_path)
177 self.build_env.run(
178 'conda',
179 'env',
180 'create',
181 '--name',
182 self.version.slug,
183 '--file',
184 self.config.conda_file,
185 bin_path=None, # Don't use conda bin that doesn't exist yet
186 )
187
188 def install_core_requirements(self):
189
190 # Use conda for requirements it packages
191 requirements = [
192 'sphinx==1.3.5',
193 'Pygments==2.1.1',
194 'docutils==0.12',
195 'mock',
196 'pillow==3.0.0',
197 'sphinx_rtd_theme==0.1.7',
198 'alabaster>=0.7,<0.8,!=0.7.5',
199 ]
200
201 cmd = [
202 'conda',
203 'install',
204 '--yes',
205 '--name',
206 self.version.slug,
207 ]
208 cmd.extend(requirements)
209 self.build_env.run(
210 *cmd
211 )
212
213 # Install pip-only things.
214 pip_requirements = [
215 'mkdocs==0.15.0',
216 ('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'
217 '@0.6-alpha#egg=readthedocs-sphinx-ext'),
218 'commonmark==0.5.4',
219 'recommonmark==0.1.1',
220 ]
221
222 pip_cmd = [
223 'python',
224 self.venv_bin(filename='pip'),
225 'install',
226 '-U',
227 '--cache-dir',
228 self.project.pip_cache_path,
229 ]
230 pip_cmd.extend(pip_requirements)
231 self.build_env.run(
232 *pip_cmd,
233 bin_path=self.venv_bin()
234 )
235
236 def install_user_requirements(self):
237 self.build_env.run(
238 'conda',
239 'env',
240 'update',
241 '--name',
242 self.version.slug,
243 '--file',
244 self.config.conda_file,
245 )
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/doc_builder/config.py b/readthedocs/doc_builder/config.py
--- a/readthedocs/doc_builder/config.py
+++ b/readthedocs/doc_builder/config.py
@@ -39,6 +39,14 @@
else:
return self._project.install_project
+ @property
+ def extra_requirements(self):
+ if self.pip_install and 'extra_requirements' in self._yaml_config.get(
+ 'python', {}):
+ return self._yaml_config['python']['extra_requirements']
+ else:
+ return []
+
@property
def python_interpreter(self):
if 'version' in self._yaml_config.get('python', {}):
diff --git a/readthedocs/doc_builder/python_environments.py b/readthedocs/doc_builder/python_environments.py
--- a/readthedocs/doc_builder/python_environments.py
+++ b/readthedocs/doc_builder/python_environments.py
@@ -44,6 +44,10 @@
setup_path = os.path.join(self.checkout_path, 'setup.py')
if os.path.isfile(setup_path) and self.config.install_project:
if self.config.pip_install or getattr(settings, 'USE_PIP_INSTALL', False):
+ extra_req_param = ''
+ if self.config.extra_requirements:
+ extra_req_param = '[{0}]'.format(
+ ','.join(self.config.extra_requirements))
self.build_env.run(
'python',
self.venv_bin(filename='pip'),
@@ -51,7 +55,7 @@
'--ignore-installed',
'--cache-dir',
self.project.pip_cache_path,
- '.',
+ '.{0}'.format(extra_req_param),
cwd=self.checkout_path,
bin_path=self.venv_bin()
)
| {"golden_diff": "diff --git a/readthedocs/doc_builder/config.py b/readthedocs/doc_builder/config.py\n--- a/readthedocs/doc_builder/config.py\n+++ b/readthedocs/doc_builder/config.py\n@@ -39,6 +39,14 @@\n else:\n return self._project.install_project\n \n+ @property\n+ def extra_requirements(self):\n+ if self.pip_install and 'extra_requirements' in self._yaml_config.get(\n+ 'python', {}):\n+ return self._yaml_config['python']['extra_requirements']\n+ else:\n+ return []\n+\n @property\n def python_interpreter(self):\n if 'version' in self._yaml_config.get('python', {}):\ndiff --git a/readthedocs/doc_builder/python_environments.py b/readthedocs/doc_builder/python_environments.py\n--- a/readthedocs/doc_builder/python_environments.py\n+++ b/readthedocs/doc_builder/python_environments.py\n@@ -44,6 +44,10 @@\n setup_path = os.path.join(self.checkout_path, 'setup.py')\n if os.path.isfile(setup_path) and self.config.install_project:\n if self.config.pip_install or getattr(settings, 'USE_PIP_INSTALL', False):\n+ extra_req_param = ''\n+ if self.config.extra_requirements:\n+ extra_req_param = '[{0}]'.format(\n+ ','.join(self.config.extra_requirements))\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n@@ -51,7 +55,7 @@\n '--ignore-installed',\n '--cache-dir',\n self.project.pip_cache_path,\n- '.',\n+ '.{0}'.format(extra_req_param),\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n", "issue": "Support setuptools/distribute extras dependencies\nFor example, I have in one of my projects:\n\n```\n extras_require={\n 'test': ['webtest', 'nose', 'coverage'],\n 'develop': ['bpython', 'z3c.checkversions [buildout]'],\n },\n```\n\nFor API documentation, 'test' packages need to be installed, would be nice if rtd supported this feature :)\n\nSupport setuptools/distribute extras dependencies\nFor example, I have in one of my projects:\n\n```\n extras_require={\n 'test': ['webtest', 'nose', 'coverage'],\n 'develop': ['bpython', 'z3c.checkversions [buildout]'],\n },\n```\n\nFor API documentation, 'test' packages need to be installed, would be nice if rtd supported this feature :)\n\n", "before_files": [{"content": "from readthedocs_build.config import (ConfigError, BuildConfig, InvalidConfig,\n load as load_config)\n\n\nfrom readthedocs.projects.exceptions import ProjectImportError\n\n\nclass ConfigWrapper(object):\n\n \"\"\"\n A config object that wraps the Project & YAML based configs.\n\n Gives precedence to YAML, falling back to project if it isn't defined.\n\n We only currently implement a subset of the existing YAML config.\n This should be the canonical source for our usage of the YAML files,\n never accessing the config object directly.\n\n \"\"\"\n\n def __init__(self, version, yaml_config):\n self._version = version\n self._project = version.project\n self._yaml_config = yaml_config\n\n @property\n def pip_install(self):\n if 'pip_install' in self._yaml_config.get('python', {}):\n return self._yaml_config['python']['pip_install']\n else:\n return False\n\n @property\n def install_project(self):\n if self.pip_install:\n return True\n if 'setup_py_install' in self._yaml_config.get('python', {}):\n return self._yaml_config['python']['setup_py_install']\n else:\n return self._project.install_project\n\n @property\n def python_interpreter(self):\n if 'version' in self._yaml_config.get('python', {}):\n ver = self._yaml_config['python']['version']\n if str(ver).startswith('2'):\n return 'python'\n else:\n return 'python3'\n else:\n return self._project.python_interpreter\n\n 
@property\n def python_version(self):\n if 'version' in self._yaml_config.get('python', {}):\n ver = self._yaml_config['python']['version']\n return ver\n else:\n if self._project.python_interpreter == 'python':\n return 2\n else:\n return 3\n\n @property\n def use_system_site_packages(self):\n if 'use_system_site_packages' in self._yaml_config.get('python', {}):\n return self._yaml_config['python']['use_system_site_packages']\n else:\n return self._project.use_system_packages\n\n @property\n def use_conda(self):\n return 'conda' in self._yaml_config\n\n @property\n def conda_file(self):\n if 'file' in self._yaml_config.get('conda', {}):\n return self._yaml_config['conda']['file']\n else:\n return None\n\n @property\n def requirements_file(self):\n if 'requirements_file' in self._yaml_config:\n return self._yaml_config['requirements_file']\n else:\n return self._project.requirements_file\n\n @property\n def formats(self):\n if 'formats' in self._yaml_config:\n return self._yaml_config['formats']\n else:\n formats = ['htmlzip']\n if self._project.enable_epub_build:\n formats += ['epub']\n if self._project.enable_pdf_build:\n formats += ['pdf']\n return formats\n\n # Not implemented until we figure out how to keep in sync with the webs.\n # Probably needs to be version-specific as well, not project.\n # @property\n # def documentation_type(self):\n # if 'type' in self._yaml_config:\n # return self._yaml_config['type']\n # else:\n # return self._project.documentation_type\n\n\ndef load_yaml_config(version):\n \"\"\"\n Load a configuration from `readthedocs.yml` file.\n\n This uses the configuration logic from `readthedocs-build`,\n which will keep parsing consistent between projects.\n \"\"\"\n\n checkout_path = version.project.checkout_path(version.slug)\n try:\n config = load_config(\n path=checkout_path,\n env_config={\n 'output_base': '',\n 'type': 'sphinx',\n 'name': version.slug,\n },\n )[0]\n except InvalidConfig: # This is a subclass of ConfigError, so has to come first\n raise\n except ConfigError:\n config = BuildConfig(\n env_config={},\n raw_config={},\n source_file='empty',\n source_position=0,\n )\n return ConfigWrapper(version=version, yaml_config=config)\n", "path": "readthedocs/doc_builder/config.py"}, {"content": "import logging\nimport os\nimport shutil\n\nfrom django.conf import settings\n\nfrom readthedocs.doc_builder.config import ConfigWrapper\nfrom readthedocs.doc_builder.loader import get_builder_class\nfrom readthedocs.projects.constants import LOG_TEMPLATE\n\nlog = logging.getLogger(__name__)\n\n\nclass PythonEnvironment(object):\n\n def __init__(self, version, build_env, config=None):\n self.version = version\n self.project = version.project\n self.build_env = build_env\n if config:\n self.config = config\n else:\n self.config = ConfigWrapper(version=version, yaml_config={})\n # Compute here, since it's used a lot\n self.checkout_path = self.project.checkout_path(self.version.slug)\n\n def _log(self, msg):\n log.info(LOG_TEMPLATE\n .format(project=self.project.slug,\n version=self.version.slug,\n msg=msg))\n\n def delete_existing_build_dir(self):\n\n # Handle deleting old build dir\n build_dir = os.path.join(\n self.venv_path(),\n 'build')\n if os.path.exists(build_dir):\n self._log('Removing existing build directory')\n shutil.rmtree(build_dir)\n\n def install_package(self):\n setup_path = os.path.join(self.checkout_path, 'setup.py')\n if os.path.isfile(setup_path) and self.config.install_project:\n if self.config.pip_install or getattr(settings, 
'USE_PIP_INSTALL', False):\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--ignore-installed',\n '--cache-dir',\n self.project.pip_cache_path,\n '.',\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n else:\n self.build_env.run(\n 'python',\n 'setup.py',\n 'install',\n '--force',\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n\n def venv_bin(self, filename=None):\n \"\"\"Return path to the virtualenv bin path, or a specific binary\n\n :param filename: If specified, add this filename to the path return\n :returns: Path to virtualenv bin or filename in virtualenv bin\n \"\"\"\n parts = [self.venv_path(), 'bin']\n if filename is not None:\n parts.append(filename)\n return os.path.join(*parts)\n\n\nclass Virtualenv(PythonEnvironment):\n\n def venv_path(self):\n return os.path.join(self.project.doc_path, 'envs', self.version.slug)\n\n def setup_base(self):\n site_packages = '--no-site-packages'\n if self.config.use_system_site_packages:\n site_packages = '--system-site-packages'\n env_path = self.venv_path()\n self.build_env.run(\n self.config.python_interpreter,\n '-mvirtualenv',\n site_packages,\n '--no-download',\n env_path,\n bin_path=None, # Don't use virtualenv bin that doesn't exist yet\n )\n\n def install_core_requirements(self):\n requirements = [\n 'sphinx==1.3.5',\n 'Pygments==2.1.3',\n 'setuptools==20.1.1',\n 'docutils==0.12',\n 'mkdocs==0.15.0',\n 'mock==1.0.1',\n 'pillow==2.6.1',\n ('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'\n '@0.6-alpha#egg=readthedocs-sphinx-ext'),\n 'sphinx-rtd-theme==0.1.9',\n 'alabaster>=0.7,<0.8,!=0.7.5',\n 'commonmark==0.5.4',\n 'recommonmark==0.1.1',\n ]\n\n cmd = [\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--use-wheel',\n '-U',\n '--cache-dir',\n self.project.pip_cache_path,\n ]\n if self.config.use_system_site_packages:\n # Other code expects sphinx-build to be installed inside the\n # virtualenv. 
Using the -I option makes sure it gets installed\n # even if it is already installed system-wide (and\n # --system-site-packages is used)\n cmd.append('-I')\n cmd.extend(requirements)\n self.build_env.run(\n *cmd,\n bin_path=self.venv_bin()\n )\n\n def install_user_requirements(self):\n requirements_file_path = self.config.requirements_file\n if not requirements_file_path:\n builder_class = get_builder_class(self.project.documentation_type)\n docs_dir = (builder_class(build_env=self.build_env, python_env=self)\n .docs_dir())\n for path in [docs_dir, '']:\n for req_file in ['pip_requirements.txt', 'requirements.txt']:\n test_path = os.path.join(self.checkout_path, path, req_file)\n if os.path.exists(test_path):\n requirements_file_path = test_path\n break\n\n if requirements_file_path:\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--exists-action=w',\n '--cache-dir',\n self.project.pip_cache_path,\n '-r{0}'.format(requirements_file_path),\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n\n\nclass Conda(PythonEnvironment):\n\n def venv_path(self):\n return os.path.join(self.project.doc_path, 'conda', self.version.slug)\n\n def setup_base(self):\n conda_env_path = os.path.join(self.project.doc_path, 'conda')\n version_path = os.path.join(conda_env_path, self.version.slug)\n\n if os.path.exists(version_path):\n # Re-create conda directory each time to keep fresh state\n self._log('Removing existing conda directory')\n shutil.rmtree(version_path)\n self.build_env.run(\n 'conda',\n 'env',\n 'create',\n '--name',\n self.version.slug,\n '--file',\n self.config.conda_file,\n bin_path=None, # Don't use conda bin that doesn't exist yet\n )\n\n def install_core_requirements(self):\n\n # Use conda for requirements it packages\n requirements = [\n 'sphinx==1.3.5',\n 'Pygments==2.1.1',\n 'docutils==0.12',\n 'mock',\n 'pillow==3.0.0',\n 'sphinx_rtd_theme==0.1.7',\n 'alabaster>=0.7,<0.8,!=0.7.5',\n ]\n\n cmd = [\n 'conda',\n 'install',\n '--yes',\n '--name',\n self.version.slug,\n ]\n cmd.extend(requirements)\n self.build_env.run(\n *cmd\n )\n\n # Install pip-only things.\n pip_requirements = [\n 'mkdocs==0.15.0',\n ('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'\n '@0.6-alpha#egg=readthedocs-sphinx-ext'),\n 'commonmark==0.5.4',\n 'recommonmark==0.1.1',\n ]\n\n pip_cmd = [\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '-U',\n '--cache-dir',\n self.project.pip_cache_path,\n ]\n pip_cmd.extend(pip_requirements)\n self.build_env.run(\n *pip_cmd,\n bin_path=self.venv_bin()\n )\n\n def install_user_requirements(self):\n self.build_env.run(\n 'conda',\n 'env',\n 'update',\n '--name',\n self.version.slug,\n '--file',\n self.config.conda_file,\n )\n", "path": "readthedocs/doc_builder/python_environments.py"}], "after_files": [{"content": "from readthedocs_build.config import (ConfigError, BuildConfig, InvalidConfig,\n load as load_config)\n\n\nfrom readthedocs.projects.exceptions import ProjectImportError\n\n\nclass ConfigWrapper(object):\n\n \"\"\"\n A config object that wraps the Project & YAML based configs.\n\n Gives precedence to YAML, falling back to project if it isn't defined.\n\n We only currently implement a subset of the existing YAML config.\n This should be the canonical source for our usage of the YAML files,\n never accessing the config object directly.\n\n \"\"\"\n\n def __init__(self, version, yaml_config):\n self._version = version\n self._project = version.project\n self._yaml_config = yaml_config\n\n @property\n def 
pip_install(self):\n if 'pip_install' in self._yaml_config.get('python', {}):\n return self._yaml_config['python']['pip_install']\n else:\n return False\n\n @property\n def install_project(self):\n if self.pip_install:\n return True\n if 'setup_py_install' in self._yaml_config.get('python', {}):\n return self._yaml_config['python']['setup_py_install']\n else:\n return self._project.install_project\n\n @property\n def extra_requirements(self):\n if self.pip_install and 'extra_requirements' in self._yaml_config.get(\n 'python', {}):\n return self._yaml_config['python']['extra_requirements']\n else:\n return []\n\n @property\n def python_interpreter(self):\n if 'version' in self._yaml_config.get('python', {}):\n ver = self._yaml_config['python']['version']\n if str(ver).startswith('2'):\n return 'python'\n else:\n return 'python3'\n else:\n return self._project.python_interpreter\n\n @property\n def python_version(self):\n if 'version' in self._yaml_config.get('python', {}):\n ver = self._yaml_config['python']['version']\n return ver\n else:\n if self._project.python_interpreter == 'python':\n return 2\n else:\n return 3\n\n @property\n def use_system_site_packages(self):\n if 'use_system_site_packages' in self._yaml_config.get('python', {}):\n return self._yaml_config['python']['use_system_site_packages']\n else:\n return self._project.use_system_packages\n\n @property\n def use_conda(self):\n return 'conda' in self._yaml_config\n\n @property\n def conda_file(self):\n if 'file' in self._yaml_config.get('conda', {}):\n return self._yaml_config['conda']['file']\n else:\n return None\n\n @property\n def requirements_file(self):\n if 'requirements_file' in self._yaml_config:\n return self._yaml_config['requirements_file']\n else:\n return self._project.requirements_file\n\n @property\n def formats(self):\n if 'formats' in self._yaml_config:\n return self._yaml_config['formats']\n else:\n formats = ['htmlzip']\n if self._project.enable_epub_build:\n formats += ['epub']\n if self._project.enable_pdf_build:\n formats += ['pdf']\n return formats\n\n # Not implemented until we figure out how to keep in sync with the webs.\n # Probably needs to be version-specific as well, not project.\n # @property\n # def documentation_type(self):\n # if 'type' in self._yaml_config:\n # return self._yaml_config['type']\n # else:\n # return self._project.documentation_type\n\n\ndef load_yaml_config(version):\n \"\"\"\n Load a configuration from `readthedocs.yml` file.\n\n This uses the configuration logic from `readthedocs-build`,\n which will keep parsing consistent between projects.\n \"\"\"\n\n checkout_path = version.project.checkout_path(version.slug)\n try:\n config = load_config(\n path=checkout_path,\n env_config={\n 'output_base': '',\n 'type': 'sphinx',\n 'name': version.slug,\n },\n )[0]\n except InvalidConfig: # This is a subclass of ConfigError, so has to come first\n raise\n except ConfigError:\n config = BuildConfig(\n env_config={},\n raw_config={},\n source_file='empty',\n source_position=0,\n )\n return ConfigWrapper(version=version, yaml_config=config)\n", "path": "readthedocs/doc_builder/config.py"}, {"content": "import logging\nimport os\nimport shutil\n\nfrom django.conf import settings\n\nfrom readthedocs.doc_builder.config import ConfigWrapper\nfrom readthedocs.doc_builder.loader import get_builder_class\nfrom readthedocs.projects.constants import LOG_TEMPLATE\n\nlog = logging.getLogger(__name__)\n\n\nclass PythonEnvironment(object):\n\n def __init__(self, version, build_env, config=None):\n 
self.version = version\n self.project = version.project\n self.build_env = build_env\n if config:\n self.config = config\n else:\n self.config = ConfigWrapper(version=version, yaml_config={})\n # Compute here, since it's used a lot\n self.checkout_path = self.project.checkout_path(self.version.slug)\n\n def _log(self, msg):\n log.info(LOG_TEMPLATE\n .format(project=self.project.slug,\n version=self.version.slug,\n msg=msg))\n\n def delete_existing_build_dir(self):\n\n # Handle deleting old build dir\n build_dir = os.path.join(\n self.venv_path(),\n 'build')\n if os.path.exists(build_dir):\n self._log('Removing existing build directory')\n shutil.rmtree(build_dir)\n\n def install_package(self):\n setup_path = os.path.join(self.checkout_path, 'setup.py')\n if os.path.isfile(setup_path) and self.config.install_project:\n if self.config.pip_install or getattr(settings, 'USE_PIP_INSTALL', False):\n extra_req_param = ''\n if self.config.extra_requirements:\n extra_req_param = '[{0}]'.format(\n ','.join(self.config.extra_requirements))\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--ignore-installed',\n '--cache-dir',\n self.project.pip_cache_path,\n '.{0}'.format(extra_req_param),\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n else:\n self.build_env.run(\n 'python',\n 'setup.py',\n 'install',\n '--force',\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n\n def venv_bin(self, filename=None):\n \"\"\"Return path to the virtualenv bin path, or a specific binary\n\n :param filename: If specified, add this filename to the path return\n :returns: Path to virtualenv bin or filename in virtualenv bin\n \"\"\"\n parts = [self.venv_path(), 'bin']\n if filename is not None:\n parts.append(filename)\n return os.path.join(*parts)\n\n\nclass Virtualenv(PythonEnvironment):\n\n def venv_path(self):\n return os.path.join(self.project.doc_path, 'envs', self.version.slug)\n\n def setup_base(self):\n site_packages = '--no-site-packages'\n if self.config.use_system_site_packages:\n site_packages = '--system-site-packages'\n env_path = self.venv_path()\n self.build_env.run(\n self.config.python_interpreter,\n '-mvirtualenv',\n site_packages,\n '--no-download',\n env_path,\n bin_path=None, # Don't use virtualenv bin that doesn't exist yet\n )\n\n def install_core_requirements(self):\n requirements = [\n 'sphinx==1.3.5',\n 'Pygments==2.1.3',\n 'setuptools==20.1.1',\n 'docutils==0.12',\n 'mkdocs==0.15.0',\n 'mock==1.0.1',\n 'pillow==2.6.1',\n ('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'\n '@0.6-alpha#egg=readthedocs-sphinx-ext'),\n 'sphinx-rtd-theme==0.1.9',\n 'alabaster>=0.7,<0.8,!=0.7.5',\n 'commonmark==0.5.4',\n 'recommonmark==0.1.1',\n ]\n\n cmd = [\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--use-wheel',\n '-U',\n '--cache-dir',\n self.project.pip_cache_path,\n ]\n if self.config.use_system_site_packages:\n # Other code expects sphinx-build to be installed inside the\n # virtualenv. 
Using the -I option makes sure it gets installed\n # even if it is already installed system-wide (and\n # --system-site-packages is used)\n cmd.append('-I')\n cmd.extend(requirements)\n self.build_env.run(\n *cmd,\n bin_path=self.venv_bin()\n )\n\n def install_user_requirements(self):\n requirements_file_path = self.config.requirements_file\n if not requirements_file_path:\n builder_class = get_builder_class(self.project.documentation_type)\n docs_dir = (builder_class(build_env=self.build_env, python_env=self)\n .docs_dir())\n for path in [docs_dir, '']:\n for req_file in ['pip_requirements.txt', 'requirements.txt']:\n test_path = os.path.join(self.checkout_path, path, req_file)\n if os.path.exists(test_path):\n requirements_file_path = test_path\n break\n\n if requirements_file_path:\n self.build_env.run(\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '--exists-action=w',\n '--cache-dir',\n self.project.pip_cache_path,\n '-r{0}'.format(requirements_file_path),\n cwd=self.checkout_path,\n bin_path=self.venv_bin()\n )\n\n\nclass Conda(PythonEnvironment):\n\n def venv_path(self):\n return os.path.join(self.project.doc_path, 'conda', self.version.slug)\n\n def setup_base(self):\n conda_env_path = os.path.join(self.project.doc_path, 'conda')\n version_path = os.path.join(conda_env_path, self.version.slug)\n\n if os.path.exists(version_path):\n # Re-create conda directory each time to keep fresh state\n self._log('Removing existing conda directory')\n shutil.rmtree(version_path)\n self.build_env.run(\n 'conda',\n 'env',\n 'create',\n '--name',\n self.version.slug,\n '--file',\n self.config.conda_file,\n bin_path=None, # Don't use conda bin that doesn't exist yet\n )\n\n def install_core_requirements(self):\n\n # Use conda for requirements it packages\n requirements = [\n 'sphinx==1.3.5',\n 'Pygments==2.1.1',\n 'docutils==0.12',\n 'mock',\n 'pillow==3.0.0',\n 'sphinx_rtd_theme==0.1.7',\n 'alabaster>=0.7,<0.8,!=0.7.5',\n ]\n\n cmd = [\n 'conda',\n 'install',\n '--yes',\n '--name',\n self.version.slug,\n ]\n cmd.extend(requirements)\n self.build_env.run(\n *cmd\n )\n\n # Install pip-only things.\n pip_requirements = [\n 'mkdocs==0.15.0',\n ('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'\n '@0.6-alpha#egg=readthedocs-sphinx-ext'),\n 'commonmark==0.5.4',\n 'recommonmark==0.1.1',\n ]\n\n pip_cmd = [\n 'python',\n self.venv_bin(filename='pip'),\n 'install',\n '-U',\n '--cache-dir',\n self.project.pip_cache_path,\n ]\n pip_cmd.extend(pip_requirements)\n self.build_env.run(\n *pip_cmd,\n bin_path=self.venv_bin()\n )\n\n def install_user_requirements(self):\n self.build_env.run(\n 'conda',\n 'env',\n 'update',\n '--name',\n self.version.slug,\n '--file',\n self.config.conda_file,\n )\n", "path": "readthedocs/doc_builder/python_environments.py"}]} | 4,008 | 383 |
gh_patches_debug_5737 | rasdani/github-patches | git_diff | spesmilo__electrum-1738 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request: Label colour - History Window
The transaction amount of outgoing transactions, when viewed in the history window, is coloured red.
It would be nice if the label of these transactions could be coloured the same red colour, so that they stand out more against the incoming transactions.
Not a big issue but would be a 'nice to have'.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gui/qt/history_widget.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # Electrum - lightweight Bitcoin client
4 # Copyright (C) 2015 Thomas Voegtlin
5 #
6 # Permission is hereby granted, free of charge, to any person
7 # obtaining a copy of this software and associated documentation files
8 # (the "Software"), to deal in the Software without restriction,
9 # including without limitation the rights to use, copy, modify, merge,
10 # publish, distribute, sublicense, and/or sell copies of the Software,
11 # and to permit persons to whom the Software is furnished to do so,
12 # subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be
15 # included in all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
21 # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
22 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 # SOFTWARE.
25
26
27 import webbrowser
28
29 from util import *
30 from electrum.i18n import _
31 from electrum.util import block_explorer_URL, format_satoshis, format_time
32 from electrum.plugins import run_hook
33
34
35 class HistoryWidget(MyTreeWidget):
36
37 def __init__(self, parent=None):
38 MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
39 self.refresh_headers()
40 self.setColumnHidden(1, True)
41 self.config = self.parent.config
42
43 def refresh_headers(self):
44 headers = ['', '', _('Date'), _('Description') , _('Amount'),
45 _('Balance')]
46 run_hook('history_tab_headers', headers)
47 self.update_headers(headers)
48
49 def get_icon(self, conf, timestamp):
50 time_str = _("unknown")
51 if conf > 0:
52 time_str = format_time(timestamp)
53 if conf == -1:
54 time_str = _('Not Verified')
55 icon = QIcon(":icons/unconfirmed.png")
56 elif conf == 0:
57 time_str = _('Unconfirmed')
58 icon = QIcon(":icons/unconfirmed.png")
59 elif conf < 6:
60 icon = QIcon(":icons/clock%d.png"%conf)
61 else:
62 icon = QIcon(":icons/confirmed.png")
63 return icon, time_str
64
65 def get_domain(self):
66 '''Replaced in address_dialog.py'''
67 return self.wallet.get_account_addresses(self.parent.current_account)
68
69 def on_update(self):
70 self.wallet = self.parent.wallet
71 h = self.wallet.get_history(self.get_domain())
72
73 item = self.currentItem()
74 current_tx = item.data(0, Qt.UserRole).toString() if item else None
75 self.clear()
76 run_hook('history_tab_update_begin')
77 for tx in h:
78 tx_hash, conf, value, timestamp, balance = tx
79 if conf is None and timestamp is None:
80 continue # skip history in offline mode
81 icon, time_str = self.get_icon(conf, timestamp)
82 v_str = self.parent.format_amount(value, True, whitespaces=True)
83 balance_str = self.parent.format_amount(balance, whitespaces=True)
84 label = self.wallet.get_label(tx_hash)
85 entry = ['', tx_hash, time_str, label, v_str, balance_str]
86 run_hook('history_tab_update', tx, entry)
87 item = QTreeWidgetItem(entry)
88 item.setIcon(0, icon)
89 for i in range(len(entry)):
90 if i>3:
91 item.setTextAlignment(i, Qt.AlignRight)
92 if i!=2:
93 item.setFont(i, QFont(MONOSPACE_FONT))
94 if value < 0:
95 item.setForeground(4, QBrush(QColor("#BC1E1E")))
96 if tx_hash:
97 item.setData(0, Qt.UserRole, tx_hash)
98 self.insertTopLevelItem(0, item)
99 if current_tx == tx_hash:
100 self.setCurrentItem(item)
101
102 def update_item(self, tx_hash, conf, timestamp):
103 icon, time_str = self.get_icon(conf, timestamp)
104 items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)
105 if items:
106 item = items[0]
107 item.setIcon(0, icon)
108 item.setText(2, time_str)
109
110 def create_menu(self, position):
111 self.selectedIndexes()
112 item = self.currentItem()
113 if not item:
114 return
115 tx_hash = str(item.data(0, Qt.UserRole).toString())
116 if not tx_hash:
117 return
118 tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
119 if not tx_URL:
120 return
121 menu = QMenu()
122 menu.addAction(_("Copy ID to Clipboard"), lambda: self.parent.app.clipboard().setText(tx_hash))
123 menu.addAction(_("Details"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))
124 menu.addAction(_("Edit description"), lambda: self.editItem(item, self.editable_columns[0]))
125 menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
126 menu.exec_(self.viewport().mapToGlobal(position))
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gui/qt/history_widget.py b/gui/qt/history_widget.py
--- a/gui/qt/history_widget.py
+++ b/gui/qt/history_widget.py
@@ -92,6 +92,7 @@
if i!=2:
item.setFont(i, QFont(MONOSPACE_FONT))
if value < 0:
+ item.setForeground(3, QBrush(QColor("#BC1E1E")))
item.setForeground(4, QBrush(QColor("#BC1E1E")))
if tx_hash:
item.setData(0, Qt.UserRole, tx_hash)
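The patch above extends the existing red-foreground rule from the Amount column (index 4) to the Description column (index 3) whenever a row's value is negative. A minimal sketch of the same idea, assuming the PyQt4 classes that Electrum's Qt GUI relies on (illustration only, not the project's code):

```python
# Sketch: tint both the Description (3) and Amount (4) columns of a history
# row red when the transaction value is negative, as the patch above does.
from PyQt4.QtGui import QBrush, QColor

OUTGOING_BRUSH = QBrush(QColor("#BC1E1E"))

def tint_outgoing_row(item, value):
    """Colour the label and amount cells of an outgoing QTreeWidgetItem."""
    if value < 0:
        for column in (3, 4):  # Description, Amount
            item.setForeground(column, OUTGOING_BRUSH)
```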
| {"golden_diff": "diff --git a/gui/qt/history_widget.py b/gui/qt/history_widget.py\n--- a/gui/qt/history_widget.py\n+++ b/gui/qt/history_widget.py\n@@ -92,6 +92,7 @@\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n+ item.setForeground(3, QBrush(QColor(\"#BC1E1E\")))\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n", "issue": "Feature request: Label colour - History Window\nThe transaction amount of outgoing transactions, when viewed in the history window, is coloured red. \n\nIt would be a nice if the label of these transactions could be coloured the same red colour so that they stand out more against the other incoming transactions. \n\nNot a big issue but would be a 'nice to have'.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Electrum - lightweight Bitcoin client\n# Copyright (C) 2015 Thomas Voegtlin\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge,\n# publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport webbrowser\n\nfrom util import *\nfrom electrum.i18n import _\nfrom electrum.util import block_explorer_URL, format_satoshis, format_time\nfrom electrum.plugins import run_hook\n\n\nclass HistoryWidget(MyTreeWidget):\n\n def __init__(self, parent=None):\n MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)\n self.refresh_headers()\n self.setColumnHidden(1, True)\n self.config = self.parent.config\n\n def refresh_headers(self):\n headers = ['', '', _('Date'), _('Description') , _('Amount'),\n _('Balance')]\n run_hook('history_tab_headers', headers)\n self.update_headers(headers)\n\n def get_icon(self, conf, timestamp):\n time_str = _(\"unknown\")\n if conf > 0:\n time_str = format_time(timestamp)\n if conf == -1:\n time_str = _('Not Verified')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf == 0:\n time_str = _('Unconfirmed')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf < 6:\n icon = QIcon(\":icons/clock%d.png\"%conf)\n else:\n icon = QIcon(\":icons/confirmed.png\")\n return icon, time_str\n\n def get_domain(self):\n '''Replaced in address_dialog.py'''\n return self.wallet.get_account_addresses(self.parent.current_account)\n\n def on_update(self):\n self.wallet = self.parent.wallet\n h = self.wallet.get_history(self.get_domain())\n\n item = self.currentItem()\n current_tx = item.data(0, Qt.UserRole).toString() if item else None\n self.clear()\n run_hook('history_tab_update_begin')\n for tx in h:\n tx_hash, conf, value, timestamp, balance = tx\n if conf is None and timestamp is None:\n 
continue # skip history in offline mode\n icon, time_str = self.get_icon(conf, timestamp)\n v_str = self.parent.format_amount(value, True, whitespaces=True)\n balance_str = self.parent.format_amount(balance, whitespaces=True)\n label = self.wallet.get_label(tx_hash)\n entry = ['', tx_hash, time_str, label, v_str, balance_str]\n run_hook('history_tab_update', tx, entry)\n item = QTreeWidgetItem(entry)\n item.setIcon(0, icon)\n for i in range(len(entry)):\n if i>3:\n item.setTextAlignment(i, Qt.AlignRight)\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n self.insertTopLevelItem(0, item)\n if current_tx == tx_hash:\n self.setCurrentItem(item)\n\n def update_item(self, tx_hash, conf, timestamp):\n icon, time_str = self.get_icon(conf, timestamp)\n items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)\n if items:\n item = items[0]\n item.setIcon(0, icon)\n item.setText(2, time_str)\n\n def create_menu(self, position):\n self.selectedIndexes()\n item = self.currentItem()\n if not item:\n return\n tx_hash = str(item.data(0, Qt.UserRole).toString())\n if not tx_hash:\n return\n tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)\n if not tx_URL:\n return\n menu = QMenu()\n menu.addAction(_(\"Copy ID to Clipboard\"), lambda: self.parent.app.clipboard().setText(tx_hash))\n menu.addAction(_(\"Details\"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))\n menu.addAction(_(\"Edit description\"), lambda: self.editItem(item, self.editable_columns[0]))\n menu.addAction(_(\"View on block explorer\"), lambda: webbrowser.open(tx_URL))\n menu.exec_(self.viewport().mapToGlobal(position))\n", "path": "gui/qt/history_widget.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Electrum - lightweight Bitcoin client\n# Copyright (C) 2015 Thomas Voegtlin\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge,\n# publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport webbrowser\n\nfrom util import *\nfrom electrum.i18n import _\nfrom electrum.util import block_explorer_URL, format_satoshis, format_time\nfrom electrum.plugins import run_hook\n\n\nclass HistoryWidget(MyTreeWidget):\n\n def __init__(self, parent=None):\n MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)\n self.refresh_headers()\n self.setColumnHidden(1, True)\n self.config = self.parent.config\n\n def refresh_headers(self):\n headers = ['', '', _('Date'), _('Description') , _('Amount'),\n _('Balance')]\n run_hook('history_tab_headers', headers)\n self.update_headers(headers)\n\n def get_icon(self, conf, timestamp):\n time_str = _(\"unknown\")\n if conf > 0:\n time_str = format_time(timestamp)\n if conf == -1:\n time_str = _('Not Verified')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf == 0:\n time_str = _('Unconfirmed')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf < 6:\n icon = QIcon(\":icons/clock%d.png\"%conf)\n else:\n icon = QIcon(\":icons/confirmed.png\")\n return icon, time_str\n\n def get_domain(self):\n '''Replaced in address_dialog.py'''\n return self.wallet.get_account_addresses(self.parent.current_account)\n\n def on_update(self):\n self.wallet = self.parent.wallet\n h = self.wallet.get_history(self.get_domain())\n\n item = self.currentItem()\n current_tx = item.data(0, Qt.UserRole).toString() if item else None\n self.clear()\n run_hook('history_tab_update_begin')\n for tx in h:\n tx_hash, conf, value, timestamp, balance = tx\n if conf is None and timestamp is None:\n continue # skip history in offline mode\n icon, time_str = self.get_icon(conf, timestamp)\n v_str = self.parent.format_amount(value, True, whitespaces=True)\n balance_str = self.parent.format_amount(balance, whitespaces=True)\n label = self.wallet.get_label(tx_hash)\n entry = ['', tx_hash, time_str, label, v_str, balance_str]\n run_hook('history_tab_update', tx, entry)\n item = QTreeWidgetItem(entry)\n item.setIcon(0, icon)\n for i in range(len(entry)):\n if i>3:\n item.setTextAlignment(i, Qt.AlignRight)\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n item.setForeground(3, QBrush(QColor(\"#BC1E1E\")))\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n self.insertTopLevelItem(0, item)\n if current_tx == tx_hash:\n self.setCurrentItem(item)\n\n def update_item(self, tx_hash, conf, timestamp):\n icon, time_str = self.get_icon(conf, timestamp)\n items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)\n if items:\n item = items[0]\n item.setIcon(0, icon)\n item.setText(2, time_str)\n\n def create_menu(self, position):\n self.selectedIndexes()\n item = self.currentItem()\n if not item:\n return\n tx_hash = str(item.data(0, Qt.UserRole).toString())\n if not tx_hash:\n return\n tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)\n if not tx_URL:\n return\n menu = QMenu()\n menu.addAction(_(\"Copy ID to Clipboard\"), lambda: self.parent.app.clipboard().setText(tx_hash))\n menu.addAction(_(\"Details\"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))\n menu.addAction(_(\"Edit description\"), lambda: self.editItem(item, self.editable_columns[0]))\n menu.addAction(_(\"View 
on block explorer\"), lambda: webbrowser.open(tx_URL))\n menu.exec_(self.viewport().mapToGlobal(position))\n", "path": "gui/qt/history_widget.py"}]} | 1,727 | 122 |
gh_patches_debug_41255 | rasdani/github-patches | git_diff | pantsbuild__pants-18035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`No such file or directory: 'local_dists.pex/PEX-INFO'` when running a `python_source`
**Describe the bug**
After updating one of our CI checks to `./pants run` a `python_source` directly (vs. the previous code which ran the `pex_binary`), some of our jobs started failing with:
```
Traceback (most recent call last):
File "/opt/python/3.8.14/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/opt/python/3.8.14/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py", line 89, in <module>
__venv_dir__ = __maybe_run_venv__(
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py", line 37, in __maybe_run_venv__
venv_dir = venv_dir(
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py", line 738, in venv_dir
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py", line 736, in add_pex_path_items
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/pex_info.py", line 82, in from_pex
FileNotFoundError: [Errno 2] No such file or directory: 'local_dists.pex/PEX-INFO'
```
The `python_source` has `run_goal_use_sandbox=False`.
**Pants version**
v2.15.0rc1
**OS**
Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/goals/run_helper.py`
Content:
```
1 # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from __future__ import annotations
4
5 import dataclasses
6 import os
7 import textwrap
8 from typing import Iterable, Optional
9
10 from pants.backend.python.subsystems.debugpy import DebugPy
11 from pants.backend.python.target_types import (
12 ConsoleScript,
13 PexEntryPointField,
14 ResolvedPexEntryPoint,
15 ResolvePexEntryPointRequest,
16 )
17 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
18 from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
19 from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest
20 from pants.backend.python.util_rules.pex_environment import PexEnvironment
21 from pants.backend.python.util_rules.pex_from_targets import (
22 InterpreterConstraintsRequest,
23 PexFromTargetsRequest,
24 )
25 from pants.backend.python.util_rules.python_sources import (
26 PythonSourceFiles,
27 PythonSourceFilesRequest,
28 )
29 from pants.core.goals.run import RunDebugAdapterRequest, RunRequest
30 from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
31 from pants.engine.addresses import Address
32 from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests
33 from pants.engine.rules import Get, MultiGet, rule_helper
34 from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
35
36
37 def _in_chroot(relpath: str) -> str:
38 return os.path.join("{chroot}", relpath)
39
40
41 @rule_helper
42 async def _create_python_source_run_request(
43 address: Address,
44 *,
45 entry_point_field: PexEntryPointField,
46 pex_env: PexEnvironment,
47 run_in_sandbox: bool,
48 pex_path: Iterable[Pex] = (),
49 console_script: Optional[ConsoleScript] = None,
50 ) -> RunRequest:
51 addresses = [address]
52 interpreter_constraints, entry_point, transitive_targets = await MultiGet(
53 Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),
54 Get(
55 ResolvedPexEntryPoint,
56 ResolvePexEntryPointRequest(entry_point_field),
57 ),
58 Get(TransitiveTargets, TransitiveTargetsRequest(addresses)),
59 )
60
61 pex_filename = (
62 address.generated_name.replace(".", "_") if address.generated_name else address.target_name
63 )
64
65 pex_request, sources = await MultiGet(
66 Get(
67 PexRequest,
68 PexFromTargetsRequest(
69 addresses,
70 output_filename=f"{pex_filename}.pex",
71 internal_only=True,
72 include_source_files=False,
73 # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.
74 main=console_script or entry_point.val,
75 additional_args=(
76 # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH
77 # below, it's important for any app that re-executes itself that these environment
78 # variables are not stripped.
79 "--no-strip-pex-env",
80 ),
81 ),
82 ),
83 Get(
84 PythonSourceFiles,
85 PythonSourceFilesRequest(transitive_targets.closure, include_files=True),
86 ),
87 )
88
89 local_dists = await Get(
90 LocalDistsPex,
91 LocalDistsPexRequest(
92 addresses,
93 internal_only=True,
94 interpreter_constraints=interpreter_constraints,
95 sources=sources,
96 ),
97 )
98 pex_request = dataclasses.replace(
99 pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)
100 )
101
102 if run_in_sandbox:
103 # Note that a RunRequest always expects to run directly in the sandbox/workspace
104 # root, hence working_directory=None.
105 complete_pex_environment = pex_env.in_sandbox(working_directory=None)
106 else:
107 complete_pex_environment = pex_env.in_workspace()
108 venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment))
109 input_digests = [
110 venv_pex.digest,
111 # Note regarding not-in-sandbox mode: You might think that the sources don't need to be copied
112 # into the chroot when using inline sources. But they do, because some of them might be
113 # codegenned, and those won't exist in the inline source tree. Rather than incurring the
114 # complexity of figuring out here which sources were codegenned, we copy everything.
115 # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline
116 # sources will take precedence and their copies in the chroot will be ignored.
117 local_dists.remaining_sources.source_files.snapshot.digest,
118 ]
119 merged_digest = await Get(Digest, MergeDigests(input_digests))
120
121 chrooted_source_roots = [_in_chroot(sr) for sr in sources.source_roots]
122 # The order here is important: we want the in-repo sources to take precedence over their
123 # copies in the sandbox (see above for why those copies exist even in non-sandboxed mode).
124 source_roots = [
125 *([] if run_in_sandbox else sources.source_roots),
126 *chrooted_source_roots,
127 ]
128 extra_env = {
129 **complete_pex_environment.environment_dict(python_configured=venv_pex.python is not None),
130 "PEX_EXTRA_SYS_PATH": os.pathsep.join(source_roots),
131 }
132
133 return RunRequest(
134 digest=merged_digest,
135 args=[_in_chroot(venv_pex.pex.argv0)],
136 extra_env=extra_env,
137 append_only_caches=complete_pex_environment.append_only_caches,
138 )
139
140
141 @rule_helper
142 async def _create_python_source_run_dap_request(
143 regular_run_request: RunRequest,
144 *,
145 debugpy: DebugPy,
146 debug_adapter: DebugAdapterSubsystem,
147 ) -> RunDebugAdapterRequest:
148 launcher_digest = await Get(
149 Digest,
150 CreateDigest(
151 [
152 FileContent(
153 "__debugpy_launcher.py",
154 textwrap.dedent(
155 """
156 import os
157 CHROOT = os.environ["PANTS_CHROOT"]
158
159 del os.environ["PEX_INTERPRETER"]
160
161 # See https://github.com/pantsbuild/pants/issues/17540
162 # For `run --debug-adapter`, the client might send a `pathMappings`
163 # (this is likely as VS Code likes to configure that by default) with
164 # a `remoteRoot` of ".". For `run`, CWD is set to the build root, so
165 # breakpoints set in-repo will never be hit. We fix this by monkeypatching
166 # pydevd (the library powering debugpy) so that a remoteRoot of "."
167 # means the sandbox root.
168
169 import debugpy._vendored.force_pydevd
170 from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor
171 orig_resolve_remote_root = PyDevJsonCommandProcessor._resolve_remote_root
172
173 def patched_resolve_remote_root(self, local_root, remote_root):
174 if remote_root == ".":
175 remote_root = CHROOT
176 return orig_resolve_remote_root(self, local_root, remote_root)
177
178 PyDevJsonCommandProcessor._resolve_remote_root = patched_resolve_remote_root
179
180 from debugpy.server import cli
181 cli.main()
182 """
183 ).encode("utf-8"),
184 ),
185 ]
186 ),
187 )
188
189 merged_digest = await Get(
190 Digest,
191 MergeDigests(
192 [
193 regular_run_request.digest,
194 launcher_digest,
195 ]
196 ),
197 )
198 extra_env = dict(regular_run_request.extra_env)
199 extra_env["PEX_INTERPRETER"] = "1"
200 extra_env["PANTS_CHROOT"] = _in_chroot("").rstrip("/")
201 args = [
202 regular_run_request.args[0], # python executable
203 _in_chroot("__debugpy_launcher.py"),
204 *debugpy.get_args(debug_adapter),
205 ]
206
207 return RunDebugAdapterRequest(
208 digest=merged_digest,
209 args=args,
210 extra_env=extra_env,
211 append_only_caches=regular_run_request.append_only_caches,
212 )
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/goals/run_helper.py b/src/python/pants/backend/python/goals/run_helper.py
--- a/src/python/pants/backend/python/goals/run_helper.py
+++ b/src/python/pants/backend/python/goals/run_helper.py
@@ -14,14 +14,9 @@
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest,
)
-from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
-from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest
from pants.backend.python.util_rules.pex_environment import PexEnvironment
-from pants.backend.python.util_rules.pex_from_targets import (
- InterpreterConstraintsRequest,
- PexFromTargetsRequest,
-)
+from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
@@ -49,8 +44,7 @@
console_script: Optional[ConsoleScript] = None,
) -> RunRequest:
addresses = [address]
- interpreter_constraints, entry_point, transitive_targets = await MultiGet(
- Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),
+ entry_point, transitive_targets = await MultiGet(
Get(
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest(entry_point_field),
@@ -70,6 +64,7 @@
output_filename=f"{pex_filename}.pex",
internal_only=True,
include_source_files=False,
+ include_local_dists=True,
# `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.
main=console_script or entry_point.val,
additional_args=(
@@ -86,18 +81,7 @@
),
)
- local_dists = await Get(
- LocalDistsPex,
- LocalDistsPexRequest(
- addresses,
- internal_only=True,
- interpreter_constraints=interpreter_constraints,
- sources=sources,
- ),
- )
- pex_request = dataclasses.replace(
- pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)
- )
+ pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path))
if run_in_sandbox:
# Note that a RunRequest always expects to run directly in the sandbox/workspace
@@ -114,7 +98,7 @@
# complexity of figuring out here which sources were codegenned, we copy everything.
# The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline
# sources will take precedence and their copies in the chroot will be ignored.
- local_dists.remaining_sources.source_files.snapshot.digest,
+ sources.source_files.snapshot.digest,
]
merged_digest = await Get(Digest, MergeDigests(input_digests))
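What the change amounts to: the run request previously built a separate `local_dists.pex` and added it to the main PEX's `pex_path`, so the generated PEX-INFO referenced a sibling `local_dists.pex` file. With `run_goal_use_sandbox=False` the program is launched from the workspace, where that sibling is never materialised, which matches the `local_dists.pex/PEX-INFO` FileNotFoundError in the report. The patch instead asks `PexFromTargetsRequest` to bake the local dists in via `include_local_dists=True` and ships the plain source files. A condensed excerpt of the fixed request construction (fragment only, not standalone-runnable; names follow the rule code shown above):

```python
# Fragment of _create_python_source_run_request after the fix (illustrative).
pex_request = await Get(
    PexRequest,
    PexFromTargetsRequest(
        addresses,
        output_filename=f"{pex_filename}.pex",
        internal_only=True,
        include_source_files=False,
        include_local_dists=True,  # local dists live inside this PEX now
        main=console_script or entry_point.val,
        additional_args=("--no-strip-pex-env",),
    ),
)
# No LocalDistsPexRequest and no sibling local_dists.pex on pex_path, so the
# runtime PEX-INFO no longer points at a file that is absent in the workspace.
```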
| {"golden_diff": "diff --git a/src/python/pants/backend/python/goals/run_helper.py b/src/python/pants/backend/python/goals/run_helper.py\n--- a/src/python/pants/backend/python/goals/run_helper.py\n+++ b/src/python/pants/backend/python/goals/run_helper.py\n@@ -14,14 +14,9 @@\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n )\n-from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\n-from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest\n from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest\n from pants.backend.python.util_rules.pex_environment import PexEnvironment\n-from pants.backend.python.util_rules.pex_from_targets import (\n- InterpreterConstraintsRequest,\n- PexFromTargetsRequest,\n-)\n+from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\n from pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n@@ -49,8 +44,7 @@\n console_script: Optional[ConsoleScript] = None,\n ) -> RunRequest:\n addresses = [address]\n- interpreter_constraints, entry_point, transitive_targets = await MultiGet(\n- Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),\n+ entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(entry_point_field),\n@@ -70,6 +64,7 @@\n output_filename=f\"{pex_filename}.pex\",\n internal_only=True,\n include_source_files=False,\n+ include_local_dists=True,\n # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.\n main=console_script or entry_point.val,\n additional_args=(\n@@ -86,18 +81,7 @@\n ),\n )\n \n- local_dists = await Get(\n- LocalDistsPex,\n- LocalDistsPexRequest(\n- addresses,\n- internal_only=True,\n- interpreter_constraints=interpreter_constraints,\n- sources=sources,\n- ),\n- )\n- pex_request = dataclasses.replace(\n- pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)\n- )\n+ pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path))\n \n if run_in_sandbox:\n # Note that a RunRequest always expects to run directly in the sandbox/workspace\n@@ -114,7 +98,7 @@\n # complexity of figuring out here which sources were codegenned, we copy everything.\n # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline\n # sources will take precedence and their copies in the chroot will be ignored.\n- local_dists.remaining_sources.source_files.snapshot.digest,\n+ sources.source_files.snapshot.digest,\n ]\n merged_digest = await Get(Digest, MergeDigests(input_digests))\n", "issue": "`No such file or directory: 'local_dists.pex/PEX-INFO'` when running a `python_source`\n**Describe the bug**\r\n\r\nAfter updating one of our CI checks to `./pants run` a `python_source` directly (vs. 
the previous code which ran the `pex_binary`), some of our jobs started failing with:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/python/3.8.14/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/opt/python/3.8.14/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py\", line 89, in <module>\r\n __venv_dir__ = __maybe_run_venv__(\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py\", line 37, in __maybe_run_venv__\r\n venv_dir = venv_dir(\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py\", line 738, in venv_dir\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py\", line 736, in add_pex_path_items\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/pex_info.py\", line 82, in from_pex\r\nFileNotFoundError: [Errno 2] No such file or directory: 'local_dists.pex/PEX-INFO'\r\n```\r\n\r\nThe `python_source` has `run_goal_use_sandbox=False`.\r\n\r\n**Pants version**\r\n\r\nv2.15.0rc1\r\n\r\n**OS**\r\n\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport dataclasses\nimport os\nimport textwrap\nfrom typing import Iterable, Optional\n\nfrom pants.backend.python.subsystems.debugpy import DebugPy\nfrom pants.backend.python.target_types import (\n ConsoleScript,\n PexEntryPointField,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import (\n InterpreterConstraintsRequest,\n PexFromTargetsRequest,\n)\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunDebugAdapterRequest, RunRequest\nfrom pants.core.subsystems.debug_adapter import DebugAdapterSubsystem\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, rule_helper\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\n\n\ndef _in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n\n@rule_helper\nasync def _create_python_source_run_request(\n address: Address,\n *,\n entry_point_field: PexEntryPointField,\n pex_env: PexEnvironment,\n run_in_sandbox: bool,\n pex_path: Iterable[Pex] = (),\n console_script: Optional[ConsoleScript] = None,\n) -> RunRequest:\n addresses = [address]\n interpreter_constraints, entry_point, transitive_targets = await MultiGet(\n Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(entry_point_field),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest(addresses)),\n )\n\n pex_filename = (\n address.generated_name.replace(\".\", \"_\") if address.generated_name else 
address.target_name\n )\n\n pex_request, sources = await MultiGet(\n Get(\n PexRequest,\n PexFromTargetsRequest(\n addresses,\n output_filename=f\"{pex_filename}.pex\",\n internal_only=True,\n include_source_files=False,\n # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.\n main=console_script or entry_point.val,\n additional_args=(\n # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH\n # below, it's important for any app that re-executes itself that these environment\n # variables are not stripped.\n \"--no-strip-pex-env\",\n ),\n ),\n ),\n Get(\n PythonSourceFiles,\n PythonSourceFilesRequest(transitive_targets.closure, include_files=True),\n ),\n )\n\n local_dists = await Get(\n LocalDistsPex,\n LocalDistsPexRequest(\n addresses,\n internal_only=True,\n interpreter_constraints=interpreter_constraints,\n sources=sources,\n ),\n )\n pex_request = dataclasses.replace(\n pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)\n )\n\n if run_in_sandbox:\n # Note that a RunRequest always expects to run directly in the sandbox/workspace\n # root, hence working_directory=None.\n complete_pex_environment = pex_env.in_sandbox(working_directory=None)\n else:\n complete_pex_environment = pex_env.in_workspace()\n venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment))\n input_digests = [\n venv_pex.digest,\n # Note regarding not-in-sandbox mode: You might think that the sources don't need to be copied\n # into the chroot when using inline sources. But they do, because some of them might be\n # codegenned, and those won't exist in the inline source tree. Rather than incurring the\n # complexity of figuring out here which sources were codegenned, we copy everything.\n # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline\n # sources will take precedence and their copies in the chroot will be ignored.\n local_dists.remaining_sources.source_files.snapshot.digest,\n ]\n merged_digest = await Get(Digest, MergeDigests(input_digests))\n\n chrooted_source_roots = [_in_chroot(sr) for sr in sources.source_roots]\n # The order here is important: we want the in-repo sources to take precedence over their\n # copies in the sandbox (see above for why those copies exist even in non-sandboxed mode).\n source_roots = [\n *([] if run_in_sandbox else sources.source_roots),\n *chrooted_source_roots,\n ]\n extra_env = {\n **complete_pex_environment.environment_dict(python_configured=venv_pex.python is not None),\n \"PEX_EXTRA_SYS_PATH\": os.pathsep.join(source_roots),\n }\n\n return RunRequest(\n digest=merged_digest,\n args=[_in_chroot(venv_pex.pex.argv0)],\n extra_env=extra_env,\n append_only_caches=complete_pex_environment.append_only_caches,\n )\n\n\n@rule_helper\nasync def _create_python_source_run_dap_request(\n regular_run_request: RunRequest,\n *,\n debugpy: DebugPy,\n debug_adapter: DebugAdapterSubsystem,\n) -> RunDebugAdapterRequest:\n launcher_digest = await Get(\n Digest,\n CreateDigest(\n [\n FileContent(\n \"__debugpy_launcher.py\",\n textwrap.dedent(\n \"\"\"\n import os\n CHROOT = os.environ[\"PANTS_CHROOT\"]\n\n del os.environ[\"PEX_INTERPRETER\"]\n\n # See https://github.com/pantsbuild/pants/issues/17540\n # For `run --debug-adapter`, the client might send a `pathMappings`\n # (this is likely as VS Code likes to configure that by default) with\n # a `remoteRoot` of \".\". For `run`, CWD is set to the build root, so\n # breakpoints set in-repo will never be hit. 
We fix this by monkeypatching\n # pydevd (the library powering debugpy) so that a remoteRoot of \".\"\n # means the sandbox root.\n\n import debugpy._vendored.force_pydevd\n from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor\n orig_resolve_remote_root = PyDevJsonCommandProcessor._resolve_remote_root\n\n def patched_resolve_remote_root(self, local_root, remote_root):\n if remote_root == \".\":\n remote_root = CHROOT\n return orig_resolve_remote_root(self, local_root, remote_root)\n\n PyDevJsonCommandProcessor._resolve_remote_root = patched_resolve_remote_root\n\n from debugpy.server import cli\n cli.main()\n \"\"\"\n ).encode(\"utf-8\"),\n ),\n ]\n ),\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [\n regular_run_request.digest,\n launcher_digest,\n ]\n ),\n )\n extra_env = dict(regular_run_request.extra_env)\n extra_env[\"PEX_INTERPRETER\"] = \"1\"\n extra_env[\"PANTS_CHROOT\"] = _in_chroot(\"\").rstrip(\"/\")\n args = [\n regular_run_request.args[0], # python executable\n _in_chroot(\"__debugpy_launcher.py\"),\n *debugpy.get_args(debug_adapter),\n ]\n\n return RunDebugAdapterRequest(\n digest=merged_digest,\n args=args,\n extra_env=extra_env,\n append_only_caches=regular_run_request.append_only_caches,\n )\n", "path": "src/python/pants/backend/python/goals/run_helper.py"}], "after_files": [{"content": "# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport dataclasses\nimport os\nimport textwrap\nfrom typing import Iterable, Optional\n\nfrom pants.backend.python.subsystems.debugpy import DebugPy\nfrom pants.backend.python.target_types import (\n ConsoleScript,\n PexEntryPointField,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunDebugAdapterRequest, RunRequest\nfrom pants.core.subsystems.debug_adapter import DebugAdapterSubsystem\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, rule_helper\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\n\n\ndef _in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n\n@rule_helper\nasync def _create_python_source_run_request(\n address: Address,\n *,\n entry_point_field: PexEntryPointField,\n pex_env: PexEnvironment,\n run_in_sandbox: bool,\n pex_path: Iterable[Pex] = (),\n console_script: Optional[ConsoleScript] = None,\n) -> RunRequest:\n addresses = [address]\n entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(entry_point_field),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest(addresses)),\n )\n\n pex_filename = (\n address.generated_name.replace(\".\", \"_\") if address.generated_name else address.target_name\n )\n\n pex_request, sources = await MultiGet(\n Get(\n PexRequest,\n PexFromTargetsRequest(\n addresses,\n output_filename=f\"{pex_filename}.pex\",\n internal_only=True,\n include_source_files=False,\n include_local_dists=True,\n # 
`PEX_EXTRA_SYS_PATH` should contain this entry_point's module.\n main=console_script or entry_point.val,\n additional_args=(\n # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH\n # below, it's important for any app that re-executes itself that these environment\n # variables are not stripped.\n \"--no-strip-pex-env\",\n ),\n ),\n ),\n Get(\n PythonSourceFiles,\n PythonSourceFilesRequest(transitive_targets.closure, include_files=True),\n ),\n )\n\n pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path))\n\n if run_in_sandbox:\n # Note that a RunRequest always expects to run directly in the sandbox/workspace\n # root, hence working_directory=None.\n complete_pex_environment = pex_env.in_sandbox(working_directory=None)\n else:\n complete_pex_environment = pex_env.in_workspace()\n venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment))\n input_digests = [\n venv_pex.digest,\n # Note regarding not-in-sandbox mode: You might think that the sources don't need to be copied\n # into the chroot when using inline sources. But they do, because some of them might be\n # codegenned, and those won't exist in the inline source tree. Rather than incurring the\n # complexity of figuring out here which sources were codegenned, we copy everything.\n # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline\n # sources will take precedence and their copies in the chroot will be ignored.\n sources.source_files.snapshot.digest,\n ]\n merged_digest = await Get(Digest, MergeDigests(input_digests))\n\n chrooted_source_roots = [_in_chroot(sr) for sr in sources.source_roots]\n # The order here is important: we want the in-repo sources to take precedence over their\n # copies in the sandbox (see above for why those copies exist even in non-sandboxed mode).\n source_roots = [\n *([] if run_in_sandbox else sources.source_roots),\n *chrooted_source_roots,\n ]\n extra_env = {\n **complete_pex_environment.environment_dict(python_configured=venv_pex.python is not None),\n \"PEX_EXTRA_SYS_PATH\": os.pathsep.join(source_roots),\n }\n\n return RunRequest(\n digest=merged_digest,\n args=[_in_chroot(venv_pex.pex.argv0)],\n extra_env=extra_env,\n append_only_caches=complete_pex_environment.append_only_caches,\n )\n\n\n@rule_helper\nasync def _create_python_source_run_dap_request(\n regular_run_request: RunRequest,\n *,\n debugpy: DebugPy,\n debug_adapter: DebugAdapterSubsystem,\n) -> RunDebugAdapterRequest:\n launcher_digest = await Get(\n Digest,\n CreateDigest(\n [\n FileContent(\n \"__debugpy_launcher.py\",\n textwrap.dedent(\n \"\"\"\n import os\n CHROOT = os.environ[\"PANTS_CHROOT\"]\n\n del os.environ[\"PEX_INTERPRETER\"]\n\n # See https://github.com/pantsbuild/pants/issues/17540\n # For `run --debug-adapter`, the client might send a `pathMappings`\n # (this is likely as VS Code likes to configure that by default) with\n # a `remoteRoot` of \".\". For `run`, CWD is set to the build root, so\n # breakpoints set in-repo will never be hit. 
We fix this by monkeypatching\n # pydevd (the library powering debugpy) so that a remoteRoot of \".\"\n # means the sandbox root.\n\n import debugpy._vendored.force_pydevd\n from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor\n orig_resolve_remote_root = PyDevJsonCommandProcessor._resolve_remote_root\n\n def patched_resolve_remote_root(self, local_root, remote_root):\n if remote_root == \".\":\n remote_root = CHROOT\n return orig_resolve_remote_root(self, local_root, remote_root)\n\n PyDevJsonCommandProcessor._resolve_remote_root = patched_resolve_remote_root\n\n from debugpy.server import cli\n cli.main()\n \"\"\"\n ).encode(\"utf-8\"),\n ),\n ]\n ),\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [\n regular_run_request.digest,\n launcher_digest,\n ]\n ),\n )\n extra_env = dict(regular_run_request.extra_env)\n extra_env[\"PEX_INTERPRETER\"] = \"1\"\n extra_env[\"PANTS_CHROOT\"] = _in_chroot(\"\").rstrip(\"/\")\n args = [\n regular_run_request.args[0], # python executable\n _in_chroot(\"__debugpy_launcher.py\"),\n *debugpy.get_args(debug_adapter),\n ]\n\n return RunDebugAdapterRequest(\n digest=merged_digest,\n args=args,\n extra_env=extra_env,\n append_only_caches=regular_run_request.append_only_caches,\n )\n", "path": "src/python/pants/backend/python/goals/run_helper.py"}]} | 3,022 | 681 |
gh_patches_debug_627 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Terraform parsing error string with escaped backslash at the end
**Describe the issue**
Checkov crashes if it encounters an escaped backslash (`"\\"`) at the end of a string.
**Examples**
Minimal example to reproduce the error:
```terraform
variable "slash" {
default = "\\"
}
output "slash" {
value = var.slash
}
```
`terraform validate` sees this configuration as valid, but checkov fails with a parsing error.
This only happens when the last character of the string is the escaped backslash, as the parser assumes the closing quotation mark is escaped. Adding any normal character at the end of the string doesn't trigger this error.
```terraform
variable "slash" {
default = "\\"
}
```
**Exception Trace**
Relevant traceback
```sh
> LOG_LEVEL=DEBUG checkov -d .
[...]
[MainThread ] [DEBUG] failed while parsing file /workdir/main.tf
Traceback (most recent call last):
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/checkov/terraform/parser.py", line 726, in _load_or_die_quietly
raw_data = hcl2.load(f)
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py", line 12, in load
return loads(file.read())
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py", line 80, in loads
raise ValueError(f"Line has unclosed quote marks: {line}")
ValueError: Line has unclosed quote marks: default = "\\"
[...]
```
**Desktop (please complete the following information):**
- OS: MacOS 12.3.1 (Intel)
- Checkov Version: 2.0.1230
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2==0.3.42",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions>=4.1.0",
60 "cachetools",
61 "cyclonedx-python-lib>=2.4.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0",
68 "prettytable>=3.0.0",
69 "pycep-parser==0.3.7",
70 "charset-normalizer",
71 ],
72 license="Apache License 2.0",
73 name="checkov",
74 version=version,
75 python_requires=">=3.7",
76 description="Infrastructure as code static analysis",
77 author="bridgecrew",
78 author_email="[email protected]",
79 url="https://github.com/bridgecrewio/checkov",
80 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
81 include_package_data=True,
82 package_dir={
83 "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
84 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
85 },
86 package_data={
87 "checkov": ["py.typed"],
88 "checkov.bicep.checks.graph_checks": ["*.yaml"],
89 "checkov.common.util.templates": ["*.jinja2"],
90 "checkov.terraform.checks.graph_checks": [
91 "aws/*.yaml",
92 "gcp/*.yaml",
93 "azure/*.yaml",
94 ],
95 },
96 scripts=["bin/checkov", "bin/checkov.cmd"],
97 long_description=long_description,
98 long_description_content_type="text/markdown",
99 classifiers=[
100 "Environment :: Console",
101 "Intended Audience :: Developers",
102 "Intended Audience :: System Administrators",
103 "License :: OSI Approved :: Apache Software License",
104 "Programming Language :: Python :: 3 :: Only",
105 "Programming Language :: Python :: 3.7",
106 "Programming Language :: Python :: 3.8",
107 "Programming Language :: Python :: 3.9",
108 "Programming Language :: Python :: 3.10",
109 "Topic :: Security",
110 "Topic :: Software Development :: Build Tools",
111 "Typing :: Typed",
112 ],
113 )
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
]
},
install_requires=[
- "bc-python-hcl2==0.3.42",
+ "bc-python-hcl2==0.3.44",
"cloudsplaining>=0.4.1",
"deep_merge",
"tabulate",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n ]\n },\n install_requires=[\n- \"bc-python-hcl2==0.3.42\",\n+ \"bc-python-hcl2==0.3.44\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n", "issue": "Terraform parsing error string with escaped backslash at the end\n**Describe the issue**\r\nCheckov crashes if it encounters an escaped backslash (`\"\\\\\"`) at the end of a string.\r\n\r\n**Examples**\r\nMinimal example to reproduce the error:\r\n```terraform\r\nvariable \"slash\" {\r\n default = \"\\\\\"\r\n}\r\n\r\noutput \"slash\" {\r\n value = var.slash\r\n}\r\n```\r\n`terraform validate` sees this configuration as valid, but checkov fails with a parsing error.\r\n\r\nThis only happens when the last character of the string is the escaped backslash, as the parser assumes the closing quotation mark is escaped. Adding any normal character at the end of the string doesn't trigger this error.\r\n```terraform\r\nvariable \"slash\" {\r\n default = \"\\\\\"\r\n}\r\n```\r\n\r\n**Exception Trace**\r\nRelevant traceback\r\n```sh\r\n> LOG_LEVEL=DEBUG checkov -d .\r\n[...]\r\n[MainThread ] [DEBUG] failed while parsing file /workdir/main.tf\r\nTraceback (most recent call last):\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/checkov/terraform/parser.py\", line 726, in _load_or_die_quietly\r\n raw_data = hcl2.load(f)\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py\", line 12, in load\r\n return loads(file.read())\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py\", line 80, in loads\r\n raise ValueError(f\"Line has unclosed quote marks: {line}\")\r\nValueError: Line has unclosed quote marks: default = \"\\\\\"\r\n[...]\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 12.3.1 (Intel)\r\n - Checkov Version: 2.0.1230\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.42\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.7\",\n 
\"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.44\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.7\",\n \"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n 
include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]} | 1,765 | 94 |
gh_patches_debug_25963 | rasdani/github-patches | git_diff | pymedusa__Medusa-3131 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Login error
```
Error
unhashable type: 'list'
Traceback
Traceback (most recent call last):
File "/home/pi/Medusa/ext/tornado/web.py", line 1509, in _execute result = method(*self.path_args, **self.path_kwargs)
File "/home/pi/Medusa/medusa/server/web/core/authentication.py", line 73, in post notifiers.notify_login(self.request.remote_ip)
File "/home/pi/Medusa/medusa/notifiers/__init__.py", line 127, in notify_login n.notify_login(ipaddress)
File "/home/pi/Medusa/medusa/notifiers/nma.py", line 44, in notify_login self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))
File "/home/pi/Medusa/medusa/notifiers/nma.py", line 74, in _sendNMA if not response[nma_api][u'code'] == u'200':
TypeError: unhashable type: 'list'
Request Info
body: username=supergonkas&password=&remember_me=1&submit=Login
files: {}
protocol: http
connection:
body_arguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login']}
uri: /login/?next=%2Fhistory%2F
query_arguments: {'next': ['/history/']}
_start_time: 1505997382.06
headers: Origin: http://:8081 Save-Data: on Content-Length: 61 Accept-Language: pt-PT,pt;q=0.8,en-US;q=0.6,en;q=0.4,es;q=0.2 Accept-Encoding: gzip, deflate Connection: keep-alive Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 Upgrade-Insecure-Requests: 1 Dnt: 1 Host::8081 Referer: http://:8081/login/?next=%2Fhistory%2F Cache-Control: max-age=0 User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.116 Safari/537.36 Content-Type: application/x-www-form-urlencoded
host: :8081
version: HTTP/1.1
server_connection:
host_name: s
_finish_time: None
query: next=%2Fhistory%2F
arguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login'], 'next': ['/history/']}
path: /login/
method: POST
remote_ip: 194.210.190.12
View Log(Errors)
```
--- END ISSUE ---
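The traceback shows that `app.NMA_API` reaches `_sendNMA` as a list, so `response[nma_api]` attempts to use a list as a dictionary key. Below is a minimal sketch of the failure and of the lookup that works instead, assuming `pynma` keys its response dict by the comma-joined API-key string (the key value shown is hypothetical):

```python
nma_api = ["0123456789abcdef"]  # hypothetical API key, stored as a one-element list

# Assumption: pynma reports per-key results keyed by the comma-joined key string.
response = {",".join(nma_api): {"code": "200"}}

try:
    response[nma_api]  # a list is not hashable, so it cannot be used as a dict key
except TypeError as err:
    print(err)  # unhashable type: 'list'

print(response[",".join(nma_api)]["code"])  # '200'
```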
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/notifiers/nma.py`
Content:
```
1 # coding=utf-8
2
3 import logging
4
5 from medusa import app, common
6 from medusa.logger.adapters.style import BraceAdapter
7
8 from pynma import pynma
9
10 log = BraceAdapter(logging.getLogger(__name__))
11 log.logger.addHandler(logging.NullHandler())
12
13
14 class Notifier(object):
15 def test_notify(self, nma_api, nma_priority):
16 return self._sendNMA(nma_api, nma_priority, event='Test', message='Testing NMA settings from Medusa',
17 force=True)
18
19 def notify_snatch(self, ep_name, is_proper):
20 if app.NMA_NOTIFY_ONSNATCH:
21 self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]],
22 message=ep_name)
23
24 def notify_download(self, ep_name):
25 if app.NMA_NOTIFY_ONDOWNLOAD:
26 self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD],
27 message=ep_name)
28
29 def notify_subtitle_download(self, ep_name, lang):
30 if app.NMA_NOTIFY_ONSUBTITLEDOWNLOAD:
31 self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],
32 message=ep_name + ': ' + lang)
33
34 def notify_git_update(self, new_version='??'):
35 if app.USE_NMA:
36 update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
37 title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
38 self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text + new_version)
39
40 def notify_login(self, ipaddress=''):
41 if app.USE_NMA:
42 update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
43 title = common.notifyStrings[common.NOTIFY_LOGIN]
44 self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))
45
46 def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):
47
48 title = 'Medusa'
49
50 if not app.USE_NMA and not force:
51 return False
52
53 if nma_api is None:
54 nma_api = app.NMA_API
55 else:
56 nma_api = nma_api.split(',')
57
58 if nma_priority is None:
59 nma_priority = app.NMA_PRIORITY
60
61 batch = False
62
63 p = pynma.PyNMA()
64 keys = nma_api
65 p.addkey(keys)
66
67 if len(keys) > 1:
68 batch = True
69
70 log.debug(u'NMA: Sending notice with details: event="{0}, message="{1}", priority={2}, batch={3}',
71 event, message, nma_priority, batch)
72 response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)
73
74 if not response[nma_api][u'code'] == u'200':
75 log.error(u'Could not send notification to NotifyMyAndroid')
76 return False
77 else:
78 log.info(u'NMA: Notification sent to NotifyMyAndroid')
79 return True
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/notifiers/nma.py b/medusa/notifiers/nma.py
--- a/medusa/notifiers/nma.py
+++ b/medusa/notifiers/nma.py
@@ -4,8 +4,8 @@
from medusa import app, common
from medusa.logger.adapters.style import BraceAdapter
-
from pynma import pynma
+from six import text_type
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
@@ -52,8 +52,8 @@
if nma_api is None:
nma_api = app.NMA_API
- else:
- nma_api = nma_api.split(',')
+ elif isinstance(nma_api, text_type):
+ nma_api = [nma_api]
if nma_priority is None:
nma_priority = app.NMA_PRIORITY
@@ -71,7 +71,7 @@
event, message, nma_priority, batch)
response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)
- if not response[nma_api][u'code'] == u'200':
+ if not response[','.join(nma_api)][u'code'] == u'200':
log.error(u'Could not send notification to NotifyMyAndroid')
return False
else:
| {"golden_diff": "diff --git a/medusa/notifiers/nma.py b/medusa/notifiers/nma.py\n--- a/medusa/notifiers/nma.py\n+++ b/medusa/notifiers/nma.py\n@@ -4,8 +4,8 @@\n \n from medusa import app, common\n from medusa.logger.adapters.style import BraceAdapter\n-\n from pynma import pynma\n+from six import text_type\n \n log = BraceAdapter(logging.getLogger(__name__))\n log.logger.addHandler(logging.NullHandler())\n@@ -52,8 +52,8 @@\n \n if nma_api is None:\n nma_api = app.NMA_API\n- else:\n- nma_api = nma_api.split(',')\n+ elif isinstance(nma_api, text_type):\n+ nma_api = [nma_api]\n \n if nma_priority is None:\n nma_priority = app.NMA_PRIORITY\n@@ -71,7 +71,7 @@\n event, message, nma_priority, batch)\n response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)\n \n- if not response[nma_api][u'code'] == u'200':\n+ if not response[','.join(nma_api)][u'code'] == u'200':\n log.error(u'Could not send notification to NotifyMyAndroid')\n return False\n else:\n", "issue": "Login error\n```\r\nError\r\nunhashable type: 'list'\r\nTraceback\r\nTraceback (most recent call last): \r\nFile \"/home/pi/Medusa/ext/tornado/web.py\", line 1509, in _execute result = method(*self.path_args, **self.path_kwargs) \r\nFile \"/home/pi/Medusa/medusa/server/web/core/authentication.py\", line 73, in post notifiers.notify_login(self.request.remote_ip) \r\nFile \"/home/pi/Medusa/medusa/notifiers/__init__.py\", line 127, in notify_login n.notify_login(ipaddress)\r\nFile \"/home/pi/Medusa/medusa/notifiers/nma.py\", line 44, in notify_login self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress)) \r\nFile \"/home/pi/Medusa/medusa/notifiers/nma.py\", line 74, in _sendNMA if not response[nma_api][u'code'] == u'200': \r\nTypeError: unhashable type: 'list' \r\nRequest Info\r\nbody: username=supergonkas&password=&remember_me=1&submit=Login\r\nfiles: {}\r\nprotocol: http\r\nconnection: \r\nbody_arguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login']}\r\nuri: /login/?next=%2Fhistory%2F\r\nquery_arguments: {'next': ['/history/']}\r\n_start_time: 1505997382.06\r\nheaders: Origin: http://:8081 Save-Data: on Content-Length: 61 Accept-Language: pt-PT,pt;q=0.8,en-US;q=0.6,en;q=0.4,es;q=0.2 Accept-Encoding: gzip, deflate Connection: keep-alive Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 Upgrade-Insecure-Requests: 1 Dnt: 1 Host::8081 Referer: http://:8081/login/?next=%2Fhistory%2F Cache-Control: max-age=0 User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.116 Safari/537.36 Content-Type: application/x-www-form-urlencoded \r\nhost: :8081\r\nversion: HTTP/1.1\r\nserver_connection: \r\nhost_name: s\r\n_finish_time: None\r\nquery: next=%2Fhistory%2F\r\narguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login'], 'next': ['/history/']}\r\npath: /login/\r\nmethod: POST\r\nremote_ip: 194.210.190.12\r\nView Log(Errors)\r\n```\n", "before_files": [{"content": "# coding=utf-8\n\nimport logging\n\nfrom medusa import app, common\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom pynma import pynma\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, nma_api, nma_priority):\n return self._sendNMA(nma_api, nma_priority, event='Test', message='Testing NMA settings from 
Medusa',\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.NMA_NOTIFY_ONSNATCH:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]],\n message=ep_name)\n\n def notify_download(self, ep_name):\n if app.NMA_NOTIFY_ONDOWNLOAD:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD],\n message=ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.NMA_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],\n message=ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n if app.USE_NMA:\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n if app.USE_NMA:\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))\n\n def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):\n\n title = 'Medusa'\n\n if not app.USE_NMA and not force:\n return False\n\n if nma_api is None:\n nma_api = app.NMA_API\n else:\n nma_api = nma_api.split(',')\n\n if nma_priority is None:\n nma_priority = app.NMA_PRIORITY\n\n batch = False\n\n p = pynma.PyNMA()\n keys = nma_api\n p.addkey(keys)\n\n if len(keys) > 1:\n batch = True\n\n log.debug(u'NMA: Sending notice with details: event=\"{0}, message=\"{1}\", priority={2}, batch={3}',\n event, message, nma_priority, batch)\n response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)\n\n if not response[nma_api][u'code'] == u'200':\n log.error(u'Could not send notification to NotifyMyAndroid')\n return False\n else:\n log.info(u'NMA: Notification sent to NotifyMyAndroid')\n return True\n", "path": "medusa/notifiers/nma.py"}], "after_files": [{"content": "# coding=utf-8\n\nimport logging\n\nfrom medusa import app, common\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom pynma import pynma\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, nma_api, nma_priority):\n return self._sendNMA(nma_api, nma_priority, event='Test', message='Testing NMA settings from Medusa',\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.NMA_NOTIFY_ONSNATCH:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]],\n message=ep_name)\n\n def notify_download(self, ep_name):\n if app.NMA_NOTIFY_ONDOWNLOAD:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD],\n message=ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.NMA_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],\n message=ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n if app.USE_NMA:\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = 
common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n if app.USE_NMA:\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))\n\n def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):\n\n title = 'Medusa'\n\n if not app.USE_NMA and not force:\n return False\n\n if nma_api is None:\n nma_api = app.NMA_API\n elif isinstance(nma_api, text_type):\n nma_api = [nma_api]\n\n if nma_priority is None:\n nma_priority = app.NMA_PRIORITY\n\n batch = False\n\n p = pynma.PyNMA()\n keys = nma_api\n p.addkey(keys)\n\n if len(keys) > 1:\n batch = True\n\n log.debug(u'NMA: Sending notice with details: event=\"{0}, message=\"{1}\", priority={2}, batch={3}',\n event, message, nma_priority, batch)\n response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)\n\n if not response[','.join(nma_api)][u'code'] == u'200':\n log.error(u'Could not send notification to NotifyMyAndroid')\n return False\n else:\n log.info(u'NMA: Notification sent to NotifyMyAndroid')\n return True\n", "path": "medusa/notifiers/nma.py"}]} | 1,798 | 304 |
gh_patches_debug_16829 | rasdani/github-patches | git_diff | pyload__pyload-1412 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Smoozed
Hello,
Smoozed is free at the moment for 10GB a day, but the hook says that it is not a premium account.
Would be great if somebody could have a look at it.
Thanks
--- END ISSUE ---
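The plugin derives the premium flag solely from the `user_premium` expiry, so an account that only has the free daily allowance is reported as non-premium. A small sketch of an adjusted check, assuming the Smoozed API also exposes a `user_trial` expiry timestamp for the free allowance (a field name taken from the patch in this record, not independently verified):

```python
import time

def is_premium(user_data):
    # Treat a paid premium period or an active trial/free allowance as premium.
    premium_until = float(user_data.get("user_premium", 0))
    trial_until = float(user_data.get("user_trial", 0))
    return premium_until > time.time() or trial_until > time.time()

print(is_premium({"user_premium": 0, "user_trial": time.time() + 3600}))  # True
print(is_premium({"user_premium": 0, "user_trial": 0}))                   # False
```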
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/accounts/SmoozedCom.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import hashlib
4 import time
5
6 try:
7 from beaker.crypto.pbkdf2 import PBKDF2
8
9 except ImportError:
10 from beaker.crypto.pbkdf2 import pbkdf2
11 from binascii import b2a_hex
12
13 class PBKDF2(object):
14 def __init__(self, passphrase, salt, iterations=1000):
15 self.passphrase = passphrase
16 self.salt = salt
17 self.iterations = iterations
18
19 def hexread(self, octets):
20 return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))
21
22 from module.common.json_layer import json_loads
23 from module.plugins.Account import Account
24
25
26 class SmoozedCom(Account):
27 __name__ = "SmoozedCom"
28 __type__ = "account"
29 __version__ = "0.04"
30
31 __description__ = """Smoozed.com account plugin"""
32 __license__ = "GPLv3"
33 __authors__ = [("", "")]
34
35
36 def loadAccountInfo(self, user, req):
37 # Get user data from premiumize.me
38 status = self.getAccountStatus(user, req)
39
40 self.logDebug(status)
41
42 if status['state'] != 'ok':
43 info = {'validuntil' : None,
44 'trafficleft': None,
45 'premium' : False}
46 else:
47 # Parse account info
48 info = {'validuntil' : float(status["data"]["user"]["user_premium"]),
49 'trafficleft': max(0, status["data"]["traffic"][1] - status["data"]["traffic"][0]),
50 'session' : status["data"]["session_key"],
51 'hosters' : [hoster["name"] for hoster in status["data"]["hoster"]]}
52
53 if info['validuntil'] < time.time():
54 info['premium'] = False
55 else:
56 info['premium'] = True
57
58 return info
59
60
61 def login(self, user, data, req):
62 # Get user data from premiumize.me
63 status = self.getAccountStatus(user, req)
64
65 # Check if user and password are valid
66 if status['state'] != 'ok':
67 self.wrongPassword()
68
69
70 def getAccountStatus(self, user, req):
71 password = self.getAccountData(user)['password']
72 salt = hashlib.sha256(password).hexdigest()
73 encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)
74
75 return json_loads(req.load("http://www2.smoozed.com/api/login",
76 get={'auth': user, 'password': encrypted}))
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/module/plugins/accounts/SmoozedCom.py b/module/plugins/accounts/SmoozedCom.py
--- a/module/plugins/accounts/SmoozedCom.py
+++ b/module/plugins/accounts/SmoozedCom.py
@@ -34,7 +34,6 @@
def loadAccountInfo(self, user, req):
- # Get user data from premiumize.me
status = self.getAccountStatus(user, req)
self.logDebug(status)
@@ -51,7 +50,10 @@
'hosters' : [hoster["name"] for hoster in status["data"]["hoster"]]}
if info['validuntil'] < time.time():
- info['premium'] = False
+ if float(status["data"]["user"].get("user_trial", 0)) > time.time():
+ info['premium'] = True
+ else:
+ info['premium'] = False
else:
info['premium'] = True
| {"golden_diff": "diff --git a/module/plugins/accounts/SmoozedCom.py b/module/plugins/accounts/SmoozedCom.py\n--- a/module/plugins/accounts/SmoozedCom.py\n+++ b/module/plugins/accounts/SmoozedCom.py\n@@ -34,7 +34,6 @@\n \n \n def loadAccountInfo(self, user, req):\n- # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n \n self.logDebug(status)\n@@ -51,7 +50,10 @@\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n \n if info['validuntil'] < time.time():\n- info['premium'] = False\n+ if float(status[\"data\"][\"user\"].get(\"user_trial\", 0)) > time.time():\n+ info['premium'] = True\n+ else:\n+ info['premium'] = False\n else:\n info['premium'] = True\n", "issue": "Smoozed\nHello,\n\nSmooth is free in the moment for 10GB a day but the hooks said that it is not a premium account.\n\nWould be great if somebody can have a look on it.\n\nThanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport hashlib\nimport time\n\ntry:\n from beaker.crypto.pbkdf2 import PBKDF2\n\nexcept ImportError:\n from beaker.crypto.pbkdf2 import pbkdf2\n from binascii import b2a_hex\n\n class PBKDF2(object):\n def __init__(self, passphrase, salt, iterations=1000):\n self.passphrase = passphrase\n self.salt = salt\n self.iterations = iterations\n\n def hexread(self, octets):\n return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Account import Account\n\n\nclass SmoozedCom(Account):\n __name__ = \"SmoozedCom\"\n __type__ = \"account\"\n __version__ = \"0.04\"\n\n __description__ = \"\"\"Smoozed.com account plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"\", \"\")]\n\n\n def loadAccountInfo(self, user, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n self.logDebug(status)\n\n if status['state'] != 'ok':\n info = {'validuntil' : None,\n 'trafficleft': None,\n 'premium' : False}\n else:\n # Parse account info\n info = {'validuntil' : float(status[\"data\"][\"user\"][\"user_premium\"]),\n 'trafficleft': max(0, status[\"data\"][\"traffic\"][1] - status[\"data\"][\"traffic\"][0]),\n 'session' : status[\"data\"][\"session_key\"],\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n\n if info['validuntil'] < time.time():\n info['premium'] = False\n else:\n info['premium'] = True\n\n return info\n\n\n def login(self, user, data, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n # Check if user and password are valid\n if status['state'] != 'ok':\n self.wrongPassword()\n\n\n def getAccountStatus(self, user, req):\n password = self.getAccountData(user)['password']\n salt = hashlib.sha256(password).hexdigest()\n encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)\n\n return json_loads(req.load(\"http://www2.smoozed.com/api/login\",\n get={'auth': user, 'password': encrypted}))\n", "path": "module/plugins/accounts/SmoozedCom.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport hashlib\nimport time\n\ntry:\n from beaker.crypto.pbkdf2 import PBKDF2\n\nexcept ImportError:\n from beaker.crypto.pbkdf2 import pbkdf2\n from binascii import b2a_hex\n\n class PBKDF2(object):\n def __init__(self, passphrase, salt, iterations=1000):\n self.passphrase = passphrase\n self.salt = salt\n self.iterations = iterations\n\n def hexread(self, octets):\n return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))\n\nfrom 
module.common.json_layer import json_loads\nfrom module.plugins.Account import Account\n\n\nclass SmoozedCom(Account):\n __name__ = \"SmoozedCom\"\n __type__ = \"account\"\n __version__ = \"0.04\"\n\n __description__ = \"\"\"Smoozed.com account plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"\", \"\")]\n\n\n def loadAccountInfo(self, user, req):\n status = self.getAccountStatus(user, req)\n\n self.logDebug(status)\n\n if status['state'] != 'ok':\n info = {'validuntil' : None,\n 'trafficleft': None,\n 'premium' : False}\n else:\n # Parse account info\n info = {'validuntil' : float(status[\"data\"][\"user\"][\"user_premium\"]),\n 'trafficleft': max(0, status[\"data\"][\"traffic\"][1] - status[\"data\"][\"traffic\"][0]),\n 'session' : status[\"data\"][\"session_key\"],\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n\n if info['validuntil'] < time.time():\n if float(status[\"data\"][\"user\"].get(\"user_trial\", 0)) > time.time():\n info['premium'] = True\n else:\n info['premium'] = False\n else:\n info['premium'] = True\n\n return info\n\n\n def login(self, user, data, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n # Check if user and password are valid\n if status['state'] != 'ok':\n self.wrongPassword()\n\n\n def getAccountStatus(self, user, req):\n password = self.getAccountData(user)['password']\n salt = hashlib.sha256(password).hexdigest()\n encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)\n\n return json_loads(req.load(\"http://www2.smoozed.com/api/login\",\n get={'auth': user, 'password': encrypted}))\n", "path": "module/plugins/accounts/SmoozedCom.py"}]} | 1,053 | 214 |
gh_patches_debug_6986 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MAE can't handle MPS backend
## 🐛 Bug
Currently, the mean absolute error can't handle the MPS backend. This is a simple fix and just requires casting to .float()
### To Reproduce
This works:
```python
import torchmetrics
import torch
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
acc = torchmetrics.Accuracy().to(torch.device("mps"))
acc.update(a, a)
acc.compute()
```
this also works:
```
a = torch.tensor([1, 2, 3])
mae = torchmetrics.MeanAbsoluteError()
mae.update(a, a)
mae.compute()
```
but this crashes
```python
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
mae = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
mae.update(a, a)
mae.compute()
```
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Input In [12], in <module>
1 a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
3 acc = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
----> 4 acc.update(a, a)
5 acc.compute()
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/metric.py:391, in Metric._wrap_update.<locals>.wrapped_func(*args, **kwargs)
389 with torch.set_grad_enabled(self._enable_grad):
390 try:
--> 391 update(*args, **kwargs)
392 except RuntimeError as err:
393 if "Expected all tensors to be on" in str(err):
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/regression/mae.py:63, in MeanAbsoluteError.update(self, preds, target)
56 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
57 """Update state with predictions and targets.
58
59 Args:
60 preds: Predictions from model
61 target: Ground truth values
62 """
---> 63 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
65 self.sum_abs_error += sum_abs_error
66 self.total += n_obs
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/functional/regression/mae.py:33, in _mean_absolute_error_update(preds, target)
23 """Updates and returns variables required to compute Mean Absolute Error.
24
25 Checks for same shape of input tensors.
(...)
29 target: Ground truth tensor
30 """
32 _check_same_shape(preds, target)
---> 33 sum_abs_error = torch.sum(torch.abs(preds - target))
34 n_obs = target.numel()
35 return sum_abs_error, n_obs
TypeError: Operation 'abs_out_mps()' does not support input type 'int64' in MPS backend.
```
### Environment
```
torch : 1.12.1
lightning : 2022.9.8
torchmetrics: 0.9.3
```
--- END ISSUE ---
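A short sketch of the casting workaround described in the report, written as plain user code; the library-side fix applies the same idea inside `_mean_absolute_error_update` before the subtraction. Integer tensors are converted to float so that `torch.abs` no longer sees an `int64` input on the MPS backend (the snippet runs on CPU as well, so the device move is omitted):

```python
import torch

preds = torch.tensor([1, 2, 3])    # int64 by default
target = torch.tensor([1, 2, 2])

preds = preds if preds.is_floating_point() else preds.float()
target = target if target.is_floating_point() else target.float()

sum_abs_error = torch.sum(torch.abs(preds - target))
print(sum_abs_error / target.numel())  # tensor(0.3333)
```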
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/regression/mae.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.utilities.checks import _check_same_shape
20
21
22 def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
23 """Updates and returns variables required to compute Mean Absolute Error.
24
25 Checks for same shape of input tensors.
26
27 Args:
28 preds: Predicted tensor
29 target: Ground truth tensor
30 """
31
32 _check_same_shape(preds, target)
33 sum_abs_error = torch.sum(torch.abs(preds - target))
34 n_obs = target.numel()
35 return sum_abs_error, n_obs
36
37
38 def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:
39 """Computes Mean Absolute Error.
40
41 Args:
42 sum_abs_error: Sum of absolute value of errors over all observations
43 n_obs: Number of predictions or observations
44
45 Example:
46 >>> preds = torch.tensor([0., 1, 2, 3])
47 >>> target = torch.tensor([0., 1, 2, 2])
48 >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
49 >>> _mean_absolute_error_compute(sum_abs_error, n_obs)
50 tensor(0.2500)
51 """
52
53 return sum_abs_error / n_obs
54
55
56 def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:
57 """Computes mean absolute error.
58
59 Args:
60 preds: estimated labels
61 target: ground truth labels
62
63 Return:
64 Tensor with MAE
65
66 Example:
67 >>> from torchmetrics.functional import mean_absolute_error
68 >>> x = torch.tensor([0., 1, 2, 3])
69 >>> y = torch.tensor([0., 1, 2, 2])
70 >>> mean_absolute_error(x, y)
71 tensor(0.2500)
72 """
73 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
74 return _mean_absolute_error_compute(sum_abs_error, n_obs)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py
--- a/src/torchmetrics/functional/regression/mae.py
+++ b/src/torchmetrics/functional/regression/mae.py
@@ -28,8 +28,9 @@
preds: Predicted tensor
target: Ground truth tensor
"""
-
_check_same_shape(preds, target)
+ preds = preds if preds.is_floating_point else preds.float()
+ target = target if target.is_floating_point else target.float()
sum_abs_error = torch.sum(torch.abs(preds - target))
n_obs = target.numel()
return sum_abs_error, n_obs
| {"golden_diff": "diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py\n--- a/src/torchmetrics/functional/regression/mae.py\n+++ b/src/torchmetrics/functional/regression/mae.py\n@@ -28,8 +28,9 @@\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n-\n _check_same_shape(preds, target)\n+ preds = preds if preds.is_floating_point else preds.float()\n+ target = target if target.is_floating_point else target.float()\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n", "issue": "MAE can't handle MPS backend\n## \ud83d\udc1b Bug\r\n\r\nThe currently, the mean absolute error can't handle the MPS backend. This is a simple fix and just requires casting to .float()\r\n\r\n### To Reproduce\r\n\r\nThis works:\r\n\r\n```python\r\nimport torchmetrics\r\nimport torch\r\n\r\na = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n\r\nacc = torchmetrics.Accuracy().to(torch.device(\"mps\"))\r\nacc.update(a, a)\r\nacc.compute()\r\n```\r\n\r\nthis also works:\r\n\r\n```\r\na = torch.tensor([1, 2, 3])\r\n\r\nmae = torchmetrics.MeanAbsoluteError()\r\nmae.update(a, a)\r\nmae.compute()\r\n\r\n```\r\n\r\nbut this crashes\r\n\r\n```python\r\na = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n\r\nmae = torchmetrics.MeanAbsoluteError().to(torch.device(\"mps\"))\r\nmae.update(a, a)\r\nmae.compute()\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nInput In [12], in <module>\r\n 1 a = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n 3 acc = torchmetrics.MeanAbsoluteError().to(torch.device(\"mps\"))\r\n----> 4 acc.update(a, a)\r\n 5 acc.compute()\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/metric.py:391, in Metric._wrap_update.<locals>.wrapped_func(*args, **kwargs)\r\n 389 with torch.set_grad_enabled(self._enable_grad):\r\n 390 try:\r\n--> 391 update(*args, **kwargs)\r\n 392 except RuntimeError as err:\r\n 393 if \"Expected all tensors to be on\" in str(err):\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/regression/mae.py:63, in MeanAbsoluteError.update(self, preds, target)\r\n 56 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\r\n 57 \"\"\"Update state with predictions and targets.\r\n 58 \r\n 59 Args:\r\n 60 preds: Predictions from model\r\n 61 target: Ground truth values\r\n 62 \"\"\"\r\n---> 63 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\r\n 65 self.sum_abs_error += sum_abs_error\r\n 66 self.total += n_obs\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/functional/regression/mae.py:33, in _mean_absolute_error_update(preds, target)\r\n 23 \"\"\"Updates and returns variables required to compute Mean Absolute Error.\r\n 24 \r\n 25 Checks for same shape of input tensors.\r\n (...)\r\n 29 target: Ground truth tensor\r\n 30 \"\"\"\r\n 32 _check_same_shape(preds, target)\r\n---> 33 sum_abs_error = torch.sum(torch.abs(preds - target))\r\n 34 n_obs = target.numel()\r\n 35 return sum_abs_error, n_obs\r\n\r\nTypeError: Operation 'abs_out_mps()' does not support input type 'int64' in MPS backend.\r\n```\r\n\r\n\r\n### Environment\r\n\r\n```\r\ntorch : 1.12.1\r\nlightning : 2022.9.8\r\ntorchmetrics: 0.9.3\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:\n \"\"\"Updates and returns variables required to compute Mean Absolute Error.\n\n Checks for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n\n _check_same_shape(preds, target)\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n\n\ndef _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:\n \"\"\"Computes Mean Absolute Error.\n\n Args:\n sum_abs_error: Sum of absolute value of errors over all observations\n n_obs: Number of predictions or observations\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n >>> _mean_absolute_error_compute(sum_abs_error, n_obs)\n tensor(0.2500)\n \"\"\"\n\n return sum_abs_error / n_obs\n\n\ndef mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Computes mean absolute error.\n\n Args:\n preds: estimated labels\n target: ground truth labels\n\n Return:\n Tensor with MAE\n\n Example:\n >>> from torchmetrics.functional import mean_absolute_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_absolute_error(x, y)\n tensor(0.2500)\n \"\"\"\n sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n return _mean_absolute_error_compute(sum_abs_error, n_obs)\n", "path": "src/torchmetrics/functional/regression/mae.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:\n \"\"\"Updates and returns variables required to compute Mean Absolute Error.\n\n Checks for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n _check_same_shape(preds, target)\n preds = preds if preds.is_floating_point else preds.float()\n target = target if target.is_floating_point else target.float()\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n\n\ndef _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: 
int) -> Tensor:\n \"\"\"Computes Mean Absolute Error.\n\n Args:\n sum_abs_error: Sum of absolute value of errors over all observations\n n_obs: Number of predictions or observations\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n >>> _mean_absolute_error_compute(sum_abs_error, n_obs)\n tensor(0.2500)\n \"\"\"\n\n return sum_abs_error / n_obs\n\n\ndef mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Computes mean absolute error.\n\n Args:\n preds: estimated labels\n target: ground truth labels\n\n Return:\n Tensor with MAE\n\n Example:\n >>> from torchmetrics.functional import mean_absolute_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_absolute_error(x, y)\n tensor(0.2500)\n \"\"\"\n sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n return _mean_absolute_error_compute(sum_abs_error, n_obs)\n", "path": "src/torchmetrics/functional/regression/mae.py"}]} | 1,747 | 160 |
gh_patches_debug_22360 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Links do not work after adding check constraint to a table
## Description
It appears that tables with any check constraint (e.g. on an unrelated column) break the display of all links on that table.
The foreign key constraint is held by the underlying database, and the links are shown in the link panel on the sidebar. However, in the grid the cells just show the raw value rather than a dropdown.
Another clue: clicking table > advanced > constraints results in the error 'Unable to fetch table constraints When making an XHR request, the server responded with an error, but the response body was not valid JSON.' Under the hood this corresponds to a 500 response from `/api/db/v0/tables/22/constraints/?limit=500`
<details>
<summary>
500 response body
</summary>
```
<!doctype html>
<html lang="en">
<head>
<title>Server Error (500)</title>
</head>
<body>
<h1>Server Error (500)</h1><p></p>
</body>
</html>
```
</details>
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Steps:
1. Add a link column to a table in Mathesar OR in another tool, add a foreign key constraint and refresh the schema in Mathesar
2. [at this point, if you view the table you should see the link as a dropdown]
3. In another tool, add a constraint to any column in the table
4. View the table in Mathesar: The links continue to display fine
5. Refresh the schema in Mathesar
6. View the table in Mathesar
Expected: The links display as before
Actual: The raw value of the links (e.g. the number for an integer id column) are shown
7. In another tool, remove the constraint
8. View the table in Mathesar: all the data will disappear. Adding a record also seems broken from this point, but under the hood the record is actually added.
9. Refresh the schema in Mathesar
10. View the table in Mathesar: all the data is present and link works correctly again
Video:
This is all mock data. Ignore bit where I try to add person_id manually at the beginning, this is force of habit 😅. Also, I add a record in the external tool but this is not necessary for reproduction (this is just meant to show the check constraint appears to work fine in other tools, and doesn't stop manipulating data there, so I don't think this is a problem with the other tool).
https://github.com/centerofci/mathesar/assets/4953590/044e5e4f-ddbc-4d77-8032-c08375ff9055
## Environment
- OS: Host: macOS, but mathesar 0.1.2 is running inside a [podman](https://podman.io/) container. I don't think this is relevant though.
- Browser: Google Chrome
- Browser Version: 115.0.5790.114 (Official Build) (arm64)
- Other info: Using postgres:14 container.
--- END ISSUE ---
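The 500 from the constraints endpoint is consistent with the polymorphic serializer having no entry for `check` constraints: `ConstraintSerializer.get_mapping_field` returns the constraint's type string, and a type missing from `serializers_mapping` has nothing to resolve to, so a reflected check constraint cannot be serialized cleanly. A reduced sketch of that lookup, with names shortened and the mixin internals simplified (this is not the exact Mathesar code):

```python
serializers_mapping = {
    "foreignkey": "ForeignKeyConstraintSerializer",
    "primary": "BaseConstraintSerializer",
    "unique": "BaseConstraintSerializer",
    # no entry for "check" or "exclude"
}

def serializer_for(constraint_type):
    return serializers_mapping[constraint_type]

for ctype in ("unique", "check"):
    try:
        print(ctype, "->", serializer_for(ctype))
    except KeyError:
        print(ctype, "-> unmapped; listing constraints on this table fails (HTTP 500)")
```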
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/constraints.py`
Content:
```
1 from psycopg.errors import DuplicateTable, UniqueViolation
2 from rest_framework import serializers, status
3
4 from db.constraints import utils as constraint_utils
5 from db.identifiers import is_identifier_too_long
6 from db.constraints.base import ForeignKeyConstraint, UniqueConstraint
7
8 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
9 from mathesar.api.exceptions.validation_exceptions.exceptions import (
10 ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,
11 InvalidTableName
12 )
13 from mathesar.api.serializers.shared_serializers import (
14 MathesarPolymorphicErrorMixin,
15 ReadWritePolymorphicSerializerMappingMixin,
16 )
17 from mathesar.models.base import Column, Constraint, Table
18
19
20 class TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
21 """
22 Limits the accepted related primary key values to a specific table.
23 For example, if the PrimaryKeyRelatedField is instantiated with a
24 Column queryset, only columns in the "associated table" are
25 accepted. The "associated table" is defined by the context dict's
26 `table_id` value.
27 """
28 def get_queryset(self):
29 table_id = self.context.get('table_id', None)
30 queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()
31 if table_id is None or not queryset:
32 return None
33 return queryset.filter(table__id=table_id)
34
35
36 class BaseConstraintSerializer(serializers.ModelSerializer):
37 name = serializers.CharField(required=False)
38 type = serializers.CharField()
39 columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)
40
41 class Meta:
42 model = Constraint
43 fields = ['id', 'name', 'type', 'columns']
44
45 def construct_constraint_obj(self, table, data):
46 columns_attnum = [column.attnum for column in data.get('columns')]
47 if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:
48 return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)
49 return None
50
51 def create(self, validated_data):
52 table = self.context['table']
53 constraint_obj = self.construct_constraint_obj(table, validated_data)
54 # Additional check is needed because we support read operations for primary key constraint,
55 # but we don't support write operations
56 if constraint_obj is None:
57 constraint_type = validated_data.get('type', None)
58 raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')
59 try:
60 constraint = table.add_constraint(constraint_obj)
61 except DuplicateTable as e:
62 raise database_api_exceptions.DuplicateTableAPIException(
63 e,
64 message='Relation with the same name already exists',
65 status_code=status.HTTP_400_BAD_REQUEST
66 )
67 except UniqueViolation as e:
68 raise database_api_exceptions.UniqueViolationAPIException(
69 e,
70 status_code=status.HTTP_400_BAD_REQUEST
71 )
72 return constraint
73
74 def validate_name(self, name):
75 if is_identifier_too_long(name):
76 raise database_api_exceptions.IdentifierTooLong(field='name')
77 return name
78
79
80 class ForeignKeyConstraintSerializer(BaseConstraintSerializer):
81 class Meta:
82 model = Constraint
83 fields = BaseConstraintSerializer.Meta.fields + [
84 'referent_columns',
85 'referent_table',
86 'onupdate',
87 'ondelete',
88 'deferrable',
89 'match'
90 ]
91
92 referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
93 referent_table = serializers.SerializerMethodField()
94 onupdate = serializers.ChoiceField(
95 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
96 required=False,
97 allow_null=True
98 )
99 ondelete = serializers.ChoiceField(
100 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
101 required=False,
102 allow_null=True
103 )
104 deferrable = serializers.BooleanField(allow_null=True, required=False)
105 match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)
106
107 def get_referent_table(self, obj):
108 return obj.referent_columns[0].table.id
109
110 def construct_constraint_obj(self, table, data):
111 columns_attnum = [column.attnum for column in data.get('columns')]
112 referent_columns = data.get('referent_columns')
113 referent_columns_attnum = [column.attnum for column in referent_columns]
114 constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']
115 constraint_options = {
116 constraint_options_field: data[constraint_options_field]
117 for constraint_options_field in constraint_options_fields if constraint_options_field in data
118 }
119 return ForeignKeyConstraint(
120 data.get('name', None),
121 table.oid,
122 columns_attnum,
123 referent_columns[0].table.oid,
124 referent_columns_attnum,
125 constraint_options
126 )
127
128
129 class ConstraintSerializer(
130 ReadWritePolymorphicSerializerMappingMixin,
131 MathesarPolymorphicErrorMixin,
132 serializers.ModelSerializer
133 ):
134 class Meta:
135 model = Constraint
136 fields = '__all__'
137
138 serializers_mapping = {
139 'foreignkey': ForeignKeyConstraintSerializer,
140 'primary': BaseConstraintSerializer,
141 'unique': BaseConstraintSerializer,
142 }
143
144 def get_mapping_field(self, data):
145 if isinstance(data, Constraint):
146 constraint_type = data.type
147 else:
148 constraint_type = data.get('type', None)
149 assert constraint_type is not None
150 return constraint_type
151
152 def create(self, validated_data):
153 serializer = self.get_serializer_class(self.get_mapping_field(validated_data))
154 return serializer.create(validated_data)
155
156 def run_validation(self, data):
157 if referent_table := data.get('referent_table', None):
158 referent_table_name = Table.current_objects.get(id=referent_table).name
159 if any(
160 invalid_char in referent_table_name
161 for invalid_char in ('(', ')')
162 ):
163 raise InvalidTableName(
164 referent_table_name,
165 field='referent_table'
166 )
167 constraint_type = data.get('type', None)
168 if constraint_type not in self.serializers_mapping.keys():
169 raise UnsupportedConstraintAPIException(constraint_type=constraint_type)
170 columns = data.get('columns', None)
171 if columns == []:
172 raise ConstraintColumnEmptyAPIException(field='columns')
173 return super(ConstraintSerializer, self).run_validation(data)
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py
--- a/mathesar/api/serializers/constraints.py
+++ b/mathesar/api/serializers/constraints.py
@@ -139,6 +139,10 @@
'foreignkey': ForeignKeyConstraintSerializer,
'primary': BaseConstraintSerializer,
'unique': BaseConstraintSerializer,
+ # Even though 'check' & 'exclude' constraints are currently unsupported it's added here
+ # so that the app doesn't break in case these constraints are already present.
+ 'check': BaseConstraintSerializer,
+ 'exclude': BaseConstraintSerializer
}
def get_mapping_field(self, data):
@@ -165,7 +169,7 @@
field='referent_table'
)
constraint_type = data.get('type', None)
- if constraint_type not in self.serializers_mapping.keys():
+ if constraint_type not in ('foreignkey', 'primary', 'unique'):
raise UnsupportedConstraintAPIException(constraint_type=constraint_type)
columns = data.get('columns', None)
if columns == []:
| {"golden_diff": "diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py\n--- a/mathesar/api/serializers/constraints.py\n+++ b/mathesar/api/serializers/constraints.py\n@@ -139,6 +139,10 @@\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n+ # Even though 'check' & 'exclude' constraints are currently unsupported it's added here\n+ # so that the app doesn't break in case these constraints are already present.\n+ 'check': BaseConstraintSerializer,\n+ 'exclude': BaseConstraintSerializer\n }\n \n def get_mapping_field(self, data):\n@@ -165,7 +169,7 @@\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n- if constraint_type not in self.serializers_mapping.keys():\n+ if constraint_type not in ('foreignkey', 'primary', 'unique'):\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n", "issue": "Links do not work after adding check constraint to a table\n## Description\r\n\r\nIt appears that tables with any check constraint (e.g. on an unrelated column) breaks the display of all links on that table.\r\n\r\nThe foreign key constraint is held by the underlying database, and the links are shown in the link panel on the sidebar. However, in the grid the cells just show the raw value rather than a dropdown.\r\n\r\nAnother clue: clicking table > advanced > constraints results shows the error 'Unable to fetch table constraints When making an XHR request, the server responded with an error, but the response body was not valid JSON.' Under the hood this corresponds with a 500 request to `/api/db/v0/tables/22/constraints/?limit=500`\r\n\r\n<details>\r\n<summary>\r\n500 response body\r\n</summary>\r\n\r\n```\r\n\r\n<!doctype html>\r\n<html lang=\"en\">\r\n<head>\r\n <title>Server Error (500)</title>\r\n</head>\r\n<body>\r\n <h1>Server Error (500)</h1><p></p>\r\n</body>\r\n</html>\r\n```\r\n\r\n</details>\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\nSteps:\r\n1. Add a link column to a table in Mathesar OR in another tool, add a foreign key constraint and refresh the schema in Mathesar\r\n2. [at this point, if you view the table you should see the link as a dropdown]\r\n3. In another tool, add a constraint to any column in the table\r\n4. View the table in Mathesar: The links continue to display fine\r\n5. Refresh the schema in Mathesar\r\n6. View the table in Mathesar\r\n\r\nExpected: The links display as before\r\nActual: The raw value of the links (e.g. the number for an integer id column) are shown\r\n\r\n7. In another tool, remove the constraint\r\n8. View the table in Mathesar: all the data will disappear. Adding a record also seems broken from this point, but under the hood the record is actually added.\r\n9. Refresh the schema in Mathesar\r\n10. View the table in Mathesar: all the data is present and link works correctly again\r\n\r\nVideo:\r\n\r\nThis is all mock data. Ignore bit where I try to add person_id manually at the beginning, this is force of habit \ud83d\ude05. 
Also, I add a record in the external tool but this is not necessary for reproduction (this is just meant to show the check constraint appears to work fine in other tools, and doesn't stop manipulating data there, so I don't think this is a problem with the other tool).\r\n\r\nhttps://github.com/centerofci/mathesar/assets/4953590/044e5e4f-ddbc-4d77-8032-c08375ff9055\r\n\r\n## Environment\r\n\r\n - OS: Host: macOS, but mathesar 0.1.2 is running inside a [podman](https://podman.io/) container. I don't think this is relevant though.\r\n - Browser: Google Chrome\r\n - Browser Version: 115.0.5790.114 (Official Build) (arm64)\r\n - Other info: Using postgres:14 container.\n", "before_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n \"\"\"\n Limits the accepted related primary key values to a specific table.\n For example, if the PrimaryKeyRelatedField is instantiated with a\n Column queryset, only columns in the \"associated table\" are\n accepted. The \"associated table\" is defined by the context dict's\n `table_id` value.\n \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n 
return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable = serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in self.serializers_mapping.keys():\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}], "after_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import 
is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n \"\"\"\n Limits the accepted related primary key values to a specific table.\n For example, if the PrimaryKeyRelatedField is instantiated with a\n Column queryset, only columns in the \"associated table\" are\n accepted. The \"associated table\" is defined by the context dict's\n `table_id` value.\n \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable 
= serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n # Even though 'check' & 'exclude' constraints are currently unsupported it's added here\n # so that the app doesn't break in case these constraints are already present.\n 'check': BaseConstraintSerializer,\n 'exclude': BaseConstraintSerializer\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in ('foreignkey', 'primary', 'unique'):\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}]} | 2,753 | 249 |
gh_patches_debug_30087 | rasdani/github-patches | git_diff | encode__uvicorn-724 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CVE-2020-7694 ANSI escape sequence injection
Looks like there is a [critical](https://vuldb.com/?id.159010) [security issue in "all versions" of uvicorn](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-7694).
I presume there is a typo in the original CVE description, so "ASNI" should be "ANSI".
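For illustration, the access-log code shown in the files below interpolates the raw request path into the log line, so a crafted request can smuggle terminal escape codes into whatever is reading the logs. A minimal sketch (the request path here is hypothetical attacker input) of how percent-encoding neutralises the sequence, mirroring the `urllib.parse.quote()` approach used in the patch below:

```
import urllib.parse

# Hypothetical attacker-controlled request path containing an ANSI escape
# sequence (ESC [ 2 J clears the terminal when echoed to it).
raw_path = "/\x1b[2J/admin"

# Formatting the raw path, as the unpatched formatters do, writes the raw
# escape bytes straight into the log line.
unsafe_line = "GET %s HTTP/1.1" % raw_path

# Percent-encoding the path first renders those bytes inert.
safe_line = "GET %s HTTP/1.1" % urllib.parse.quote(raw_path)
print(safe_line)  # GET /%1B%5B2J/admin HTTP/1.1
```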
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/logging.py`
Content:
```
1 import http
2 import logging
3 import sys
4 from copy import copy
5
6 import click
7
8 TRACE_LOG_LEVEL = 5
9
10
11 class ColourizedFormatter(logging.Formatter):
12 """
13 A custom log formatter class that:
14
15 * Outputs the LOG_LEVEL with an appropriate color.
16 * If a log call includes an `extras={"color_message": ...}` it will be used
17 for formatting the output, instead of the plain text message.
18 """
19
20 level_name_colors = {
21 TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg="blue"),
22 logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
23 logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
24 logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"),
25 logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"),
26 logging.CRITICAL: lambda level_name: click.style(
27 str(level_name), fg="bright_red"
28 ),
29 }
30
31 def __init__(self, fmt=None, datefmt=None, style="%", use_colors=None):
32 if use_colors in (True, False):
33 self.use_colors = use_colors
34 else:
35 self.use_colors = sys.stdout.isatty()
36 super().__init__(fmt=fmt, datefmt=datefmt, style=style)
37
38 def color_level_name(self, level_name, level_no):
39 default = lambda level_name: str(level_name)
40 func = self.level_name_colors.get(level_no, default)
41 return func(level_name)
42
43 def should_use_colors(self):
44 return True
45
46 def formatMessage(self, record):
47 recordcopy = copy(record)
48 levelname = recordcopy.levelname
49 seperator = " " * (8 - len(recordcopy.levelname))
50 if self.use_colors:
51 levelname = self.color_level_name(levelname, recordcopy.levelno)
52 if "color_message" in recordcopy.__dict__:
53 recordcopy.msg = recordcopy.__dict__["color_message"]
54 recordcopy.__dict__["message"] = recordcopy.getMessage()
55 recordcopy.__dict__["levelprefix"] = levelname + ":" + seperator
56 return super().formatMessage(recordcopy)
57
58
59 class DefaultFormatter(ColourizedFormatter):
60 def should_use_colors(self):
61 return sys.stderr.isatty()
62
63
64 class AccessFormatter(ColourizedFormatter):
65 status_code_colours = {
66 1: lambda code: click.style(str(code), fg="bright_white"),
67 2: lambda code: click.style(str(code), fg="green"),
68 3: lambda code: click.style(str(code), fg="yellow"),
69 4: lambda code: click.style(str(code), fg="red"),
70 5: lambda code: click.style(str(code), fg="bright_red"),
71 }
72
73 def get_client_addr(self, scope):
74 client = scope.get("client")
75 if not client:
76 return ""
77 return "%s:%d" % (client[0], client[1])
78
79 def get_path(self, scope):
80 return scope.get("root_path", "") + scope["path"]
81
82 def get_full_path(self, scope):
83 path = scope.get("root_path", "") + scope["path"]
84 query_string = scope.get("query_string", b"").decode("ascii")
85 if query_string:
86 return path + "?" + query_string
87 return path
88
89 def get_status_code(self, record):
90 status_code = record.__dict__["status_code"]
91 try:
92 status_phrase = http.HTTPStatus(status_code).phrase
93 except ValueError:
94 status_phrase = ""
95 status_and_phrase = "%s %s" % (status_code, status_phrase)
96
97 if self.use_colors:
98 default = lambda code: status_and_phrase
99 func = self.status_code_colours.get(status_code // 100, default)
100 return func(status_and_phrase)
101 return status_and_phrase
102
103 def formatMessage(self, record):
104 recordcopy = copy(record)
105 scope = recordcopy.__dict__["scope"]
106 method = scope["method"]
107 path = self.get_path(scope)
108 full_path = self.get_full_path(scope)
109 client_addr = self.get_client_addr(scope)
110 status_code = self.get_status_code(recordcopy)
111 http_version = scope["http_version"]
112 request_line = "%s %s HTTP/%s" % (method, full_path, http_version)
113 if self.use_colors:
114 request_line = click.style(request_line, bold=True)
115 recordcopy.__dict__.update(
116 {
117 "method": method,
118 "path": path,
119 "full_path": full_path,
120 "client_addr": client_addr,
121 "request_line": request_line,
122 "status_code": status_code,
123 "http_version": http_version,
124 }
125 )
126 return super().formatMessage(recordcopy)
127
```
Path: `uvicorn/protocols/utils.py`
Content:
```
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 try:
8 info = socket_info.getpeername()
9 except OSError:
10 # This case appears to inconsistently occur with uvloop
11 # bound to a unix domain socket.
12 family = None
13 info = None
14 else:
15 family = socket_info.family
16
17 if family in (socket.AF_INET, socket.AF_INET6):
18 return (str(info[0]), int(info[1]))
19 return None
20 info = transport.get_extra_info("peername")
21 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
22 return (str(info[0]), int(info[1]))
23 return None
24
25
26 def get_local_addr(transport):
27 socket_info = transport.get_extra_info("socket")
28 if socket_info is not None:
29 info = socket_info.getsockname()
30 family = socket_info.family
31 if family in (socket.AF_INET, socket.AF_INET6):
32 return (str(info[0]), int(info[1]))
33 return None
34 info = transport.get_extra_info("sockname")
35 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
36 return (str(info[0]), int(info[1]))
37 return None
38
39
40 def is_ssl(transport):
41 return bool(transport.get_extra_info("sslcontext"))
42
43
44 def get_client_addr(scope):
45 client = scope.get("client")
46 if not client:
47 return ""
48 return "%s:%d" % client
49
50
51 def get_path_with_query_string(scope):
52 path_with_query_string = scope.get("root_path", "") + scope["path"]
53 if scope["query_string"]:
54 path_with_query_string = "{}?{}".format(
55 path_with_query_string, scope["query_string"].decode("ascii")
56 )
57 return path_with_query_string
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/logging.py b/uvicorn/logging.py
--- a/uvicorn/logging.py
+++ b/uvicorn/logging.py
@@ -1,6 +1,7 @@
import http
import logging
import sys
+import urllib
from copy import copy
import click
@@ -77,14 +78,14 @@
return "%s:%d" % (client[0], client[1])
def get_path(self, scope):
- return scope.get("root_path", "") + scope["path"]
+ return urllib.parse.quote(scope.get("root_path", "") + scope["path"])
def get_full_path(self, scope):
path = scope.get("root_path", "") + scope["path"]
query_string = scope.get("query_string", b"").decode("ascii")
if query_string:
- return path + "?" + query_string
- return path
+ return urllib.parse.quote(path) + "?" + query_string
+ return urllib.parse.quote(path)
def get_status_code(self, record):
status_code = record.__dict__["status_code"]
diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -1,4 +1,5 @@
import socket
+import urllib
def get_remote_addr(transport):
@@ -49,7 +50,9 @@
def get_path_with_query_string(scope):
- path_with_query_string = scope.get("root_path", "") + scope["path"]
+ path_with_query_string = urllib.parse.quote(
+ scope.get("root_path", "") + scope["path"]
+ )
if scope["query_string"]:
path_with_query_string = "{}?{}".format(
path_with_query_string, scope["query_string"].decode("ascii")
| {"golden_diff": "diff --git a/uvicorn/logging.py b/uvicorn/logging.py\n--- a/uvicorn/logging.py\n+++ b/uvicorn/logging.py\n@@ -1,6 +1,7 @@\n import http\n import logging\n import sys\n+import urllib\n from copy import copy\n \n import click\n@@ -77,14 +78,14 @@\n return \"%s:%d\" % (client[0], client[1])\n \n def get_path(self, scope):\n- return scope.get(\"root_path\", \"\") + scope[\"path\"]\n+ return urllib.parse.quote(scope.get(\"root_path\", \"\") + scope[\"path\"])\n \n def get_full_path(self, scope):\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\").decode(\"ascii\")\n if query_string:\n- return path + \"?\" + query_string\n- return path\n+ return urllib.parse.quote(path) + \"?\" + query_string\n+ return urllib.parse.quote(path)\n \n def get_status_code(self, record):\n status_code = record.__dict__[\"status_code\"]\ndiff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -1,4 +1,5 @@\n import socket\n+import urllib\n \n \n def get_remote_addr(transport):\n@@ -49,7 +50,9 @@\n \n \n def get_path_with_query_string(scope):\n- path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n+ path_with_query_string = urllib.parse.quote(\n+ scope.get(\"root_path\", \"\") + scope[\"path\"]\n+ )\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n", "issue": "CVE-2020-7694 ANSI escape sequence injection\nLooks like there is a [critical](https://vuldb.com/?id.159010) [security issue in \"all versions\" of uvicorn](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-7694).\r\n\r\nI presume there is a typo in the original CVE description, so \"ASNI\" should be \"ANSI\".\n", "before_files": [{"content": "import http\nimport logging\nimport sys\nfrom copy import copy\n\nimport click\n\nTRACE_LOG_LEVEL = 5\n\n\nclass ColourizedFormatter(logging.Formatter):\n \"\"\"\n A custom log formatter class that:\n\n * Outputs the LOG_LEVEL with an appropriate color.\n * If a log call includes an `extras={\"color_message\": ...}` it will be used\n for formatting the output, instead of the plain text message.\n \"\"\"\n\n level_name_colors = {\n TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg=\"blue\"),\n logging.DEBUG: lambda level_name: click.style(str(level_name), fg=\"cyan\"),\n logging.INFO: lambda level_name: click.style(str(level_name), fg=\"green\"),\n logging.WARNING: lambda level_name: click.style(str(level_name), fg=\"yellow\"),\n logging.ERROR: lambda level_name: click.style(str(level_name), fg=\"red\"),\n logging.CRITICAL: lambda level_name: click.style(\n str(level_name), fg=\"bright_red\"\n ),\n }\n\n def __init__(self, fmt=None, datefmt=None, style=\"%\", use_colors=None):\n if use_colors in (True, False):\n self.use_colors = use_colors\n else:\n self.use_colors = sys.stdout.isatty()\n super().__init__(fmt=fmt, datefmt=datefmt, style=style)\n\n def color_level_name(self, level_name, level_no):\n default = lambda level_name: str(level_name)\n func = self.level_name_colors.get(level_no, default)\n return func(level_name)\n\n def should_use_colors(self):\n return True\n\n def formatMessage(self, record):\n recordcopy = copy(record)\n levelname = recordcopy.levelname\n seperator = \" \" * (8 - len(recordcopy.levelname))\n if self.use_colors:\n levelname = self.color_level_name(levelname, recordcopy.levelno)\n if 
\"color_message\" in recordcopy.__dict__:\n recordcopy.msg = recordcopy.__dict__[\"color_message\"]\n recordcopy.__dict__[\"message\"] = recordcopy.getMessage()\n recordcopy.__dict__[\"levelprefix\"] = levelname + \":\" + seperator\n return super().formatMessage(recordcopy)\n\n\nclass DefaultFormatter(ColourizedFormatter):\n def should_use_colors(self):\n return sys.stderr.isatty()\n\n\nclass AccessFormatter(ColourizedFormatter):\n status_code_colours = {\n 1: lambda code: click.style(str(code), fg=\"bright_white\"),\n 2: lambda code: click.style(str(code), fg=\"green\"),\n 3: lambda code: click.style(str(code), fg=\"yellow\"),\n 4: lambda code: click.style(str(code), fg=\"red\"),\n 5: lambda code: click.style(str(code), fg=\"bright_red\"),\n }\n\n def get_client_addr(self, scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % (client[0], client[1])\n\n def get_path(self, scope):\n return scope.get(\"root_path\", \"\") + scope[\"path\"]\n\n def get_full_path(self, scope):\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\").decode(\"ascii\")\n if query_string:\n return path + \"?\" + query_string\n return path\n\n def get_status_code(self, record):\n status_code = record.__dict__[\"status_code\"]\n try:\n status_phrase = http.HTTPStatus(status_code).phrase\n except ValueError:\n status_phrase = \"\"\n status_and_phrase = \"%s %s\" % (status_code, status_phrase)\n\n if self.use_colors:\n default = lambda code: status_and_phrase\n func = self.status_code_colours.get(status_code // 100, default)\n return func(status_and_phrase)\n return status_and_phrase\n\n def formatMessage(self, record):\n recordcopy = copy(record)\n scope = recordcopy.__dict__[\"scope\"]\n method = scope[\"method\"]\n path = self.get_path(scope)\n full_path = self.get_full_path(scope)\n client_addr = self.get_client_addr(scope)\n status_code = self.get_status_code(recordcopy)\n http_version = scope[\"http_version\"]\n request_line = \"%s %s HTTP/%s\" % (method, full_path, http_version)\n if self.use_colors:\n request_line = click.style(request_line, bold=True)\n recordcopy.__dict__.update(\n {\n \"method\": method,\n \"path\": path,\n \"full_path\": full_path,\n \"client_addr\": client_addr,\n \"request_line\": request_line,\n \"status_code\": status_code,\n \"http_version\": http_version,\n }\n )\n return super().formatMessage(recordcopy)\n", "path": "uvicorn/logging.py"}, {"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return 
(str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}], "after_files": [{"content": "import http\nimport logging\nimport sys\nimport urllib\nfrom copy import copy\n\nimport click\n\nTRACE_LOG_LEVEL = 5\n\n\nclass ColourizedFormatter(logging.Formatter):\n \"\"\"\n A custom log formatter class that:\n\n * Outputs the LOG_LEVEL with an appropriate color.\n * If a log call includes an `extras={\"color_message\": ...}` it will be used\n for formatting the output, instead of the plain text message.\n \"\"\"\n\n level_name_colors = {\n TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg=\"blue\"),\n logging.DEBUG: lambda level_name: click.style(str(level_name), fg=\"cyan\"),\n logging.INFO: lambda level_name: click.style(str(level_name), fg=\"green\"),\n logging.WARNING: lambda level_name: click.style(str(level_name), fg=\"yellow\"),\n logging.ERROR: lambda level_name: click.style(str(level_name), fg=\"red\"),\n logging.CRITICAL: lambda level_name: click.style(\n str(level_name), fg=\"bright_red\"\n ),\n }\n\n def __init__(self, fmt=None, datefmt=None, style=\"%\", use_colors=None):\n if use_colors in (True, False):\n self.use_colors = use_colors\n else:\n self.use_colors = sys.stdout.isatty()\n super().__init__(fmt=fmt, datefmt=datefmt, style=style)\n\n def color_level_name(self, level_name, level_no):\n default = lambda level_name: str(level_name)\n func = self.level_name_colors.get(level_no, default)\n return func(level_name)\n\n def should_use_colors(self):\n return True\n\n def formatMessage(self, record):\n recordcopy = copy(record)\n levelname = recordcopy.levelname\n seperator = \" \" * (8 - len(recordcopy.levelname))\n if self.use_colors:\n levelname = self.color_level_name(levelname, recordcopy.levelno)\n if \"color_message\" in recordcopy.__dict__:\n recordcopy.msg = recordcopy.__dict__[\"color_message\"]\n recordcopy.__dict__[\"message\"] = recordcopy.getMessage()\n recordcopy.__dict__[\"levelprefix\"] = levelname + \":\" + seperator\n return super().formatMessage(recordcopy)\n\n\nclass DefaultFormatter(ColourizedFormatter):\n def should_use_colors(self):\n return sys.stderr.isatty()\n\n\nclass AccessFormatter(ColourizedFormatter):\n status_code_colours = {\n 1: lambda code: click.style(str(code), fg=\"bright_white\"),\n 2: lambda code: click.style(str(code), fg=\"green\"),\n 3: lambda code: click.style(str(code), fg=\"yellow\"),\n 4: lambda code: click.style(str(code), fg=\"red\"),\n 5: lambda code: click.style(str(code), fg=\"bright_red\"),\n }\n\n def get_client_addr(self, scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % (client[0], client[1])\n\n def get_path(self, scope):\n return urllib.parse.quote(scope.get(\"root_path\", \"\") + scope[\"path\"])\n\n def get_full_path(self, scope):\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\").decode(\"ascii\")\n if query_string:\n return urllib.parse.quote(path) + \"?\" + query_string\n return 
urllib.parse.quote(path)\n\n def get_status_code(self, record):\n status_code = record.__dict__[\"status_code\"]\n try:\n status_phrase = http.HTTPStatus(status_code).phrase\n except ValueError:\n status_phrase = \"\"\n status_and_phrase = \"%s %s\" % (status_code, status_phrase)\n\n if self.use_colors:\n default = lambda code: status_and_phrase\n func = self.status_code_colours.get(status_code // 100, default)\n return func(status_and_phrase)\n return status_and_phrase\n\n def formatMessage(self, record):\n recordcopy = copy(record)\n scope = recordcopy.__dict__[\"scope\"]\n method = scope[\"method\"]\n path = self.get_path(scope)\n full_path = self.get_full_path(scope)\n client_addr = self.get_client_addr(scope)\n status_code = self.get_status_code(recordcopy)\n http_version = scope[\"http_version\"]\n request_line = \"%s %s HTTP/%s\" % (method, full_path, http_version)\n if self.use_colors:\n request_line = click.style(request_line, bold=True)\n recordcopy.__dict__.update(\n {\n \"method\": method,\n \"path\": path,\n \"full_path\": full_path,\n \"client_addr\": client_addr,\n \"request_line\": request_line,\n \"status_code\": status_code,\n \"http_version\": http_version,\n }\n )\n return super().formatMessage(recordcopy)\n", "path": "uvicorn/logging.py"}, {"content": "import socket\nimport urllib\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = urllib.parse.quote(\n scope.get(\"root_path\", \"\") + scope[\"path\"]\n )\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]} | 2,215 | 407 |
gh_patches_debug_19099 | rasdani/github-patches | git_diff | wagtail__wagtail-6320 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`include_block block with context` does not include variables defined in templates
Wagtail's `include_block` tag is meant to match Jinja2's `include` tag as closely as possible - so one would expect that when you include the context, the local template variables are also included. However, the following code does not make `foo` available to the template that `block` renders:
```
{% with foo="bar" %}
{% include_block block with context %}
{% endwith %}
```
It looks like this is because `jinja2.nodes.ContextReference()` does not currently include template-defined variables (which we use in [wagtail/core/jinja2tags.py](wagtail/core/jinja2tags.py#L39) to get the context). There is an issue relating to this [on Jinja2's repo](https://github.com/pallets/jinja/issues/860).
We could provide our own Jinja2 environment with a patched `CodeGenerator` fairly easily to include template variables (the method described in the issue above does work and django is nice enough to expose a way to load custom environments).
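Alternatively, newer Jinja2 releases (2.11+) ship `jinja2.nodes.DerivedContextReference`, which does carry template-local variables. A minimal sketch of the parse-time branch, with a fallback for older Jinja2 versions (the helper name here is illustrative only):

```
import jinja2.nodes

def _context_node():
    # DerivedContextReference (Jinja2 2.11+) resolves the active context
    # *including* variables assigned in the template, e.g. inside
    # {% with %} blocks; plain ContextReference only sees the context
    # passed in from outside the template.
    if hasattr(jinja2.nodes, "DerivedContextReference"):
        return jinja2.nodes.DerivedContextReference()
    return jinja2.nodes.ContextReference()
```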
I'd be keen to submit a PR for this - but I'm unsure if we want to ship a patched `CodeGenerator` class? If we were to do so we'd probably want to pin the Jinja2 version in Wagtail a little more tightly to avoid breakages.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from wagtail import __version__
4 from wagtail.utils.setup import assets, check_bdist_egg, sdist
5
6
7 try:
8 from setuptools import find_packages, setup
9 except ImportError:
10 from distutils.core import setup
11
12
13 # Hack to prevent "TypeError: 'NoneType' object is not callable" error
14 # in multiprocessing/util.py _exit_function when setup.py exits
15 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
16 try:
17 import multiprocessing # noqa
18 except ImportError:
19 pass
20
21
22 install_requires = [
23 "Django>=2.2,<3.2",
24 "django-modelcluster>=5.1,<6.0",
25 "django-taggit>=1.0,<2.0",
26 "django-treebeard>=4.2.0,<5.0",
27 "djangorestframework>=3.11.1,<4.0",
28 "django-filter>=2.2,<3.0",
29 "draftjs_exporter>=2.1.5,<3.0",
30 "Pillow>=4.0.0,<9.0.0",
31 "beautifulsoup4>=4.8,<4.9",
32 "html5lib>=0.999,<2",
33 "Willow>=1.4,<1.5",
34 "requests>=2.11.1,<3.0",
35 "l18n>=2018.5",
36 "xlsxwriter>=1.2.8,<2.0",
37 "tablib[xls,xlsx]>=0.14.0",
38 "anyascii>=0.1.5",
39 ]
40
41 # Testing dependencies
42 testing_extras = [
43 # Required for running the tests
44 'python-dateutil>=2.2',
45 'pytz>=2014.7',
46 'elasticsearch>=5.0,<6.0',
47 'Jinja2>=2.8,<3.0',
48 'boto3>=1.16,<1.17',
49 'freezegun>=0.3.8',
50 'openpyxl>=2.6.4',
51 'Unidecode>=0.04.14,<2.0',
52
53 # For coverage and PEP8 linting
54 'coverage>=3.7.0',
55 'flake8>=3.6.0',
56 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases
57 'flake8-blind-except==0.1.1',
58 'flake8-print==2.0.2',
59 'doc8==0.8.1',
60
61 # For templates linting
62 'jinjalint>=0.5',
63
64 # Pipenv hack to fix broken dependency causing CircleCI failures
65 'docutils==0.15',
66
67 # django-taggit 1.3.0 made changes to verbose_name which affect migrations;
68 # the test suite migrations correspond to >=1.3.0
69 'django-taggit>=1.3.0,<2.0',
70 ]
71
72 # Documentation dependencies
73 documentation_extras = [
74 'pyenchant>=3.1.1,<4',
75 'sphinxcontrib-spelling>=5.4.0,<6',
76 'Sphinx>=1.5.2',
77 'sphinx-autobuild>=0.6.0',
78 'sphinx_rtd_theme>=0.1.9',
79 ]
80
81 setup(
82 name='wagtail',
83 version=__version__,
84 description='A Django content management system.',
85 author='Wagtail core team + contributors',
86 author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html
87 url='https://wagtail.io/',
88 packages=find_packages(),
89 include_package_data=True,
90 license='BSD',
91 long_description="Wagtail is an open source content management \
92 system built on Django, with a strong community and commercial support. \
93 It’s focused on user experience, and offers precise control for \
94 designers and developers.\n\n\
95 For more details, see https://wagtail.io, https://docs.wagtail.io and \
96 https://github.com/wagtail/wagtail/.",
97 classifiers=[
98 'Development Status :: 5 - Production/Stable',
99 'Environment :: Web Environment',
100 'Intended Audience :: Developers',
101 'License :: OSI Approved :: BSD License',
102 'Operating System :: OS Independent',
103 'Programming Language :: Python',
104 'Programming Language :: Python :: 3',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Programming Language :: Python :: 3.8',
108 'Programming Language :: Python :: 3.9',
109 'Framework :: Django',
110 'Framework :: Django :: 2.2',
111 'Framework :: Django :: 3.0',
112 'Framework :: Django :: 3.1',
113 'Framework :: Wagtail',
114 'Topic :: Internet :: WWW/HTTP :: Site Management',
115 ],
116 python_requires='>=3.6',
117 install_requires=install_requires,
118 extras_require={
119 'testing': testing_extras,
120 'docs': documentation_extras
121 },
122 entry_points="""
123 [console_scripts]
124 wagtail=wagtail.bin.wagtail:main
125 """,
126 zip_safe=False,
127 cmdclass={
128 'sdist': sdist,
129 'bdist_egg': check_bdist_egg,
130 'assets': assets,
131 },
132 )
133
```
Path: `wagtail/core/jinja2tags.py`
Content:
```
1 import jinja2
2 import jinja2.nodes
3
4 from jinja2.ext import Extension
5
6 from .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version
7
8
9 class WagtailCoreExtension(Extension):
10 tags = {'include_block'}
11
12 def __init__(self, environment):
13 super().__init__(environment)
14
15 self.environment.globals.update({
16 'pageurl': jinja2.contextfunction(pageurl),
17 'slugurl': jinja2.contextfunction(slugurl),
18 'wagtail_version': wagtail_version,
19 })
20 self.environment.filters.update({
21 'richtext': richtext,
22 })
23
24 def parse(self, parser):
25 parse_method = getattr(self, 'parse_' + parser.stream.current.value)
26
27 return parse_method(parser)
28
29 def parse_include_block(self, parser):
30 lineno = next(parser.stream).lineno
31
32 args = [parser.parse_expression()]
33
34 with_context = True
35 if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):
36 with_context = next(parser.stream).value == 'with'
37 parser.stream.skip()
38
39 if with_context:
40 args.append(jinja2.nodes.ContextReference())
41 else:
42 # Actually we can just skip else branch because context arg default to None
43 args.append(jinja2.nodes.Const(None))
44
45 node = self.call_method('_include_block', args, lineno=lineno)
46 return jinja2.nodes.Output([node], lineno=lineno)
47
48 def _include_block(self, value, context=None):
49 if hasattr(value, 'render_as_block'):
50 if context:
51 new_context = context.get_all()
52 else:
53 new_context = {}
54
55 return jinja2.Markup(value.render_as_block(context=new_context))
56
57 return jinja2.Markup(value)
58
59
60 # Nicer import names
61 core = WagtailCoreExtension
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@
'python-dateutil>=2.2',
'pytz>=2014.7',
'elasticsearch>=5.0,<6.0',
- 'Jinja2>=2.8,<3.0',
+ 'Jinja2>=2.11,<3.0',
'boto3>=1.16,<1.17',
'freezegun>=0.3.8',
'openpyxl>=2.6.4',
diff --git a/wagtail/core/jinja2tags.py b/wagtail/core/jinja2tags.py
--- a/wagtail/core/jinja2tags.py
+++ b/wagtail/core/jinja2tags.py
@@ -37,7 +37,11 @@
parser.stream.skip()
if with_context:
- args.append(jinja2.nodes.ContextReference())
+ if hasattr(jinja2.nodes, 'DerivedContextReference'):
+ # DerivedContextReference includes local variables. Introduced in Jinja 2.11
+ args.append(jinja2.nodes.DerivedContextReference())
+ else:
+ args.append(jinja2.nodes.ContextReference())
else:
# Actually we can just skip else branch because context arg default to None
args.append(jinja2.nodes.Const(None))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,7 +44,7 @@\n 'python-dateutil>=2.2',\n 'pytz>=2014.7',\n 'elasticsearch>=5.0,<6.0',\n- 'Jinja2>=2.8,<3.0',\n+ 'Jinja2>=2.11,<3.0',\n 'boto3>=1.16,<1.17',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\ndiff --git a/wagtail/core/jinja2tags.py b/wagtail/core/jinja2tags.py\n--- a/wagtail/core/jinja2tags.py\n+++ b/wagtail/core/jinja2tags.py\n@@ -37,7 +37,11 @@\n parser.stream.skip()\n \n if with_context:\n- args.append(jinja2.nodes.ContextReference())\n+ if hasattr(jinja2.nodes, 'DerivedContextReference'):\n+ # DerivedContextReference includes local variables. Introduced in Jinja 2.11\n+ args.append(jinja2.nodes.DerivedContextReference())\n+ else:\n+ args.append(jinja2.nodes.ContextReference())\n else:\n # Actually we can just skip else branch because context arg default to None\n args.append(jinja2.nodes.Const(None))\n", "issue": "`include_block block with context` does not include variables defined in templates\nWagtails `include_block` tag is meant to match as closely as possible Jinja2's `include` tag \r\n- so one would expect that when you include the context the local template variables are also includes. However the following code does not make `foo` available to the template that `block` renders:\r\n\r\n```\r\n{% with foo=\"bar\" %}\r\n {% include_block block with context %}\r\n{% endwith %}\r\n```\r\n\r\nLooks like this is because `jinja2.nodes.ContextReference()` does not currently include template defined variables (which we use in [wagtail/core/jinja2tags.py](wagtail/core/jinja2tags.py#L39) to get the context). There is an issue relating [to this on Jinja2's repo](https://github.com/pallets/jinja/issues/860).\r\n\r\nWe could provide our own Jinja2 environment with a patched `CodeGenerator` fairly easily to include template variables (the method described in the issue above does work and django is nice enough to expose a way to load custom environments).\r\n\r\nI'd be keen to submit a PR for this - but I'm unsure if we want to ship a patched `CodeGenerator` class? 
If we were to do so we'd probably want to pin the Jinja2 version in Wagtail a little more tightly to avoid breakages.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=2.2,<3.2\",\n \"django-modelcluster>=5.1,<6.0\",\n \"django-taggit>=1.0,<2.0\",\n \"django-treebeard>=4.2.0,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=2.2,<3.0\",\n \"draftjs_exporter>=2.1.5,<3.0\",\n \"Pillow>=4.0.0,<9.0.0\",\n \"beautifulsoup4>=4.8,<4.9\",\n \"html5lib>=0.999,<2\",\n \"Willow>=1.4,<1.5\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"xlsxwriter>=1.2.8,<2.0\",\n \"tablib[xls,xlsx]>=0.14.0\",\n \"anyascii>=0.1.5\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n 'python-dateutil>=2.2',\n 'pytz>=2014.7',\n 'elasticsearch>=5.0,<6.0',\n 'Jinja2>=2.8,<3.0',\n 'boto3>=1.16,<1.17',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\n 'Unidecode>=0.04.14,<2.0',\n\n # For coverage and PEP8 linting\n 'coverage>=3.7.0',\n 'flake8>=3.6.0',\n 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases\n 'flake8-blind-except==0.1.1',\n 'flake8-print==2.0.2',\n 'doc8==0.8.1',\n\n # For templates linting\n 'jinjalint>=0.5',\n\n # Pipenv hack to fix broken dependency causing CircleCI failures\n 'docutils==0.15',\n\n # django-taggit 1.3.0 made changes to verbose_name which affect migrations;\n # the test suite migrations correspond to >=1.3.0\n 'django-taggit>=1.3.0,<2.0',\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n 'pyenchant>=3.1.1,<4',\n 'sphinxcontrib-spelling>=5.4.0,<6',\n 'Sphinx>=1.5.2',\n 'sphinx-autobuild>=0.6.0',\n 'sphinx_rtd_theme>=0.1.9',\n]\n\nsetup(\n name='wagtail',\n version=__version__,\n description='A Django content management system.',\n author='Wagtail core team + contributors',\n author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html\n url='https://wagtail.io/',\n packages=find_packages(),\n include_package_data=True,\n license='BSD',\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt\u2019s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.io, https://docs.wagtail.io and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Framework :: Django :: 2.2',\n 'Framework :: Django :: 3.0',\n 'Framework :: Django :: 3.1',\n 'Framework :: Wagtail',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': documentation_extras\n },\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n 'sdist': sdist,\n 'bdist_egg': check_bdist_egg,\n 'assets': assets,\n },\n)\n", "path": "setup.py"}, {"content": "import jinja2\nimport jinja2.nodes\n\nfrom jinja2.ext import Extension\n\nfrom .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version\n\n\nclass WagtailCoreExtension(Extension):\n tags = {'include_block'}\n\n def __init__(self, environment):\n super().__init__(environment)\n\n self.environment.globals.update({\n 'pageurl': jinja2.contextfunction(pageurl),\n 'slugurl': jinja2.contextfunction(slugurl),\n 'wagtail_version': wagtail_version,\n })\n self.environment.filters.update({\n 'richtext': richtext,\n })\n\n def parse(self, parser):\n parse_method = getattr(self, 'parse_' + parser.stream.current.value)\n\n return parse_method(parser)\n\n def parse_include_block(self, parser):\n lineno = next(parser.stream).lineno\n\n args = [parser.parse_expression()]\n\n with_context = True\n if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):\n with_context = next(parser.stream).value == 'with'\n parser.stream.skip()\n\n if with_context:\n args.append(jinja2.nodes.ContextReference())\n else:\n # Actually we can just skip else branch because context arg default to None\n args.append(jinja2.nodes.Const(None))\n\n node = self.call_method('_include_block', args, lineno=lineno)\n return jinja2.nodes.Output([node], lineno=lineno)\n\n def _include_block(self, value, context=None):\n if hasattr(value, 'render_as_block'):\n if context:\n new_context = context.get_all()\n else:\n new_context = {}\n\n return jinja2.Markup(value.render_as_block(context=new_context))\n\n return jinja2.Markup(value)\n\n\n# Nicer import names\ncore = WagtailCoreExtension\n", "path": "wagtail/core/jinja2tags.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n 
\"Django>=2.2,<3.2\",\n \"django-modelcluster>=5.1,<6.0\",\n \"django-taggit>=1.0,<2.0\",\n \"django-treebeard>=4.2.0,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=2.2,<3.0\",\n \"draftjs_exporter>=2.1.5,<3.0\",\n \"Pillow>=4.0.0,<9.0.0\",\n \"beautifulsoup4>=4.8,<4.9\",\n \"html5lib>=0.999,<2\",\n \"Willow>=1.4,<1.5\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"xlsxwriter>=1.2.8,<2.0\",\n \"tablib[xls,xlsx]>=0.14.0\",\n \"anyascii>=0.1.5\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n 'python-dateutil>=2.2',\n 'pytz>=2014.7',\n 'elasticsearch>=5.0,<6.0',\n 'Jinja2>=2.11,<3.0',\n 'boto3>=1.16,<1.17',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\n 'Unidecode>=0.04.14,<2.0',\n\n # For coverage and PEP8 linting\n 'coverage>=3.7.0',\n 'flake8>=3.6.0',\n 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases\n 'flake8-blind-except==0.1.1',\n 'flake8-print==2.0.2',\n 'doc8==0.8.1',\n\n # For templates linting\n 'jinjalint>=0.5',\n\n # Pipenv hack to fix broken dependency causing CircleCI failures\n 'docutils==0.15',\n\n # django-taggit 1.3.0 made changes to verbose_name which affect migrations;\n # the test suite migrations correspond to >=1.3.0\n 'django-taggit>=1.3.0,<2.0',\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n 'pyenchant>=3.1.1,<4',\n 'sphinxcontrib-spelling>=5.4.0,<6',\n 'Sphinx>=1.5.2',\n 'sphinx-autobuild>=0.6.0',\n 'sphinx_rtd_theme>=0.1.9',\n]\n\nsetup(\n name='wagtail',\n version=__version__,\n description='A Django content management system.',\n author='Wagtail core team + contributors',\n author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html\n url='https://wagtail.io/',\n packages=find_packages(),\n include_package_data=True,\n license='BSD',\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt\u2019s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.io, https://docs.wagtail.io and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Framework :: Django :: 2.2',\n 'Framework :: Django :: 3.0',\n 'Framework :: Django :: 3.1',\n 'Framework :: Wagtail',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': documentation_extras\n },\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n 'sdist': sdist,\n 'bdist_egg': check_bdist_egg,\n 'assets': assets,\n },\n)\n", "path": "setup.py"}, {"content": "import jinja2\nimport jinja2.nodes\n\nfrom jinja2.ext import Extension\n\nfrom .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version\n\n\nclass WagtailCoreExtension(Extension):\n tags = {'include_block'}\n\n def __init__(self, environment):\n super().__init__(environment)\n\n self.environment.globals.update({\n 'pageurl': jinja2.contextfunction(pageurl),\n 'slugurl': jinja2.contextfunction(slugurl),\n 'wagtail_version': wagtail_version,\n })\n self.environment.filters.update({\n 'richtext': richtext,\n })\n\n def parse(self, parser):\n parse_method = getattr(self, 'parse_' + parser.stream.current.value)\n\n return parse_method(parser)\n\n def parse_include_block(self, parser):\n lineno = next(parser.stream).lineno\n\n args = [parser.parse_expression()]\n\n with_context = True\n if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):\n with_context = next(parser.stream).value == 'with'\n parser.stream.skip()\n\n if with_context:\n if hasattr(jinja2.nodes, 'DerivedContextReference'):\n # DerivedContextReference includes local variables. Introduced in Jinja 2.11\n args.append(jinja2.nodes.DerivedContextReference())\n else:\n args.append(jinja2.nodes.ContextReference())\n else:\n # Actually we can just skip else branch because context arg default to None\n args.append(jinja2.nodes.Const(None))\n\n node = self.call_method('_include_block', args, lineno=lineno)\n return jinja2.nodes.Output([node], lineno=lineno)\n\n def _include_block(self, value, context=None):\n if hasattr(value, 'render_as_block'):\n if context:\n new_context = context.get_all()\n else:\n new_context = {}\n\n return jinja2.Markup(value.render_as_block(context=new_context))\n\n return jinja2.Markup(value)\n\n\n# Nicer import names\ncore = WagtailCoreExtension\n", "path": "wagtail/core/jinja2tags.py"}]} | 2,619 | 315 |
gh_patches_debug_36955 | rasdani/github-patches | git_diff | deis__deis-4071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dynamic typing for config response
```
POST /v1/apps/<app id>/config/
{
"cpu" {
"web": 1024
}
}
Response:
{
...
"cpu" {
"web": 1024
}
}
POST /v1/apps/<app id>/config/
{
"cpu" {
"web": "1024"
}
}
Response:
{
...
"cpu" {
"web": "1024"
}
}
```
I think that dynamic parsing for the request is fine, but having a dynamic type for a response makes it very difficult to parse.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/api/serializers.py`
Content:
```
1 """
2 Classes to serialize the RESTful representation of Deis API models.
3 """
4
5 from __future__ import unicode_literals
6
7 import json
8 import re
9
10 from django.conf import settings
11 from django.contrib.auth.models import User
12 from django.utils import timezone
13 from rest_framework import serializers
14 from rest_framework.validators import UniqueTogetherValidator
15
16 from api import models
17
18
19 PROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')
20 MEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE)
21 CPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')
22 TAGKEY_MATCH = re.compile(r'^[a-z]+$')
23 TAGVAL_MATCH = re.compile(r'^\w+$')
24
25
26 class JSONFieldSerializer(serializers.Field):
27 def to_representation(self, obj):
28 return obj
29
30 def to_internal_value(self, data):
31 try:
32 val = json.loads(data)
33 except TypeError:
34 val = data
35 return val
36
37
38 class ModelSerializer(serializers.ModelSerializer):
39
40 uuid = serializers.ReadOnlyField()
41
42 def get_validators(self):
43 """
44 Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.
45
46 See https://github.com/deis/deis/pull/2898#discussion_r23105147
47 """
48 validators = super(ModelSerializer, self).get_validators()
49 for v in validators:
50 if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:
51 validators.remove(v)
52 return validators
53
54
55 class UserSerializer(serializers.ModelSerializer):
56 class Meta:
57 model = User
58 fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',
59 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',
60 'is_active']
61 read_only_fields = ['is_superuser', 'is_staff', 'groups',
62 'user_permissions', 'last_login', 'date_joined', 'is_active']
63 extra_kwargs = {'password': {'write_only': True}}
64
65 def create(self, validated_data):
66 now = timezone.now()
67 user = User(
68 email=validated_data.get('email'),
69 username=validated_data.get('username'),
70 last_login=now,
71 date_joined=now,
72 is_active=True
73 )
74 if validated_data.get('first_name'):
75 user.first_name = validated_data['first_name']
76 if validated_data.get('last_name'):
77 user.last_name = validated_data['last_name']
78 user.set_password(validated_data['password'])
79 # Make the first signup an admin / superuser
80 if not User.objects.filter(is_superuser=True).exists():
81 user.is_superuser = user.is_staff = True
82 user.save()
83 return user
84
85
86 class AdminUserSerializer(serializers.ModelSerializer):
87 """Serialize admin status for a User model."""
88
89 class Meta:
90 model = User
91 fields = ['username', 'is_superuser']
92 read_only_fields = ['username']
93
94
95 class AppSerializer(ModelSerializer):
96 """Serialize a :class:`~api.models.App` model."""
97
98 owner = serializers.ReadOnlyField(source='owner.username')
99 structure = JSONFieldSerializer(required=False)
100 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
101 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
102
103 class Meta:
104 """Metadata options for a :class:`AppSerializer`."""
105 model = models.App
106 fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']
107 read_only_fields = ['uuid']
108
109
110 class BuildSerializer(ModelSerializer):
111 """Serialize a :class:`~api.models.Build` model."""
112
113 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
114 owner = serializers.ReadOnlyField(source='owner.username')
115 procfile = JSONFieldSerializer(required=False)
116 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
117 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
118
119 class Meta:
120 """Metadata options for a :class:`BuildSerializer`."""
121 model = models.Build
122 fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',
123 'updated', 'uuid']
124 read_only_fields = ['uuid']
125
126
127 class ConfigSerializer(ModelSerializer):
128 """Serialize a :class:`~api.models.Config` model."""
129
130 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
131 owner = serializers.ReadOnlyField(source='owner.username')
132 values = JSONFieldSerializer(required=False)
133 memory = JSONFieldSerializer(required=False)
134 cpu = JSONFieldSerializer(required=False)
135 tags = JSONFieldSerializer(required=False)
136 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
137 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
138
139 class Meta:
140 """Metadata options for a :class:`ConfigSerializer`."""
141 model = models.Config
142
143 def validate_memory(self, value):
144 for k, v in value.viewitems():
145 if v is None: # use NoneType to unset a value
146 continue
147 if not re.match(PROCTYPE_MATCH, k):
148 raise serializers.ValidationError("Process types can only contain [a-z]")
149 if not re.match(MEMLIMIT_MATCH, str(v)):
150 raise serializers.ValidationError(
151 "Limit format: <number><unit>, where unit = B, K, M or G")
152 return value
153
154 def validate_cpu(self, value):
155 for k, v in value.viewitems():
156 if v is None: # use NoneType to unset a value
157 continue
158 if not re.match(PROCTYPE_MATCH, k):
159 raise serializers.ValidationError("Process types can only contain [a-z]")
160 shares = re.match(CPUSHARE_MATCH, str(v))
161 if not shares:
162 raise serializers.ValidationError("CPU shares must be an integer")
163 for v in shares.groupdict().viewvalues():
164 try:
165 i = int(v)
166 except ValueError:
167 raise serializers.ValidationError("CPU shares must be an integer")
168 if i > 1024 or i < 0:
169 raise serializers.ValidationError("CPU shares must be between 0 and 1024")
170 return value
171
172 def validate_tags(self, value):
173 for k, v in value.viewitems():
174 if v is None: # use NoneType to unset a value
175 continue
176 if not re.match(TAGKEY_MATCH, k):
177 raise serializers.ValidationError("Tag keys can only contain [a-z]")
178 if not re.match(TAGVAL_MATCH, str(v)):
179 raise serializers.ValidationError("Invalid tag value")
180 return value
181
182
183 class ReleaseSerializer(ModelSerializer):
184 """Serialize a :class:`~api.models.Release` model."""
185
186 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
187 owner = serializers.ReadOnlyField(source='owner.username')
188 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
189 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
190
191 class Meta:
192 """Metadata options for a :class:`ReleaseSerializer`."""
193 model = models.Release
194
195
196 class ContainerSerializer(ModelSerializer):
197 """Serialize a :class:`~api.models.Container` model."""
198
199 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
200 owner = serializers.ReadOnlyField(source='owner.username')
201 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
202 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
203 release = serializers.SerializerMethodField()
204
205 class Meta:
206 """Metadata options for a :class:`ContainerSerializer`."""
207 model = models.Container
208 fields = ['owner', 'app', 'release', 'type', 'num', 'state', 'created', 'updated', 'uuid']
209
210 def get_release(self, obj):
211 return "v{}".format(obj.release.version)
212
213
214 class KeySerializer(ModelSerializer):
215 """Serialize a :class:`~api.models.Key` model."""
216
217 owner = serializers.ReadOnlyField(source='owner.username')
218 fingerprint = serializers.CharField(read_only=True)
219 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
220 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
221
222 class Meta:
223 """Metadata options for a KeySerializer."""
224 model = models.Key
225
226
227 class DomainSerializer(ModelSerializer):
228 """Serialize a :class:`~api.models.Domain` model."""
229
230 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
231 owner = serializers.ReadOnlyField(source='owner.username')
232 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
233 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
234
235 class Meta:
236 """Metadata options for a :class:`DomainSerializer`."""
237 model = models.Domain
238 fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']
239
240 def validate_domain(self, value):
241 """
242 Check that the hostname is valid
243 """
244 if len(value) > 255:
245 raise serializers.ValidationError('Hostname must be 255 characters or less.')
246 if value[-1:] == ".":
247 value = value[:-1] # strip exactly one dot from the right, if present
248 labels = value.split('.')
249 if 'xip.io' in value:
250 return value
251 if labels[0] == '*':
252 raise serializers.ValidationError(
253 'Adding a wildcard subdomain is currently not supported.')
254 allowed = re.compile("^(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
255 for label in labels:
256 match = allowed.match(label)
257 if not match or '--' in label or label.isdigit() or \
258 len(labels) == 1 and any(char.isdigit() for char in label):
259 raise serializers.ValidationError('Hostname does not look valid.')
260 if models.Domain.objects.filter(domain=value).exists():
261 raise serializers.ValidationError(
262 "The domain {} is already in use by another app".format(value))
263 return value
264
265
266 class CertificateSerializer(ModelSerializer):
267 """Serialize a :class:`~api.models.Cert` model."""
268
269 owner = serializers.ReadOnlyField(source='owner.username')
270 expires = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
271 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
272 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
273
274 class Meta:
275 """Metadata options for a DomainCertSerializer."""
276 model = models.Certificate
277 extra_kwargs = {'certificate': {'write_only': True},
278 'key': {'write_only': True},
279 'common_name': {'required': False}}
280 read_only_fields = ['expires', 'created', 'updated']
281
282
283 class PushSerializer(ModelSerializer):
284 """Serialize a :class:`~api.models.Push` model."""
285
286 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
287 owner = serializers.ReadOnlyField(source='owner.username')
288 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
289 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
290
291 class Meta:
292 """Metadata options for a :class:`PushSerializer`."""
293 model = models.Push
294 fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',
295 'ssh_connection', 'ssh_original_command', 'created', 'updated']
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/controller/api/serializers.py b/controller/api/serializers.py
--- a/controller/api/serializers.py
+++ b/controller/api/serializers.py
@@ -24,10 +24,16 @@
class JSONFieldSerializer(serializers.Field):
+ """
+ A Django REST framework serializer for JSON data.
+ """
+
def to_representation(self, obj):
+ """Serialize the field's JSON data, for read operations."""
return obj
def to_internal_value(self, data):
+ """Deserialize the field's JSON data, for write operations."""
try:
val = json.loads(data)
except TypeError:
@@ -35,6 +41,41 @@
return val
+class JSONIntFieldSerializer(JSONFieldSerializer):
+ """
+ A JSON serializer that coerces its data to integers.
+ """
+
+ def to_internal_value(self, data):
+ """Deserialize the field's JSON integer data."""
+ field = super(JSONIntFieldSerializer, self).to_internal_value(data)
+
+ for k, v in field.viewitems():
+ if v is not None: # NoneType is used to unset a value
+ try:
+ field[k] = int(v)
+ except ValueError:
+ field[k] = v
+ # Do nothing, the validator will catch this later
+ return field
+
+
+class JSONStringFieldSerializer(JSONFieldSerializer):
+ """
+ A JSON serializer that coerces its data to strings.
+ """
+
+ def to_internal_value(self, data):
+ """Deserialize the field's JSON string data."""
+ field = super(JSONStringFieldSerializer, self).to_internal_value(data)
+
+ for k, v in field.viewitems():
+ if v is not None: # NoneType is used to unset a value
+ field[k] = unicode(v)
+
+ return field
+
+
class ModelSerializer(serializers.ModelSerializer):
uuid = serializers.ReadOnlyField()
@@ -129,10 +170,10 @@
app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
owner = serializers.ReadOnlyField(source='owner.username')
- values = JSONFieldSerializer(required=False)
- memory = JSONFieldSerializer(required=False)
- cpu = JSONFieldSerializer(required=False)
- tags = JSONFieldSerializer(required=False)
+ values = JSONStringFieldSerializer(required=False)
+ memory = JSONStringFieldSerializer(required=False)
+ cpu = JSONIntFieldSerializer(required=False)
+ tags = JSONStringFieldSerializer(required=False)
created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
| {"golden_diff": "diff --git a/controller/api/serializers.py b/controller/api/serializers.py\n--- a/controller/api/serializers.py\n+++ b/controller/api/serializers.py\n@@ -24,10 +24,16 @@\n \n \n class JSONFieldSerializer(serializers.Field):\n+ \"\"\"\n+ A Django REST framework serializer for JSON data.\n+ \"\"\"\n+\n def to_representation(self, obj):\n+ \"\"\"Serialize the field's JSON data, for read operations.\"\"\"\n return obj\n \n def to_internal_value(self, data):\n+ \"\"\"Deserialize the field's JSON data, for write operations.\"\"\"\n try:\n val = json.loads(data)\n except TypeError:\n@@ -35,6 +41,41 @@\n return val\n \n \n+class JSONIntFieldSerializer(JSONFieldSerializer):\n+ \"\"\"\n+ A JSON serializer that coerces its data to integers.\n+ \"\"\"\n+\n+ def to_internal_value(self, data):\n+ \"\"\"Deserialize the field's JSON integer data.\"\"\"\n+ field = super(JSONIntFieldSerializer, self).to_internal_value(data)\n+\n+ for k, v in field.viewitems():\n+ if v is not None: # NoneType is used to unset a value\n+ try:\n+ field[k] = int(v)\n+ except ValueError:\n+ field[k] = v\n+ # Do nothing, the validator will catch this later\n+ return field\n+\n+\n+class JSONStringFieldSerializer(JSONFieldSerializer):\n+ \"\"\"\n+ A JSON serializer that coerces its data to strings.\n+ \"\"\"\n+\n+ def to_internal_value(self, data):\n+ \"\"\"Deserialize the field's JSON string data.\"\"\"\n+ field = super(JSONStringFieldSerializer, self).to_internal_value(data)\n+\n+ for k, v in field.viewitems():\n+ if v is not None: # NoneType is used to unset a value\n+ field[k] = unicode(v)\n+\n+ return field\n+\n+\n class ModelSerializer(serializers.ModelSerializer):\n \n uuid = serializers.ReadOnlyField()\n@@ -129,10 +170,10 @@\n \n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n- values = JSONFieldSerializer(required=False)\n- memory = JSONFieldSerializer(required=False)\n- cpu = JSONFieldSerializer(required=False)\n- tags = JSONFieldSerializer(required=False)\n+ values = JSONStringFieldSerializer(required=False)\n+ memory = JSONStringFieldSerializer(required=False)\n+ cpu = JSONIntFieldSerializer(required=False)\n+ tags = JSONStringFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n", "issue": "Dynamic typing for config response \n```\nPOST /v1/apps/<app id>/config/\n\n{\n \"cpu\" {\n \"web\": 1024\n }\n}\n\nResponse:\n\n{\n ...\n \"cpu\" {\n \"web\": 1024\n }\n}\n\nPOST /v1/apps/<app id>/config/\n\n{\n \"cpu\" {\n \"web\": \"1024\"\n }\n}\n\nResponse:\n\n{\n ...\n \"cpu\" {\n \"web\": \"1024\"\n }\n}\n```\n\nI think that dynamic parsing for the request is fine, but having a dynamic type for a response make it very difficult to parse.\n\n", "before_files": [{"content": "\"\"\"\nClasses to serialize the RESTful representation of Deis API models.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom api import models\n\n\nPROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')\nMEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE)\nCPUSHARE_MATCH = 
re.compile(r'^(?P<cpu>[0-9]+)$')\nTAGKEY_MATCH = re.compile(r'^[a-z]+$')\nTAGVAL_MATCH = re.compile(r'^\\w+$')\n\n\nclass JSONFieldSerializer(serializers.Field):\n def to_representation(self, obj):\n return obj\n\n def to_internal_value(self, data):\n try:\n val = json.loads(data)\n except TypeError:\n val = data\n return val\n\n\nclass ModelSerializer(serializers.ModelSerializer):\n\n uuid = serializers.ReadOnlyField()\n\n def get_validators(self):\n \"\"\"\n Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.\n\n See https://github.com/deis/deis/pull/2898#discussion_r23105147\n \"\"\"\n validators = super(ModelSerializer, self).get_validators()\n for v in validators:\n if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:\n validators.remove(v)\n return validators\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',\n 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',\n 'is_active']\n read_only_fields = ['is_superuser', 'is_staff', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'is_active']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n now = timezone.now()\n user = User(\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n last_login=now,\n date_joined=now,\n is_active=True\n )\n if validated_data.get('first_name'):\n user.first_name = validated_data['first_name']\n if validated_data.get('last_name'):\n user.last_name = validated_data['last_name']\n user.set_password(validated_data['password'])\n # Make the first signup an admin / superuser\n if not User.objects.filter(is_superuser=True).exists():\n user.is_superuser = user.is_staff = True\n user.save()\n return user\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n \"\"\"Serialize admin status for a User model.\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'is_superuser']\n read_only_fields = ['username']\n\n\nclass AppSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.App` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n structure = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`AppSerializer`.\"\"\"\n model = models.App\n fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']\n read_only_fields = ['uuid']\n\n\nclass BuildSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Build` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n procfile = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`BuildSerializer`.\"\"\"\n model = models.Build\n fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',\n 'updated', 'uuid']\n read_only_fields = ['uuid']\n\n\nclass ConfigSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Config` model.\"\"\"\n\n app = 
serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n values = JSONFieldSerializer(required=False)\n memory = JSONFieldSerializer(required=False)\n cpu = JSONFieldSerializer(required=False)\n tags = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ConfigSerializer`.\"\"\"\n model = models.Config\n\n def validate_memory(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n if not re.match(MEMLIMIT_MATCH, str(v)):\n raise serializers.ValidationError(\n \"Limit format: <number><unit>, where unit = B, K, M or G\")\n return value\n\n def validate_cpu(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n shares = re.match(CPUSHARE_MATCH, str(v))\n if not shares:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n for v in shares.groupdict().viewvalues():\n try:\n i = int(v)\n except ValueError:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n if i > 1024 or i < 0:\n raise serializers.ValidationError(\"CPU shares must be between 0 and 1024\")\n return value\n\n def validate_tags(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(TAGKEY_MATCH, k):\n raise serializers.ValidationError(\"Tag keys can only contain [a-z]\")\n if not re.match(TAGVAL_MATCH, str(v)):\n raise serializers.ValidationError(\"Invalid tag value\")\n return value\n\n\nclass ReleaseSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Release` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ReleaseSerializer`.\"\"\"\n model = models.Release\n\n\nclass ContainerSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Container` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n release = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Metadata options for a :class:`ContainerSerializer`.\"\"\"\n model = models.Container\n fields = ['owner', 'app', 'release', 'type', 'num', 'state', 'created', 'updated', 'uuid']\n\n def get_release(self, obj):\n return \"v{}\".format(obj.release.version)\n\n\nclass KeySerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Key` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n fingerprint = 
serializers.CharField(read_only=True)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a KeySerializer.\"\"\"\n model = models.Key\n\n\nclass DomainSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Domain` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`DomainSerializer`.\"\"\"\n model = models.Domain\n fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']\n\n def validate_domain(self, value):\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n if len(value) > 255:\n raise serializers.ValidationError('Hostname must be 255 characters or less.')\n if value[-1:] == \".\":\n value = value[:-1] # strip exactly one dot from the right, if present\n labels = value.split('.')\n if 'xip.io' in value:\n return value\n if labels[0] == '*':\n raise serializers.ValidationError(\n 'Adding a wildcard subdomain is currently not supported.')\n allowed = re.compile(\"^(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n for label in labels:\n match = allowed.match(label)\n if not match or '--' in label or label.isdigit() or \\\n len(labels) == 1 and any(char.isdigit() for char in label):\n raise serializers.ValidationError('Hostname does not look valid.')\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n return value\n\n\nclass CertificateSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Cert` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n expires = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a DomainCertSerializer.\"\"\"\n model = models.Certificate\n extra_kwargs = {'certificate': {'write_only': True},\n 'key': {'write_only': True},\n 'common_name': {'required': False}}\n read_only_fields = ['expires', 'created', 'updated']\n\n\nclass PushSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Push` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`PushSerializer`.\"\"\"\n model = models.Push\n fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',\n 'ssh_connection', 'ssh_original_command', 'created', 'updated']\n", "path": "controller/api/serializers.py"}], "after_files": [{"content": "\"\"\"\nClasses to serialize the RESTful representation of Deis API models.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport 
re\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom api import models\n\n\nPROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')\nMEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE)\nCPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')\nTAGKEY_MATCH = re.compile(r'^[a-z]+$')\nTAGVAL_MATCH = re.compile(r'^\\w+$')\n\n\nclass JSONFieldSerializer(serializers.Field):\n \"\"\"\n A Django REST framework serializer for JSON data.\n \"\"\"\n\n def to_representation(self, obj):\n \"\"\"Serialize the field's JSON data, for read operations.\"\"\"\n return obj\n\n def to_internal_value(self, data):\n \"\"\"Deserialize the field's JSON data, for write operations.\"\"\"\n try:\n val = json.loads(data)\n except TypeError:\n val = data\n return val\n\n\nclass JSONIntFieldSerializer(JSONFieldSerializer):\n \"\"\"\n A JSON serializer that coerces its data to integers.\n \"\"\"\n\n def to_internal_value(self, data):\n \"\"\"Deserialize the field's JSON integer data.\"\"\"\n field = super(JSONIntFieldSerializer, self).to_internal_value(data)\n\n for k, v in field.viewitems():\n if v is not None: # NoneType is used to unset a value\n try:\n field[k] = int(v)\n except ValueError:\n field[k] = v\n # Do nothing, the validator will catch this later\n return field\n\n\nclass JSONStringFieldSerializer(JSONFieldSerializer):\n \"\"\"\n A JSON serializer that coerces its data to strings.\n \"\"\"\n\n def to_internal_value(self, data):\n \"\"\"Deserialize the field's JSON string data.\"\"\"\n field = super(JSONStringFieldSerializer, self).to_internal_value(data)\n\n for k, v in field.viewitems():\n if v is not None: # NoneType is used to unset a value\n field[k] = unicode(v)\n\n return field\n\n\nclass ModelSerializer(serializers.ModelSerializer):\n\n uuid = serializers.ReadOnlyField()\n\n def get_validators(self):\n \"\"\"\n Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.\n\n See https://github.com/deis/deis/pull/2898#discussion_r23105147\n \"\"\"\n validators = super(ModelSerializer, self).get_validators()\n for v in validators:\n if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:\n validators.remove(v)\n return validators\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',\n 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',\n 'is_active']\n read_only_fields = ['is_superuser', 'is_staff', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'is_active']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n now = timezone.now()\n user = User(\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n last_login=now,\n date_joined=now,\n is_active=True\n )\n if validated_data.get('first_name'):\n user.first_name = validated_data['first_name']\n if validated_data.get('last_name'):\n user.last_name = validated_data['last_name']\n user.set_password(validated_data['password'])\n # Make the first signup an admin / superuser\n if not User.objects.filter(is_superuser=True).exists():\n user.is_superuser = user.is_staff = True\n user.save()\n return user\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n \"\"\"Serialize admin status for a User model.\"\"\"\n\n class Meta:\n 
model = User\n fields = ['username', 'is_superuser']\n read_only_fields = ['username']\n\n\nclass AppSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.App` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n structure = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`AppSerializer`.\"\"\"\n model = models.App\n fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']\n read_only_fields = ['uuid']\n\n\nclass BuildSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Build` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n procfile = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`BuildSerializer`.\"\"\"\n model = models.Build\n fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',\n 'updated', 'uuid']\n read_only_fields = ['uuid']\n\n\nclass ConfigSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Config` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n values = JSONStringFieldSerializer(required=False)\n memory = JSONStringFieldSerializer(required=False)\n cpu = JSONIntFieldSerializer(required=False)\n tags = JSONStringFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ConfigSerializer`.\"\"\"\n model = models.Config\n\n def validate_memory(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n if not re.match(MEMLIMIT_MATCH, str(v)):\n raise serializers.ValidationError(\n \"Limit format: <number><unit>, where unit = B, K, M or G\")\n return value\n\n def validate_cpu(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n shares = re.match(CPUSHARE_MATCH, str(v))\n if not shares:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n for v in shares.groupdict().viewvalues():\n try:\n i = int(v)\n except ValueError:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n if i > 1024 or i < 0:\n raise serializers.ValidationError(\"CPU shares must be between 0 and 1024\")\n return value\n\n def validate_tags(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(TAGKEY_MATCH, k):\n raise serializers.ValidationError(\"Tag keys can only contain [a-z]\")\n if not re.match(TAGVAL_MATCH, str(v)):\n raise serializers.ValidationError(\"Invalid tag 
value\")\n return value\n\n\nclass ReleaseSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Release` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ReleaseSerializer`.\"\"\"\n model = models.Release\n\n\nclass ContainerSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Container` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n release = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Metadata options for a :class:`ContainerSerializer`.\"\"\"\n model = models.Container\n fields = ['owner', 'app', 'release', 'type', 'num', 'state', 'created', 'updated', 'uuid']\n\n def get_release(self, obj):\n return \"v{}\".format(obj.release.version)\n\n\nclass KeySerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Key` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n fingerprint = serializers.CharField(read_only=True)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a KeySerializer.\"\"\"\n model = models.Key\n\n\nclass DomainSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Domain` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`DomainSerializer`.\"\"\"\n model = models.Domain\n fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']\n\n def validate_domain(self, value):\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n if len(value) > 255:\n raise serializers.ValidationError('Hostname must be 255 characters or less.')\n if value[-1:] == \".\":\n value = value[:-1] # strip exactly one dot from the right, if present\n labels = value.split('.')\n if 'xip.io' in value:\n return value\n if labels[0] == '*':\n raise serializers.ValidationError(\n 'Adding a wildcard subdomain is currently not supported.')\n allowed = re.compile(\"^(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n for label in labels:\n match = allowed.match(label)\n if not match or '--' in label or label.isdigit() or \\\n len(labels) == 1 and any(char.isdigit() for char in label):\n raise serializers.ValidationError('Hostname does not look valid.')\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n return value\n\n\nclass CertificateSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Cert` model.\"\"\"\n\n owner = 
serializers.ReadOnlyField(source='owner.username')\n expires = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a DomainCertSerializer.\"\"\"\n model = models.Certificate\n extra_kwargs = {'certificate': {'write_only': True},\n 'key': {'write_only': True},\n 'common_name': {'required': False}}\n read_only_fields = ['expires', 'created', 'updated']\n\n\nclass PushSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Push` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`PushSerializer`.\"\"\"\n model = models.Push\n fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',\n 'ssh_connection', 'ssh_original_command', 'created', 'updated']\n", "path": "controller/api/serializers.py"}]} | 3,699 | 597 |
gh_patches_debug_1424 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1891 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2.2.0 CLI reports version 2.1.2dev0
* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)
* Template project url: n/a
* Python version: 3.11
* Operating System: linux
### Description:
Get the accurate version of cookiecutter from the CLI
### What I've run:
```bash
cookiecutter --version
Cookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])
```
Would be a one-line fix, but ideally would always be sourced from exactly one place:
- `setup.py` and `importlib_metadata`
- `__init__.py`
- a `VERSION` file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """cookiecutter distutils configuration."""
2 from setuptools import setup
3
4 version = "2.2.2.dev0"
5
6 with open('README.md', encoding='utf-8') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'binaryornot>=0.4.4',
11 'Jinja2>=2.7,<4.0.0',
12 'click>=7.0,<9.0.0',
13 'pyyaml>=5.3.1',
14 'python-slugify>=4.0.0',
15 'requests>=2.23.0',
16 'arrow',
17 ]
18
19 setup(
20 name='cookiecutter',
21 version=version,
22 description=(
23 'A command-line utility that creates projects from project '
24 'templates, e.g. creating a Python package project from a '
25 'Python package project template.'
26 ),
27 long_description=readme,
28 long_description_content_type='text/markdown',
29 author='Audrey Feldroy',
30 author_email='[email protected]',
31 url='https://github.com/cookiecutter/cookiecutter',
32 project_urls={
33 "Documentation": "https://cookiecutter.readthedocs.io",
34 "Issues": "https://github.com/cookiecutter/cookiecutter/issues",
35 "Discord": "https://discord.gg/9BrxzPKuEW",
36 },
37 packages=['cookiecutter'],
38 package_dir={'cookiecutter': 'cookiecutter'},
39 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
40 include_package_data=True,
41 python_requires='>=3.7',
42 install_requires=requirements,
43 license='BSD',
44 zip_safe=False,
45 classifiers=[
46 "Development Status :: 5 - Production/Stable",
47 "Environment :: Console",
48 "Intended Audience :: Developers",
49 "Natural Language :: English",
50 "License :: OSI Approved :: BSD License",
51 "Programming Language :: Python :: 3 :: Only",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.7",
54 "Programming Language :: Python :: 3.8",
55 "Programming Language :: Python :: 3.9",
56 "Programming Language :: Python :: 3.10",
57 "Programming Language :: Python :: 3.11",
58 "Programming Language :: Python :: Implementation :: CPython",
59 "Programming Language :: Python :: Implementation :: PyPy",
60 "Programming Language :: Python",
61 "Topic :: Software Development",
62 ],
63 keywords=[
64 "cookiecutter",
65 "Python",
66 "projects",
67 "project templates",
68 "Jinja2",
69 "skeleton",
70 "scaffolding",
71 "project directory",
72 "package",
73 "packaging",
74 ],
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
"""cookiecutter distutils configuration."""
from setuptools import setup
-version = "2.2.2.dev0"
+version = "2.2.2"
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,7 @@\n \"\"\"cookiecutter distutils configuration.\"\"\"\n from setuptools import setup\n \n-version = \"2.2.2.dev0\"\n+version = \"2.2.2\"\n \n with open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n", "issue": "2.2.0 CLI reports version 2.1.2dev0 \n* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)\r\n* Template project url: n/a\r\n* Python version: 3.11\r\n* Operating System: linux\r\n\r\n### Description:\r\n\r\nGet the accurate version of cookiecutter from the CLI\r\n\r\n### What I've run:\r\n\r\n```bash\r\ncookiecutter --version\r\nCookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])\r\n```\r\n\r\nWould be a one-line fix, but ideally would be always be sourced from exactly one place:\r\n- `setup.py` and `importlib_metadata`\r\n- `__init__.py`\r\n- a `VERSION` file\n", "before_files": [{"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.2\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = 
readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 1,211 | 88 |
gh_patches_debug_9792 | rasdani/github-patches | git_diff | fossasia__open-event-server-5593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users are given various roles when they are deleted
**Describe the bug**
Users are given various roles when they are deleted
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots**

**Additional context**
I am working on this issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/users.py`
Content:
```
1 import base64
2
3 from flask import Blueprint, request, jsonify, abort, make_response
4 from flask_jwt import current_identity as current_user
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6 from sqlalchemy.orm.exc import NoResultFound
7 import urllib.error
8
9 from app import get_settings
10 from app.api.bootstrap import api
11 from app.api.helpers.db import safe_query, get_count
12 from app.api.helpers.exceptions import ConflictException, UnprocessableEntity, ForbiddenException
13 from app.api.helpers.files import create_save_image_sizes, make_frontend_url
14 from app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action
15 from app.api.helpers.permission_manager import has_access
16 from app.api.helpers.permissions import is_user_itself
17 from app.api.helpers.utilities import get_serializer, str_generator
18 from app.api.schema.users import UserSchema, UserSchemaPublic
19 from app.models import db
20 from app.models.access_code import AccessCode
21 from app.models.discount_code import DiscountCode
22 from app.models.email_notification import EmailNotification
23 from app.models.event_invoice import EventInvoice
24 from app.models.feedback import Feedback
25 from app.models.mail import USER_REGISTER_WITH_PASSWORD, PASSWORD_RESET_AND_VERIFY
26 from app.models.notification import Notification
27 from app.models.session import Session
28 from app.models.speaker import Speaker
29 from app.models.ticket_holder import TicketHolder
30 from app.models.user import User
31 from app.models.users_events_role import UsersEventsRoles
32
33 user_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')
34
35
36 class UserList(ResourceList):
37 """
38 List and create Users
39 """
40
41 def before_create_object(self, data, view_kwargs):
42 """
43 method to check if there is an existing user with same email which is received in data to create a new user
44 :param data:
45 :param view_kwargs:
46 :return:
47 """
48 if db.session.query(User.id).filter_by(email=data['email']).scalar() is not None:
49 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
50
51 def after_create_object(self, user, data, view_kwargs):
52 """
53 method to send-
54 email notification
55 mail link for register verification
56 add image urls
57 :param user:
58 :param data:
59 :param view_kwargs:
60 :return:
61 """
62
63 if user.was_registered_with_order:
64 link = make_frontend_url('/reset-password', {'token': user.reset_password})
65 send_email_with_action(user, PASSWORD_RESET_AND_VERIFY, app_name=get_settings()['app_name'],
66 email=user.email, link=link)
67 else:
68 s = get_serializer()
69 hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')
70 link = make_frontend_url('/verify'.format(id=user.id), {'token': hash})
71 send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],
72 email=user.email)
73 send_email_confirmation(user.email, link)
74
75 if data.get('original_image_url'):
76 try:
77 uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)
78 except (urllib.error.HTTPError, urllib.error.URLError):
79 raise UnprocessableEntity(
80 {'source': 'attributes/original-image-url'}, 'Invalid Image URL'
81 )
82 uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']
83 del uploaded_images['large_image_url']
84 self.session.query(User).filter_by(id=user.id).update(uploaded_images)
85
86 decorators = (api.has_permission('is_admin', methods="GET"),)
87 schema = UserSchema
88 data_layer = {'session': db.session,
89 'model': User,
90 'methods': {
91 'before_create_object': before_create_object,
92 'after_create_object': after_create_object
93 }}
94
95
96 class UserDetail(ResourceDetail):
97 """
98 User detail by id
99 """
100
101 def before_get(self, args, kwargs):
102
103 if current_user.is_admin or current_user.is_super_admin or current_user:
104 self.schema = UserSchema
105 else:
106 self.schema = UserSchemaPublic
107
108 def before_get_object(self, view_kwargs):
109 """
110 before get method for user object
111 :param view_kwargs:
112 :return:
113 """
114 if view_kwargs.get('notification_id') is not None:
115 notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')
116 if notification.user_id is not None:
117 view_kwargs['id'] = notification.user_id
118 else:
119 view_kwargs['id'] = None
120
121 if view_kwargs.get('feedback_id') is not None:
122 feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')
123 if feedback.user_id is not None:
124 view_kwargs['id'] = feedback.user_id
125 else:
126 view_kwargs['id'] = None
127
128 if view_kwargs.get('attendee_id') is not None:
129 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
130 if attendee.user is not None:
131 if (not has_access('is_user_itself',
132 user_id=attendee.user.id) or not has_access('is_coorganizer',
133 event_id=attendee.event_id)):
134 raise ForbiddenException({'source': ''}, 'Access Forbidden')
135 view_kwargs['id'] = attendee.user.id
136 else:
137 view_kwargs['id'] = None
138
139 if view_kwargs.get('event_invoice_id') is not None:
140 event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')
141 if event_invoice.user_id is not None:
142 view_kwargs['id'] = event_invoice.user_id
143 else:
144 view_kwargs['id'] = None
145
146 if view_kwargs.get('users_events_role_id') is not None:
147 users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],
148 'users_events_role_id')
149 if users_events_role.user_id is not None:
150 view_kwargs['id'] = users_events_role.user_id
151 else:
152 view_kwargs['id'] = None
153
154 if view_kwargs.get('speaker_id') is not None:
155 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
156 if speaker.user_id is not None:
157 view_kwargs['id'] = speaker.user_id
158 else:
159 view_kwargs['id'] = None
160
161 if view_kwargs.get('session_id') is not None:
162 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
163 if session.creator_id is not None:
164 view_kwargs['id'] = session.creator_id
165 else:
166 view_kwargs['id'] = None
167
168 if view_kwargs.get('access_code_id') is not None:
169 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
170 if access_code.marketer_id is not None:
171 view_kwargs['id'] = access_code.marketer_id
172 else:
173 view_kwargs['id'] = None
174
175 if view_kwargs.get('discount_code_id') is not None:
176 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
177 if discount_code.marketer_id is not None:
178 view_kwargs['id'] = discount_code.marketer_id
179 else:
180 view_kwargs['id'] = None
181
182 if view_kwargs.get('email_notification_id') is not None:
183 email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],
184 'email_notification_id')
185 if email_notification.user_id is not None:
186 view_kwargs['id'] = email_notification.user_id
187 else:
188 view_kwargs['id'] = None
189
190 def before_update_object(self, user, data, view_kwargs):
191 if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:
192 try:
193 uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)
194 except (urllib.error.HTTPError, urllib.error.URLError):
195 raise UnprocessableEntity(
196 {'source': 'attributes/original-image-url'}, 'Invalid Image URL'
197 )
198 data['original_image_url'] = uploaded_images['original_image_url']
199 data['small_image_url'] = uploaded_images['thumbnail_image_url']
200 data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']
201 data['icon_image_url'] = uploaded_images['icon_image_url']
202
203 if data.get('email') and data['email'] != user.email:
204 try:
205 db.session.query(User).filter_by(email=data['email']).one()
206 except NoResultFound:
207 view_kwargs['email_changed'] = user.email
208 else:
209 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
210
211 if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
212 user.is_admin = not user.is_admin
213
214 if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:
215 user.is_sales_admin = not user.is_sales_admin
216
217 if has_access('is_admin') and data.get('is_marketer') != user.is_marketer:
218 user.is_marketer = not user.is_marketer
219
220 def after_update_object(self, user, data, view_kwargs):
221 """
222 method to mail user about email change
223 :param user:
224 :param data:
225 :param view_kwargs:
226 :return:
227 """
228 if view_kwargs.get('email_changed'):
229 send_email_change_user_email(user, view_kwargs.get('email_changed'))
230
231 decorators = (api.has_permission('is_user_itself', fetch="user_id,id", fetch_as="user_id",
232 model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,
233 DiscountCode, EmailNotification, Speaker, User],
234 fetch_key_url="notification_id, feedback_id, users_events_role_id, session_id, \
235 event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id",
236 leave_if=lambda a: a.get('attendee_id')),)
237 schema = UserSchema
238 data_layer = {'session': db.session,
239 'model': User,
240 'methods': {
241 'before_get_object': before_get_object,
242 'before_update_object': before_update_object,
243 'after_update_object': after_update_object
244 }}
245
246
247 class UserRelationship(ResourceRelationship):
248 """
249 User Relationship
250 """
251 decorators = (is_user_itself,)
252 schema = UserSchema
253 data_layer = {'session': db.session,
254 'model': User}
255
256
257 @user_misc_routes.route('/users/checkEmail', methods=['POST'])
258 def is_email_available():
259 email = request.json.get('email', None)
260 if email:
261 if get_count(db.session.query(User).filter_by(email=email)):
262 return jsonify(
263 result="False"
264 )
265 else:
266 return jsonify(
267 result="True"
268 )
269 else:
270 abort(
271 make_response(jsonify(error="Email field missing"), 422)
272 )
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/users.py b/app/api/users.py
--- a/app/api/users.py
+++ b/app/api/users.py
@@ -208,7 +208,7 @@
else:
raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
- if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
+ if has_access('is_super_admin') and data.get('is_admin') and data.get('is_admin') != user.is_admin:
user.is_admin = not user.is_admin
if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:
| {"golden_diff": "diff --git a/app/api/users.py b/app/api/users.py\n--- a/app/api/users.py\n+++ b/app/api/users.py\n@@ -208,7 +208,7 @@\n else:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n \n- if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n+ if has_access('is_super_admin') and data.get('is_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n \n if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:\n", "issue": "Users are given various roles when they are deleted\n**Describe the bug**\r\nUsers are given various roles when they are deleted\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots**\r\n\r\n\r\n**Additional context**\r\nI am working on this issue\n", "before_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom sqlalchemy.orm.exc import NoResultFound\nimport urllib.error\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException, UnprocessableEntity, ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD, PASSWORD_RESET_AND_VERIFY\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email']).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n\n if user.was_registered_with_order:\n link = make_frontend_url('/reset-password', {'token': user.reset_password})\n send_email_with_action(user, PASSWORD_RESET_AND_VERIFY, app_name=get_settings()['app_name'],\n email=user.email, link=link)\n else:\n s = 
get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n try:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)\n except (urllib.error.HTTPError, urllib.error.URLError):\n raise UnprocessableEntity(\n {'source': 'attributes/original-image-url'}, 'Invalid Image URL'\n )\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = 
None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n try:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)\n except (urllib.error.HTTPError, urllib.error.URLError):\n raise UnprocessableEntity(\n {'source': 'attributes/original-image-url'}, 'Invalid Image URL'\n )\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n try:\n db.session.query(User).filter_by(email=data['email']).one()\n except NoResultFound:\n view_kwargs['email_changed'] = user.email\n else:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:\n user.is_sales_admin = not user.is_sales_admin\n\n if has_access('is_admin') and data.get('is_marketer') != user.is_marketer:\n user.is_marketer = not user.is_marketer\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself,)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = 
request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}], "after_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom sqlalchemy.orm.exc import NoResultFound\nimport urllib.error\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException, UnprocessableEntity, ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD, PASSWORD_RESET_AND_VERIFY\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email']).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n\n if user.was_registered_with_order:\n link = make_frontend_url('/reset-password', {'token': user.reset_password})\n send_email_with_action(user, PASSWORD_RESET_AND_VERIFY, app_name=get_settings()['app_name'],\n email=user.email, link=link)\n else:\n s = get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n try:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)\n except (urllib.error.HTTPError, urllib.error.URLError):\n raise UnprocessableEntity(\n 
{'source': 'attributes/original-image-url'}, 'Invalid Image URL'\n )\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if 
view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n try:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)\n except (urllib.error.HTTPError, urllib.error.URLError):\n raise UnprocessableEntity(\n {'source': 'attributes/original-image-url'}, 'Invalid Image URL'\n )\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n try:\n db.session.query(User).filter_by(email=data['email']).one()\n except NoResultFound:\n view_kwargs['email_changed'] = user.email\n else:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n if has_access('is_super_admin') and data.get('is_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:\n user.is_sales_admin = not user.is_sales_admin\n\n if has_access('is_admin') and data.get('is_marketer') != user.is_marketer:\n user.is_marketer = not user.is_marketer\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself,)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}]} | 3,505 | 147 |
gh_patches_debug_62 | rasdani/github-patches | git_diff | edgedb__edgedb-2139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better syntax errors for substitution tokens
Currently as of 1.0-alpha.8+dev.5341.g66ec73494 it fails with InternalServerError:
```
edgedb> SELECT \(x);
ERROR: InternalServerError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/work/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/work/edb/server/compiler/compiler.py", line 1935, in compile
return self._compile(ctx=ctx, source=source)
File "/work/edb/server/compiler/compiler.py", line 1487, in _compile
return self._try_compile(ctx=ctx, source=source)
File "/work/edb/server/compiler/compiler.py", line 1519, in _try_compile
statements = edgeql.parse_block(source)
File "/work/edb/edgeql/parser/__init__.py", line 69, in parse_block
return parser.parse(source)
File "/work/edb/common/parsing.py", line 401, in parse
token = self.process_lex_token(mod, tok)
File "/work/edb/common/parsing.py", line 390, in process_lex_token
return mod.TokenMeta.for_lex_token(tok.kind())(
File "/work/edb/common/parsing.py", line 100, in for_lex_token
return mcls.token_map[mcls, token]
KeyError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')
```
This bug appeared after #2131
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/edgeql/parser/grammar/tokens.py`
Content:
```
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 from __future__ import annotations
21
22 import re
23 import sys
24 import types
25
26 from edb.common import parsing
27
28 from . import keywords
29 from . import precedence
30 from . import lexer
31
32
33 clean_string = re.compile(r"'(?:\s|\n)+'")
34 string_quote = re.compile(lexer.re_dquote)
35
36
37 class TokenMeta(parsing.TokenMeta):
38 pass
39
40
41 class Token(parsing.Token, metaclass=TokenMeta,
42 precedence_class=precedence.PrecedenceMeta):
43 pass
44
45
46 class T_DOT(Token, lextoken='.'):
47 pass
48
49
50 class T_DOTBW(Token, lextoken='.<'):
51 pass
52
53
54 class T_LBRACKET(Token, lextoken='['):
55 pass
56
57
58 class T_RBRACKET(Token, lextoken=']'):
59 pass
60
61
62 class T_LPAREN(Token, lextoken='('):
63 pass
64
65
66 class T_RPAREN(Token, lextoken=')'):
67 pass
68
69
70 class T_LBRACE(Token, lextoken='{'):
71 pass
72
73
74 class T_RBRACE(Token, lextoken='}'):
75 pass
76
77
78 class T_DOUBLECOLON(Token, lextoken='::'):
79 pass
80
81
82 class T_DOUBLEQMARK(Token, lextoken='??'):
83 pass
84
85
86 class T_COLON(Token, lextoken=':'):
87 pass
88
89
90 class T_SEMICOLON(Token, lextoken=';'):
91 pass
92
93
94 class T_COMMA(Token, lextoken=','):
95 pass
96
97
98 class T_PLUS(Token, lextoken='+'):
99 pass
100
101
102 class T_DOUBLEPLUS(Token, lextoken='++'):
103 pass
104
105
106 class T_MINUS(Token, lextoken='-'):
107 pass
108
109
110 class T_STAR(Token, lextoken='*'):
111 pass
112
113
114 class T_SLASH(Token, lextoken='/'):
115 pass
116
117
118 class T_DOUBLESLASH(Token, lextoken='//'):
119 pass
120
121
122 class T_PERCENT(Token, lextoken='%'):
123 pass
124
125
126 class T_CIRCUMFLEX(Token, lextoken='^'):
127 pass
128
129
130 class T_AT(Token, lextoken='@'):
131 pass
132
133
134 class T_ARGUMENT(Token):
135 pass
136
137
138 class T_ASSIGN(Token):
139 pass
140
141
142 class T_ADDASSIGN(Token):
143 pass
144
145
146 class T_REMASSIGN(Token):
147 pass
148
149
150 class T_ARROW(Token):
151 pass
152
153
154 class T_LANGBRACKET(Token, lextoken='<'):
155 pass
156
157
158 class T_RANGBRACKET(Token, lextoken='>'):
159 pass
160
161
162 class T_EQUALS(Token, lextoken='='):
163 pass
164
165
166 class T_AMPER(Token, lextoken='&'):
167 pass
168
169
170 class T_PIPE(Token, lextoken='|'):
171 pass
172
173
174 class T_NAMEDONLY(Token):
175 pass
176
177
178 class T_SETANNOTATION(Token):
179 pass
180
181
182 class T_SETTYPE(Token):
183 pass
184
185
186 class T_ICONST(Token):
187 pass
188
189
190 class T_NICONST(Token):
191 pass
192
193
194 class T_FCONST(Token):
195 pass
196
197
198 class T_NFCONST(Token):
199 pass
200
201
202 class T_BCONST(Token):
203 pass
204
205
206 class T_SCONST(Token):
207 pass
208
209
210 class T_RSCONST(Token):
211 pass
212
213
214 class T_IDENT(Token):
215 pass
216
217
218 class T_OP(Token):
219 pass
220
221
222 class T_EOF(Token):
223 pass
224
225
226 def _gen_keyword_tokens():
227 # Define keyword tokens
228
229 mod = sys.modules[__name__]
230
231 def clsexec(ns):
232 ns['__module__'] = __name__
233 return ns
234
235 for token, _ in keywords.edgeql_keywords.values():
236 clsname = 'T_{}'.format(token)
237 clskwds = dict(metaclass=parsing.TokenMeta, token=token)
238 cls = types.new_class(clsname, (Token,), clskwds, clsexec)
239 setattr(mod, clsname, cls)
240
241
242 _gen_keyword_tokens()
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/edgeql/parser/grammar/tokens.py b/edb/edgeql/parser/grammar/tokens.py
--- a/edb/edgeql/parser/grammar/tokens.py
+++ b/edb/edgeql/parser/grammar/tokens.py
@@ -219,6 +219,10 @@
pass
+class T_SUBSTITUTION(Token):
+ pass
+
+
class T_EOF(Token):
pass
| {"golden_diff": "diff --git a/edb/edgeql/parser/grammar/tokens.py b/edb/edgeql/parser/grammar/tokens.py\n--- a/edb/edgeql/parser/grammar/tokens.py\n+++ b/edb/edgeql/parser/grammar/tokens.py\n@@ -219,6 +219,10 @@\n pass\n \n \n+class T_SUBSTITUTION(Token):\n+ pass\n+\n+\n class T_EOF(Token):\n pass\n", "issue": "Better syntax errors for substitution tokens\nCurrently as of 1.0-alpha.8+dev.5341.g66ec73494 it fails with InternalServerError:\r\n```\r\nedgedb> SELECT \\(x);\r\nERROR: InternalServerError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')\r\n Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md\r\n Server traceback:\r\n Traceback (most recent call last):\r\n File \"/work/edb/server/procpool/worker.py\", line 75, in worker\r\n res = await meth(*args)\r\n File \"/work/edb/server/compiler/compiler.py\", line 1935, in compile\r\n return self._compile(ctx=ctx, source=source)\r\n File \"/work/edb/server/compiler/compiler.py\", line 1487, in _compile\r\n return self._try_compile(ctx=ctx, source=source)\r\n File \"/work/edb/server/compiler/compiler.py\", line 1519, in _try_compile\r\n statements = edgeql.parse_block(source)\r\n File \"/work/edb/edgeql/parser/__init__.py\", line 69, in parse_block\r\n return parser.parse(source)\r\n File \"/work/edb/common/parsing.py\", line 401, in parse\r\n token = self.process_lex_token(mod, tok)\r\n File \"/work/edb/common/parsing.py\", line 390, in process_lex_token\r\n return mod.TokenMeta.for_lex_token(tok.kind())(\r\n File \"/work/edb/common/parsing.py\", line 100, in for_lex_token\r\n return mcls.token_map[mcls, token]\r\n KeyError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')\r\n```\r\nThis bug appeared after #2131 \n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport types\n\nfrom edb.common import parsing\n\nfrom . import keywords\nfrom . import precedence\nfrom . 
import lexer\n\n\nclean_string = re.compile(r\"'(?:\\s|\\n)+'\")\nstring_quote = re.compile(lexer.re_dquote)\n\n\nclass TokenMeta(parsing.TokenMeta):\n pass\n\n\nclass Token(parsing.Token, metaclass=TokenMeta,\n precedence_class=precedence.PrecedenceMeta):\n pass\n\n\nclass T_DOT(Token, lextoken='.'):\n pass\n\n\nclass T_DOTBW(Token, lextoken='.<'):\n pass\n\n\nclass T_LBRACKET(Token, lextoken='['):\n pass\n\n\nclass T_RBRACKET(Token, lextoken=']'):\n pass\n\n\nclass T_LPAREN(Token, lextoken='('):\n pass\n\n\nclass T_RPAREN(Token, lextoken=')'):\n pass\n\n\nclass T_LBRACE(Token, lextoken='{'):\n pass\n\n\nclass T_RBRACE(Token, lextoken='}'):\n pass\n\n\nclass T_DOUBLECOLON(Token, lextoken='::'):\n pass\n\n\nclass T_DOUBLEQMARK(Token, lextoken='??'):\n pass\n\n\nclass T_COLON(Token, lextoken=':'):\n pass\n\n\nclass T_SEMICOLON(Token, lextoken=';'):\n pass\n\n\nclass T_COMMA(Token, lextoken=','):\n pass\n\n\nclass T_PLUS(Token, lextoken='+'):\n pass\n\n\nclass T_DOUBLEPLUS(Token, lextoken='++'):\n pass\n\n\nclass T_MINUS(Token, lextoken='-'):\n pass\n\n\nclass T_STAR(Token, lextoken='*'):\n pass\n\n\nclass T_SLASH(Token, lextoken='/'):\n pass\n\n\nclass T_DOUBLESLASH(Token, lextoken='//'):\n pass\n\n\nclass T_PERCENT(Token, lextoken='%'):\n pass\n\n\nclass T_CIRCUMFLEX(Token, lextoken='^'):\n pass\n\n\nclass T_AT(Token, lextoken='@'):\n pass\n\n\nclass T_ARGUMENT(Token):\n pass\n\n\nclass T_ASSIGN(Token):\n pass\n\n\nclass T_ADDASSIGN(Token):\n pass\n\n\nclass T_REMASSIGN(Token):\n pass\n\n\nclass T_ARROW(Token):\n pass\n\n\nclass T_LANGBRACKET(Token, lextoken='<'):\n pass\n\n\nclass T_RANGBRACKET(Token, lextoken='>'):\n pass\n\n\nclass T_EQUALS(Token, lextoken='='):\n pass\n\n\nclass T_AMPER(Token, lextoken='&'):\n pass\n\n\nclass T_PIPE(Token, lextoken='|'):\n pass\n\n\nclass T_NAMEDONLY(Token):\n pass\n\n\nclass T_SETANNOTATION(Token):\n pass\n\n\nclass T_SETTYPE(Token):\n pass\n\n\nclass T_ICONST(Token):\n pass\n\n\nclass T_NICONST(Token):\n pass\n\n\nclass T_FCONST(Token):\n pass\n\n\nclass T_NFCONST(Token):\n pass\n\n\nclass T_BCONST(Token):\n pass\n\n\nclass T_SCONST(Token):\n pass\n\n\nclass T_RSCONST(Token):\n pass\n\n\nclass T_IDENT(Token):\n pass\n\n\nclass T_OP(Token):\n pass\n\n\nclass T_EOF(Token):\n pass\n\n\ndef _gen_keyword_tokens():\n # Define keyword tokens\n\n mod = sys.modules[__name__]\n\n def clsexec(ns):\n ns['__module__'] = __name__\n return ns\n\n for token, _ in keywords.edgeql_keywords.values():\n clsname = 'T_{}'.format(token)\n clskwds = dict(metaclass=parsing.TokenMeta, token=token)\n cls = types.new_class(clsname, (Token,), clskwds, clsexec)\n setattr(mod, clsname, cls)\n\n\n_gen_keyword_tokens()\n", "path": "edb/edgeql/parser/grammar/tokens.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport types\n\nfrom edb.common import parsing\n\nfrom . import keywords\nfrom . import precedence\nfrom . import lexer\n\n\nclean_string = re.compile(r\"'(?:\\s|\\n)+'\")\nstring_quote = re.compile(lexer.re_dquote)\n\n\nclass TokenMeta(parsing.TokenMeta):\n pass\n\n\nclass Token(parsing.Token, metaclass=TokenMeta,\n precedence_class=precedence.PrecedenceMeta):\n pass\n\n\nclass T_DOT(Token, lextoken='.'):\n pass\n\n\nclass T_DOTBW(Token, lextoken='.<'):\n pass\n\n\nclass T_LBRACKET(Token, lextoken='['):\n pass\n\n\nclass T_RBRACKET(Token, lextoken=']'):\n pass\n\n\nclass T_LPAREN(Token, lextoken='('):\n pass\n\n\nclass T_RPAREN(Token, lextoken=')'):\n pass\n\n\nclass T_LBRACE(Token, lextoken='{'):\n pass\n\n\nclass T_RBRACE(Token, lextoken='}'):\n pass\n\n\nclass T_DOUBLECOLON(Token, lextoken='::'):\n pass\n\n\nclass T_DOUBLEQMARK(Token, lextoken='??'):\n pass\n\n\nclass T_COLON(Token, lextoken=':'):\n pass\n\n\nclass T_SEMICOLON(Token, lextoken=';'):\n pass\n\n\nclass T_COMMA(Token, lextoken=','):\n pass\n\n\nclass T_PLUS(Token, lextoken='+'):\n pass\n\n\nclass T_DOUBLEPLUS(Token, lextoken='++'):\n pass\n\n\nclass T_MINUS(Token, lextoken='-'):\n pass\n\n\nclass T_STAR(Token, lextoken='*'):\n pass\n\n\nclass T_SLASH(Token, lextoken='/'):\n pass\n\n\nclass T_DOUBLESLASH(Token, lextoken='//'):\n pass\n\n\nclass T_PERCENT(Token, lextoken='%'):\n pass\n\n\nclass T_CIRCUMFLEX(Token, lextoken='^'):\n pass\n\n\nclass T_AT(Token, lextoken='@'):\n pass\n\n\nclass T_ARGUMENT(Token):\n pass\n\n\nclass T_ASSIGN(Token):\n pass\n\n\nclass T_ADDASSIGN(Token):\n pass\n\n\nclass T_REMASSIGN(Token):\n pass\n\n\nclass T_ARROW(Token):\n pass\n\n\nclass T_LANGBRACKET(Token, lextoken='<'):\n pass\n\n\nclass T_RANGBRACKET(Token, lextoken='>'):\n pass\n\n\nclass T_EQUALS(Token, lextoken='='):\n pass\n\n\nclass T_AMPER(Token, lextoken='&'):\n pass\n\n\nclass T_PIPE(Token, lextoken='|'):\n pass\n\n\nclass T_NAMEDONLY(Token):\n pass\n\n\nclass T_SETANNOTATION(Token):\n pass\n\n\nclass T_SETTYPE(Token):\n pass\n\n\nclass T_ICONST(Token):\n pass\n\n\nclass T_NICONST(Token):\n pass\n\n\nclass T_FCONST(Token):\n pass\n\n\nclass T_NFCONST(Token):\n pass\n\n\nclass T_BCONST(Token):\n pass\n\n\nclass T_SCONST(Token):\n pass\n\n\nclass T_RSCONST(Token):\n pass\n\n\nclass T_IDENT(Token):\n pass\n\n\nclass T_OP(Token):\n pass\n\n\nclass T_SUBSTITUTION(Token):\n pass\n\n\nclass T_EOF(Token):\n pass\n\n\ndef _gen_keyword_tokens():\n # Define keyword tokens\n\n mod = sys.modules[__name__]\n\n def clsexec(ns):\n ns['__module__'] = __name__\n return ns\n\n for token, _ in keywords.edgeql_keywords.values():\n clsname = 'T_{}'.format(token)\n clskwds = dict(metaclass=parsing.TokenMeta, token=token)\n cls = types.new_class(clsname, (Token,), clskwds, clsexec)\n setattr(mod, clsname, cls)\n\n\n_gen_keyword_tokens()\n", "path": "edb/edgeql/parser/grammar/tokens.py"}]} | 2,357 | 97 |
gh_patches_debug_33 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stop using external 'mock' dependency
As of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shutil
17
18 import nox
19
20 BLACK_VERSION = "black==22.3.0"
21 ISORT_VERSION = "isort==5.10.1"
22 BLACK_PATHS = [
23 "apiclient",
24 "googleapiclient",
25 "scripts",
26 "tests",
27 "describe.py",
28 "expandsymlinks.py",
29 "noxfile.py",
30 "owlbot.py",
31 "setup.py",
32 ]
33
34 test_dependencies = [
35 "django>=2.0.0",
36 "google-auth",
37 "google-auth-httplib2",
38 "mox",
39 "parameterized",
40 "pyopenssl",
41 "pytest",
42 "pytest-cov",
43 "webtest",
44 "coverage",
45 "mock",
46 ]
47
48
49 @nox.session(python=["3.7"])
50 def lint(session):
51 session.install("flake8")
52 session.run(
53 "flake8",
54 "googleapiclient",
55 "tests",
56 "--count",
57 "--select=E9,F63,F7,F82",
58 "--show-source",
59 "--statistics",
60 )
61
62
63 @nox.session(python="3.8")
64 def format(session):
65 """
66 Run isort to sort imports. Then run black
67 to format code to uniform standard.
68 """
69 session.install(BLACK_VERSION, ISORT_VERSION)
70 # Use the --fss option to sort imports using strict alphabetical order.
71 # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
72 session.run(
73 "isort",
74 "--fss",
75 *BLACK_PATHS,
76 )
77 session.run(
78 "black",
79 *BLACK_PATHS,
80 )
81
82
83 @nox.session(python=["3.7", "3.8", "3.9", "3.10"])
84 @nox.parametrize(
85 "oauth2client",
86 [
87 "oauth2client<2dev",
88 "oauth2client>=2,<=3dev",
89 "oauth2client>=3,<=4dev",
90 "oauth2client>=4,<=5dev",
91 ],
92 )
93 def unit(session, oauth2client):
94 # Clean up dist and build folders
95 shutil.rmtree("dist", ignore_errors=True)
96 shutil.rmtree("build", ignore_errors=True)
97
98 session.install(*test_dependencies)
99 session.install(oauth2client)
100
101 # Create and install wheels
102 session.run("python3", "setup.py", "bdist_wheel")
103 session.install(os.path.join("dist", os.listdir("dist").pop()))
104
105 # Run tests from a different directory to test the package artifacts
106 root_dir = os.path.dirname(os.path.realpath(__file__))
107 temp_dir = session.create_tmp()
108 session.chdir(temp_dir)
109 shutil.copytree(os.path.join(root_dir, "tests"), "tests")
110
111 # Run py.test against the unit tests.
112 session.run(
113 "py.test",
114 "--quiet",
115 "--cov=googleapiclient",
116 "--cov=tests",
117 "--cov-append",
118 "--cov-config=.coveragerc",
119 "--cov-report=",
120 "--cov-fail-under=85",
121 "tests",
122 *session.posargs,
123 )
124
125
126 @nox.session(python=["3.9"])
127 def scripts(session):
128 session.install(*test_dependencies)
129 session.install("-e", ".")
130 session.install("-r", "scripts/requirements.txt")
131
132 # Run py.test against the unit tests.
133 session.run(
134 "py.test",
135 "--quiet",
136 "--cov=scripts",
137 "--cov-config=.coveragerc",
138 "--cov-report=",
139 "--cov-fail-under=91",
140 "scripts",
141 *session.posargs,
142 )
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -42,7 +42,6 @@
"pytest-cov",
"webtest",
"coverage",
- "mock",
]
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -42,7 +42,6 @@\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n- \"mock\",\n ]\n", "issue": "Stop using external 'mock' dependency\nAs of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport nox\n\nBLACK_VERSION = \"black==22.3.0\"\nISORT_VERSION = \"isort==5.10.1\"\nBLACK_PATHS = [\n \"apiclient\",\n \"googleapiclient\",\n \"scripts\",\n \"tests\",\n \"describe.py\",\n \"expandsymlinks.py\",\n \"noxfile.py\",\n \"owlbot.py\",\n \"setup.py\",\n]\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=\"3.8\")\ndef format(session):\n \"\"\"\n Run isort to sort imports. 
Then run black\n to format code to uniform standard.\n \"\"\"\n session.install(BLACK_VERSION, ISORT_VERSION)\n # Use the --fss option to sort imports using strict alphabetical order.\n # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections\n session.run(\n \"isort\",\n \"--fss\",\n *BLACK_PATHS,\n )\n session.run(\n \"black\",\n *BLACK_PATHS,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"build\", ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run(\"python3\", \"setup.py\", \"bdist_wheel\")\n session.install(os.path.join(\"dist\", os.listdir(\"dist\").pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, \"tests\"), \"tests\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=[\"3.9\"])\ndef scripts(session):\n session.install(*test_dependencies)\n session.install(\"-e\", \".\")\n session.install(\"-r\", \"scripts/requirements.txt\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=scripts\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=91\",\n \"scripts\",\n *session.posargs,\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport nox\n\nBLACK_VERSION = \"black==22.3.0\"\nISORT_VERSION = \"isort==5.10.1\"\nBLACK_PATHS = [\n \"apiclient\",\n \"googleapiclient\",\n \"scripts\",\n \"tests\",\n \"describe.py\",\n \"expandsymlinks.py\",\n \"noxfile.py\",\n \"owlbot.py\",\n \"setup.py\",\n]\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=\"3.8\")\ndef format(session):\n \"\"\"\n Run isort to sort imports. 
Then run black\n to format code to uniform standard.\n \"\"\"\n session.install(BLACK_VERSION, ISORT_VERSION)\n # Use the --fss option to sort imports using strict alphabetical order.\n # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections\n session.run(\n \"isort\",\n \"--fss\",\n *BLACK_PATHS,\n )\n session.run(\n \"black\",\n *BLACK_PATHS,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"build\", ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run(\"python3\", \"setup.py\", \"bdist_wheel\")\n session.install(os.path.join(\"dist\", os.listdir(\"dist\").pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, \"tests\"), \"tests\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=[\"3.9\"])\ndef scripts(session):\n session.install(*test_dependencies)\n session.install(\"-e\", \".\")\n session.install(\"-r\", \"scripts/requirements.txt\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=scripts\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=91\",\n \"scripts\",\n *session.posargs,\n )\n", "path": "noxfile.py"}]} | 1,557 | 61 |
gh_patches_debug_7786 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-619 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Comet PAPI Deprecated
Use of the Comet API logger reports an unnecessary deprecation warning relating to the use of comet_ml.papi rather than the newer comet_ml.api.
Example:
`COMET WARNING: You have imported comet_ml.papi; this interface is deprecated. Please use comet_ml.api instead. For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300`
--- END ISSUE ---
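For illustration, here is a minimal sketch of the fallback-import pattern that silences the warning on newer comet_ml releases while keeping compatibility with older ones; the REST API key is a placeholder and comet_ml is assumed to be installed:

```python
# Prefer the newer comet_ml.api module; fall back to the deprecated
# comet_ml.papi only for older comet_ml releases that lack it.
try:
    from comet_ml.api import API
except ImportError:
    from comet_ml.papi import API  # deprecated path, kept for compatibility

# Used the same way the logger below constructs it (placeholder key).
comet_api = API("MY_COMET_REST_API_KEY")
```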
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/logging/comet.py`
Content:
```
1 """
2 Log using `comet <https://www.comet.ml>`_
3
4 Comet logger can be used in either online or offline mode.
5 To log in online mode, CometLogger requries an API key:
6
7 .. code-block:: python
8
9 from pytorch_lightning.logging import CometLogger
10 # arguments made to CometLogger are passed on to the comet_ml.Experiment class
11 comet_logger = CometLogger(
12 api_key=os.environ["COMET_KEY"],
13 workspace=os.environ["COMET_WORKSPACE"], # Optional
14 project_name="default_project", # Optional
15 rest_api_key=os.environ["COMET_REST_KEY"], # Optional
16 experiment_name="default" # Optional
17 )
18 trainer = Trainer(logger=comet_logger)
19
20 To log in offline mode, CometLogger requires a path to a local directory:
21
22 .. code-block:: python
23
24 from pytorch_lightning.logging import CometLogger
25 # arguments made to CometLogger are passed on to the comet_ml.Experiment class
26 comet_logger = CometLogger(
27 save_dir=".",
28 workspace=os.environ["COMET_WORKSPACE"], # Optional
29 project_name="default_project", # Optional
30 rest_api_key=os.environ["COMET_REST_KEY"], # Optional
31 experiment_name="default" # Optional
32 )
33 trainer = Trainer(logger=comet_logger)
34
35
36 Use the logger anywhere in you LightningModule as follows:
37
38 .. code-block:: python
39
40 def train_step(...):
41 # example
42 self.logger.experiment.whatever_comet_ml_supports(...)
43
44 def any_lightning_module_function_or_hook(...):
45 self.logger.experiment.whatever_comet_ml_supports(...)
46
47
48 """
49
50 from logging import getLogger
51
52 try:
53 from comet_ml import Experiment as CometExperiment
54 from comet_ml import OfflineExperiment as CometOfflineExperiment
55 from comet_ml.papi import API
56 except ImportError:
57 raise ImportError('Missing comet_ml package.')
58
59 from torch import is_tensor
60
61 from .base import LightningLoggerBase, rank_zero_only
62 from ..utilities.debugging import MisconfigurationException
63
64 logger = getLogger(__name__)
65
66
67 class CometLogger(LightningLoggerBase):
68 def __init__(self, api_key=None, save_dir=None, workspace=None,
69 rest_api_key=None, project_name=None, experiment_name=None, **kwargs):
70 """Initialize a Comet.ml logger.
71 Requires either an API Key (online mode) or a local directory path (offline mode)
72
73 :param str api_key: Required in online mode. API key, found on Comet.ml
74 :param str save_dir: Required in offline mode. The path for the directory to save local comet logs
75 :param str workspace: Optional. Name of workspace for this user
76 :param str project_name: Optional. Send your experiment to a specific project.
77 Otherwise will be sent to Uncategorized Experiments.
78 If project name does not already exists Comet.ml will create a new project.
79 :param str rest_api_key: Optional. Rest API key found in Comet.ml settings.
80 This is used to determine version number
81 :param str experiment_name: Optional. String representing the name for this particular experiment on Comet.ml
82 """
83 super().__init__()
84 self._experiment = None
85
86 # Determine online or offline mode based on which arguments were passed to CometLogger
87 if save_dir is not None and api_key is not None:
88 # If arguments are passed for both save_dir and api_key, preference is given to online mode
89 self.mode = "online"
90 self.api_key = api_key
91 elif api_key is not None:
92 self.mode = "online"
93 self.api_key = api_key
94 elif save_dir is not None:
95 self.mode = "offline"
96 self.save_dir = save_dir
97 else:
98 # If neither api_key nor save_dir are passed as arguments, raise an exception
99 raise MisconfigurationException("CometLogger requires either api_key or save_dir during initialization.")
100
101 logger.info(f"CometLogger will be initialized in {self.mode} mode")
102
103 self.workspace = workspace
104 self.project_name = project_name
105 self._kwargs = kwargs
106
107 if rest_api_key is not None:
108 # Comet.ml rest API, used to determine version number
109 self.rest_api_key = rest_api_key
110 self.comet_api = API(self.rest_api_key)
111 else:
112 self.rest_api_key = None
113 self.comet_api = None
114
115 if experiment_name:
116 try:
117 self.name = experiment_name
118 except TypeError as e:
119 logger.exception("Failed to set experiment name for comet.ml logger")
120
121 @property
122 def experiment(self):
123 if self._experiment is not None:
124 return self._experiment
125
126 if self.mode == "online":
127 self._experiment = CometExperiment(
128 api_key=self.api_key,
129 workspace=self.workspace,
130 project_name=self.project_name,
131 **self._kwargs
132 )
133 else:
134 self._experiment = CometOfflineExperiment(
135 offline_directory=self.save_dir,
136 workspace=self.workspace,
137 project_name=self.project_name,
138 **self._kwargs
139 )
140
141 return self._experiment
142
143 @rank_zero_only
144 def log_hyperparams(self, params):
145 self.experiment.log_parameters(vars(params))
146
147 @rank_zero_only
148 def log_metrics(self, metrics, step=None):
149 # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
150 for key, val in metrics.items():
151 if is_tensor(val):
152 metrics[key] = val.cpu().detach()
153
154 self.experiment.log_metrics(metrics, step=step)
155
156 @rank_zero_only
157 def finalize(self, status):
158 self.experiment.end()
159
160 @property
161 def name(self):
162 return self.experiment.project_name
163
164 @name.setter
165 def name(self, value):
166 self.experiment.set_name(value)
167
168 @property
169 def version(self):
170 return self.experiment.id
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/logging/comet.py b/pytorch_lightning/logging/comet.py
--- a/pytorch_lightning/logging/comet.py
+++ b/pytorch_lightning/logging/comet.py
@@ -52,7 +52,11 @@
try:
from comet_ml import Experiment as CometExperiment
from comet_ml import OfflineExperiment as CometOfflineExperiment
- from comet_ml.papi import API
+ try:
+ from comet_ml.api import API
+ except ImportError:
+ # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300
+ from comet_ml.papi import API
except ImportError:
raise ImportError('Missing comet_ml package.')
| {"golden_diff": "diff --git a/pytorch_lightning/logging/comet.py b/pytorch_lightning/logging/comet.py\n--- a/pytorch_lightning/logging/comet.py\n+++ b/pytorch_lightning/logging/comet.py\n@@ -52,7 +52,11 @@\n try:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n- from comet_ml.papi import API\n+ try:\n+ from comet_ml.api import API\n+ except ImportError:\n+ # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300\n+ from comet_ml.papi import API\n except ImportError:\n raise ImportError('Missing comet_ml package.')\n", "issue": "Comet PAPI Depreciated\nUse of the Comet API logger reports an unecessary depreciation warning relating to the use of comet_ml.papi, rather than the newer comet_ml.api.\r\n\r\nExample:\r\n`COMET WARNING: You have imported comet_ml.papi; this interface is deprecated. Please use comet_ml.api instead. For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300`\n", "before_files": [{"content": "\"\"\"\nLog using `comet <https://www.comet.ml>`_\n\nComet logger can be used in either online or offline mode.\nTo log in online mode, CometLogger requries an API key:\n\n.. code-block:: python\n\n from pytorch_lightning.logging import CometLogger\n # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n comet_logger = CometLogger(\n api_key=os.environ[\"COMET_KEY\"],\n workspace=os.environ[\"COMET_WORKSPACE\"], # Optional\n project_name=\"default_project\", # Optional\n rest_api_key=os.environ[\"COMET_REST_KEY\"], # Optional\n experiment_name=\"default\" # Optional\n )\n trainer = Trainer(logger=comet_logger)\n\nTo log in offline mode, CometLogger requires a path to a local directory:\n\n.. code-block:: python\n\n from pytorch_lightning.logging import CometLogger\n # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n comet_logger = CometLogger(\n save_dir=\".\",\n workspace=os.environ[\"COMET_WORKSPACE\"], # Optional\n project_name=\"default_project\", # Optional\n rest_api_key=os.environ[\"COMET_REST_KEY\"], # Optional\n experiment_name=\"default\" # Optional\n )\n trainer = Trainer(logger=comet_logger)\n\n\nUse the logger anywhere in you LightningModule as follows:\n\n.. code-block:: python\n\n def train_step(...):\n # example\n self.logger.experiment.whatever_comet_ml_supports(...)\n\n def any_lightning_module_function_or_hook(...):\n self.logger.experiment.whatever_comet_ml_supports(...)\n\n\n\"\"\"\n\nfrom logging import getLogger\n\ntry:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n from comet_ml.papi import API\nexcept ImportError:\n raise ImportError('Missing comet_ml package.')\n\nfrom torch import is_tensor\n\nfrom .base import LightningLoggerBase, rank_zero_only\nfrom ..utilities.debugging import MisconfigurationException\n\nlogger = getLogger(__name__)\n\n\nclass CometLogger(LightningLoggerBase):\n def __init__(self, api_key=None, save_dir=None, workspace=None,\n rest_api_key=None, project_name=None, experiment_name=None, **kwargs):\n \"\"\"Initialize a Comet.ml logger.\n Requires either an API Key (online mode) or a local directory path (offline mode)\n\n :param str api_key: Required in online mode. API key, found on Comet.ml\n :param str save_dir: Required in offline mode. The path for the directory to save local comet logs\n :param str workspace: Optional. 
Name of workspace for this user\n :param str project_name: Optional. Send your experiment to a specific project.\n Otherwise will be sent to Uncategorized Experiments.\n If project name does not already exists Comet.ml will create a new project.\n :param str rest_api_key: Optional. Rest API key found in Comet.ml settings.\n This is used to determine version number\n :param str experiment_name: Optional. String representing the name for this particular experiment on Comet.ml\n \"\"\"\n super().__init__()\n self._experiment = None\n\n # Determine online or offline mode based on which arguments were passed to CometLogger\n if save_dir is not None and api_key is not None:\n # If arguments are passed for both save_dir and api_key, preference is given to online mode\n self.mode = \"online\"\n self.api_key = api_key\n elif api_key is not None:\n self.mode = \"online\"\n self.api_key = api_key\n elif save_dir is not None:\n self.mode = \"offline\"\n self.save_dir = save_dir\n else:\n # If neither api_key nor save_dir are passed as arguments, raise an exception\n raise MisconfigurationException(\"CometLogger requires either api_key or save_dir during initialization.\")\n\n logger.info(f\"CometLogger will be initialized in {self.mode} mode\")\n\n self.workspace = workspace\n self.project_name = project_name\n self._kwargs = kwargs\n\n if rest_api_key is not None:\n # Comet.ml rest API, used to determine version number\n self.rest_api_key = rest_api_key\n self.comet_api = API(self.rest_api_key)\n else:\n self.rest_api_key = None\n self.comet_api = None\n\n if experiment_name:\n try:\n self.name = experiment_name\n except TypeError as e:\n logger.exception(\"Failed to set experiment name for comet.ml logger\")\n\n @property\n def experiment(self):\n if self._experiment is not None:\n return self._experiment\n\n if self.mode == \"online\":\n self._experiment = CometExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n else:\n self._experiment = CometOfflineExperiment(\n offline_directory=self.save_dir,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n self.experiment.log_parameters(vars(params))\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n # Comet.ml expects metrics to be a dictionary of detached tensors on CPU\n for key, val in metrics.items():\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n\n self.experiment.log_metrics(metrics, step=step)\n\n @rank_zero_only\n def finalize(self, status):\n self.experiment.end()\n\n @property\n def name(self):\n return self.experiment.project_name\n\n @name.setter\n def name(self, value):\n self.experiment.set_name(value)\n\n @property\n def version(self):\n return self.experiment.id\n", "path": "pytorch_lightning/logging/comet.py"}], "after_files": [{"content": "\"\"\"\nLog using `comet <https://www.comet.ml>`_\n\nComet logger can be used in either online or offline mode.\nTo log in online mode, CometLogger requries an API key:\n\n.. 
code-block:: python\n\n from pytorch_lightning.logging import CometLogger\n # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n comet_logger = CometLogger(\n api_key=os.environ[\"COMET_KEY\"],\n workspace=os.environ[\"COMET_WORKSPACE\"], # Optional\n project_name=\"default_project\", # Optional\n rest_api_key=os.environ[\"COMET_REST_KEY\"], # Optional\n experiment_name=\"default\" # Optional\n )\n trainer = Trainer(logger=comet_logger)\n\nTo log in offline mode, CometLogger requires a path to a local directory:\n\n.. code-block:: python\n\n from pytorch_lightning.logging import CometLogger\n # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n comet_logger = CometLogger(\n save_dir=\".\",\n workspace=os.environ[\"COMET_WORKSPACE\"], # Optional\n project_name=\"default_project\", # Optional\n rest_api_key=os.environ[\"COMET_REST_KEY\"], # Optional\n experiment_name=\"default\" # Optional\n )\n trainer = Trainer(logger=comet_logger)\n\n\nUse the logger anywhere in you LightningModule as follows:\n\n.. code-block:: python\n\n def train_step(...):\n # example\n self.logger.experiment.whatever_comet_ml_supports(...)\n\n def any_lightning_module_function_or_hook(...):\n self.logger.experiment.whatever_comet_ml_supports(...)\n\n\n\"\"\"\n\nfrom logging import getLogger\n\ntry:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n try:\n from comet_ml.api import API\n except ImportError:\n # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300\n from comet_ml.papi import API\nexcept ImportError:\n raise ImportError('Missing comet_ml package.')\n\nfrom torch import is_tensor\n\nfrom .base import LightningLoggerBase, rank_zero_only\nfrom ..utilities.debugging import MisconfigurationException\n\nlogger = getLogger(__name__)\n\n\nclass CometLogger(LightningLoggerBase):\n def __init__(self, api_key=None, save_dir=None, workspace=None,\n rest_api_key=None, project_name=None, experiment_name=None, **kwargs):\n \"\"\"Initialize a Comet.ml logger.\n Requires either an API Key (online mode) or a local directory path (offline mode)\n\n :param str api_key: Required in online mode. API key, found on Comet.ml\n :param str save_dir: Required in offline mode. The path for the directory to save local comet logs\n :param str workspace: Optional. Name of workspace for this user\n :param str project_name: Optional. Send your experiment to a specific project.\n Otherwise will be sent to Uncategorized Experiments.\n If project name does not already exists Comet.ml will create a new project.\n :param str rest_api_key: Optional. Rest API key found in Comet.ml settings.\n This is used to determine version number\n :param str experiment_name: Optional. 
String representing the name for this particular experiment on Comet.ml\n \"\"\"\n super().__init__()\n self._experiment = None\n\n # Determine online or offline mode based on which arguments were passed to CometLogger\n if save_dir is not None and api_key is not None:\n # If arguments are passed for both save_dir and api_key, preference is given to online mode\n self.mode = \"online\"\n self.api_key = api_key\n elif api_key is not None:\n self.mode = \"online\"\n self.api_key = api_key\n elif save_dir is not None:\n self.mode = \"offline\"\n self.save_dir = save_dir\n else:\n # If neither api_key nor save_dir are passed as arguments, raise an exception\n raise MisconfigurationException(\"CometLogger requires either api_key or save_dir during initialization.\")\n\n logger.info(f\"CometLogger will be initialized in {self.mode} mode\")\n\n self.workspace = workspace\n self.project_name = project_name\n self._kwargs = kwargs\n\n if rest_api_key is not None:\n # Comet.ml rest API, used to determine version number\n self.rest_api_key = rest_api_key\n self.comet_api = API(self.rest_api_key)\n else:\n self.rest_api_key = None\n self.comet_api = None\n\n if experiment_name:\n try:\n self.name = experiment_name\n except TypeError as e:\n logger.exception(\"Failed to set experiment name for comet.ml logger\")\n\n @property\n def experiment(self):\n if self._experiment is not None:\n return self._experiment\n\n if self.mode == \"online\":\n self._experiment = CometExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n else:\n self._experiment = CometOfflineExperiment(\n offline_directory=self.save_dir,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n self.experiment.log_parameters(vars(params))\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n # Comet.ml expects metrics to be a dictionary of detached tensors on CPU\n for key, val in metrics.items():\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n\n self.experiment.log_metrics(metrics, step=step)\n\n @rank_zero_only\n def finalize(self, status):\n self.experiment.end()\n\n @property\n def name(self):\n return self.experiment.project_name\n\n @name.setter\n def name(self, value):\n self.experiment.set_name(value)\n\n @property\n def version(self):\n return self.experiment.id\n", "path": "pytorch_lightning/logging/comet.py"}]} | 2,015 | 157 |
gh_patches_debug_10854 | rasdani/github-patches | git_diff | liqd__adhocracy4-1528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
project duplicate functionality with image
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/fake-project-799/basic/
**user:** moderator/ initiator/admin/
**expected behaviour:** When a project is duplicated, images of the new (duplicated) project are saved correctly
**behaviour:** The duplicated (new) project's images are saved with a doubled file path, e.g. instead of `projects/background/image.png` they are saved as `projects/background/projects/background/image.png`
~**important screensize:** *mobile? desktop?*~
~**device & browser:** *e.g. Desktop, firefox 68.0.2 (64-Bit)*~
**Comment/Question:** *Is this easy to do? What ideas do you have? Would this be a story?*

--- END ISSUE ---
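For illustration, a minimal Django sketch of the mechanism behind the doubled path; the model and its `upload_to` value are assumptions inferred from the reported paths, not the actual adhocracy4 model. `FieldFile.save()` passes the given name through `upload_to` handling again, so re-saving with the already-prefixed stored name duplicates the prefix:

```python
from django.db import models


class Project(models.Model):
    # Illustrative field; the real project model lives in adhocracy4.projects.
    image = models.ImageField(upload_to="projects/background")


original = Project.objects.get(pk=1)
# original.image.name == "projects/background/image.png" (already prefixed)

clone = Project(pk=None)
clone.image.save(original.image.name, original.image, save=False)
# clone.image.name == "projects/background/projects/background/image.png"
# because upload_to is applied to a name that already contains it.
```

Since a deep copy of the project already carries the stored file names, simply not re-saving the copied file fields is enough to keep the original paths.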
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/dashboard/mixins.py`
Content:
```
1 from copy import deepcopy
2
3 from django.apps import apps
4 from django.conf import settings
5 from django.contrib import messages
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import get_object_or_404
8 from django.shortcuts import redirect
9 from django.urls import NoReverseMatch
10 from django.utils import timezone
11 from django.utils.translation import gettext_lazy as _
12 from django.views.generic import base
13 from django.views.generic import edit
14
15 from adhocracy4.projects import models as project_models
16 from adhocracy4.rules import mixins as rules_mixins
17
18 from . import components
19 from . import get_project_dashboard
20 from . import signals
21
22 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)
23
24
25 class DashboardBaseMixin(rules_mixins.PermissionRequiredMixin):
26 organisation_lookup_field = "slug"
27 organisation_url_kwarg = "organisation_slug"
28
29 @property
30 def organisation(self):
31 if self.organisation_url_kwarg and self.organisation_url_kwarg in self.kwargs:
32 lookup = {
33 self.organisation_lookup_field: self.kwargs[self.organisation_url_kwarg]
34 }
35 return get_object_or_404(Organisation, **lookup)
36
37 return self.project.organisation
38
39 @property
40 def other_organisations_of_user(self):
41 user = self.request.user
42 if self.organisation:
43 initiator_orgs = user.organisation_set.all()
44 if hasattr(Organisation, "groups") and user.groups.all():
45 user_groups = user.groups.all().values_list("id", flat=True)
46 group_orgs = Organisation.objects.filter(groups__in=user_groups)
47 orgs = initiator_orgs | group_orgs
48 return orgs.distinct().exclude(pk=self.organisation.pk)
49 return initiator_orgs.exclude(pk=self.organisation.pk)
50 else:
51 return None
52
53 def get_permission_object(self):
54 raise NotImplementedError("Set permission object.")
55
56 def get_success_url(self):
57 return self.request.path
58
59
60 class BlueprintMixin:
61 @property
62 def blueprint(self):
63 from .blueprints import get_blueprints
64
65 return dict(get_blueprints())[self.blueprint_key]
66
67 @property
68 def blueprint_key(self):
69 return self.kwargs["blueprint_slug"]
70
71
72 class DashboardComponentMixin(base.ContextMixin):
73 """Set the menu_item and add dashboard information to the context data.
74
75 Assumes self.project, self.module and self.component are set.
76 """
77
78 menu_item = "project"
79 component = None
80
81 def get_context_data(self, **kwargs):
82 context = super().get_context_data(**kwargs)
83
84 # Workaround Djangos update behavior:
85 # All fields from the POST data will be set on the view.object model
86 # instance, regardless of validation errors.
87 # Thus it is not reliable to check on empty fields on the view.object
88 # but it has to be ensured that the model reflects the database.
89 project = deepcopy(self.project)
90 if project:
91 project.refresh_from_db()
92
93 dashboard = get_project_dashboard(project)
94
95 context["dashboard_menu"] = dashboard.get_menu(self.module, self.component)
96
97 num_valid, num_required = dashboard.get_progress()
98 project_num_valid, project_num_required = dashboard.get_project_progress()
99 project_is_complete = project_num_valid == project_num_required
100 context["project_progress"] = {
101 "valid": num_valid,
102 "required": num_required,
103 "project_is_complete": project_is_complete,
104 }
105
106 return context
107
108
109 class DashboardComponentFormSignalMixin(edit.FormMixin):
110 def form_valid(self, form):
111 response = super().form_valid(form)
112
113 component = self.component
114 if component.identifier in components.projects:
115 signals.project_component_updated.send(
116 sender=component.__class__,
117 project=self.project,
118 component=component,
119 user=self.request.user,
120 )
121 else:
122 signals.module_component_updated.send(
123 sender=component.__class__,
124 module=self.module,
125 component=component,
126 user=self.request.user,
127 )
128 return response
129
130
131 class DashboardComponentDeleteSignalMixin(edit.DeletionMixin):
132 def delete(self, request, *args, **kwargs):
133 # Project and module have to be stored before delete is called as
134 # they may rely on the still existing db object.
135 project = self.project
136 module = self.module
137
138 response = super().delete(request, *args, **kwargs)
139
140 component = self.component
141 if component.identifier in components.projects:
142 signals.project_component_updated.send(
143 sender=component.__class__,
144 project=project,
145 component=component,
146 user=self.request.user,
147 )
148 else:
149 signals.module_component_updated.send(
150 sender=component.__class__,
151 module=module,
152 component=component,
153 user=self.request.user,
154 )
155 return response
156
157
158 class DashboardProjectDuplicateMixin:
159 def post(self, request, *args, **kwargs):
160 if "duplicate" in request.POST:
161 pk = int(request.POST["project_pk"])
162 project = get_object_or_404(project_models.Project, pk=pk)
163 can_add = request.user.has_perm("a4projects.add_project", project)
164
165 if not can_add:
166 raise PermissionDenied()
167
168 project_clone = deepcopy(project)
169 project_clone.pk = None
170 if project_clone.tile_image:
171 project_clone.tile_image.save(
172 project.tile_image.name, project.tile_image, False
173 )
174 if project_clone.image:
175 project_clone.image.save(project.image.name, project.image, False)
176 project_clone.created = timezone.now()
177 project_clone.is_draft = True
178 project_clone.is_archived = False
179 project_clone.save()
180 signals.project_created.send(
181 sender=None, project=project_clone, user=self.request.user
182 )
183
184 for moderator in project.moderators.all():
185 project_clone.moderators.add(moderator)
186
187 for module in project.module_set.all():
188 module_clone = deepcopy(module)
189 module_clone.project = project_clone
190 module_clone.pk = None
191 module_clone.save()
192 signals.module_created.send(
193 sender=None, module=module_clone, user=self.request.user
194 )
195
196 for phase in module.phase_set.all():
197 phase_clone = deepcopy(phase)
198 phase_clone.module = module_clone
199 phase_clone.pk = None
200 phase_clone.save()
201
202 settings_instance = module.settings_instance
203 if settings_instance:
204 settings_instance_clone = deepcopy(settings_instance)
205 settings_instance_clone.pk = None
206 settings_instance_clone.module = module_clone
207 settings_instance_clone.save()
208
209 messages.success(request, _("Project successfully duplicated."))
210
211 try:
212 org_slug = project_clone.organisation.slug
213 return redirect(
214 "a4dashboard:project-edit",
215 organisation_slug=org_slug,
216 project_slug=project_clone.slug,
217 )
218 except NoReverseMatch:
219 return redirect(
220 "a4dashboard:project-edit", project_slug=project_clone.slug
221 )
222 else:
223 return super().post(request, *args, **kwargs)
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/dashboard/mixins.py b/adhocracy4/dashboard/mixins.py
--- a/adhocracy4/dashboard/mixins.py
+++ b/adhocracy4/dashboard/mixins.py
@@ -167,12 +167,6 @@
project_clone = deepcopy(project)
project_clone.pk = None
- if project_clone.tile_image:
- project_clone.tile_image.save(
- project.tile_image.name, project.tile_image, False
- )
- if project_clone.image:
- project_clone.image.save(project.image.name, project.image, False)
project_clone.created = timezone.now()
project_clone.is_draft = True
project_clone.is_archived = False
| {"golden_diff": "diff --git a/adhocracy4/dashboard/mixins.py b/adhocracy4/dashboard/mixins.py\n--- a/adhocracy4/dashboard/mixins.py\n+++ b/adhocracy4/dashboard/mixins.py\n@@ -167,12 +167,6 @@\n \n project_clone = deepcopy(project)\n project_clone.pk = None\n- if project_clone.tile_image:\n- project_clone.tile_image.save(\n- project.tile_image.name, project.tile_image, False\n- )\n- if project_clone.image:\n- project_clone.image.save(project.image.name, project.image, False)\n project_clone.created = timezone.now()\n project_clone.is_draft = True\n project_clone.is_archived = False\n", "issue": "project duplicate functionality with image\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/fake-project-799/basic/\r\n**user:** moderator/ initiator/admin/\r\n**expected behaviour:** When a project is duplicated, images of the new (duplicated) project are saved correctly\r\n**behaviour:** *Duplicated new project's images are saved with a duplicated file path, e.g instead of `projects/background/image.png`, they are saved as `projects/background/projects/background/image.png`\r\n~**important screensize:** *mobile? desktop?*~\r\n~**device & browser:** *e.g. Desktop, firefox 68.0.2 (64-Bit)*~\r\n**Comment/Question:** *Is this easy to do? What ideas do you have? Would this be a story?* \r\n\r\n\r\n\n", "before_files": [{"content": "from copy import deepcopy\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.urls import NoReverseMatch\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import base\nfrom django.views.generic import edit\n\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.rules import mixins as rules_mixins\n\nfrom . import components\nfrom . import get_project_dashboard\nfrom . 
import signals\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass DashboardBaseMixin(rules_mixins.PermissionRequiredMixin):\n organisation_lookup_field = \"slug\"\n organisation_url_kwarg = \"organisation_slug\"\n\n @property\n def organisation(self):\n if self.organisation_url_kwarg and self.organisation_url_kwarg in self.kwargs:\n lookup = {\n self.organisation_lookup_field: self.kwargs[self.organisation_url_kwarg]\n }\n return get_object_or_404(Organisation, **lookup)\n\n return self.project.organisation\n\n @property\n def other_organisations_of_user(self):\n user = self.request.user\n if self.organisation:\n initiator_orgs = user.organisation_set.all()\n if hasattr(Organisation, \"groups\") and user.groups.all():\n user_groups = user.groups.all().values_list(\"id\", flat=True)\n group_orgs = Organisation.objects.filter(groups__in=user_groups)\n orgs = initiator_orgs | group_orgs\n return orgs.distinct().exclude(pk=self.organisation.pk)\n return initiator_orgs.exclude(pk=self.organisation.pk)\n else:\n return None\n\n def get_permission_object(self):\n raise NotImplementedError(\"Set permission object.\")\n\n def get_success_url(self):\n return self.request.path\n\n\nclass BlueprintMixin:\n @property\n def blueprint(self):\n from .blueprints import get_blueprints\n\n return dict(get_blueprints())[self.blueprint_key]\n\n @property\n def blueprint_key(self):\n return self.kwargs[\"blueprint_slug\"]\n\n\nclass DashboardComponentMixin(base.ContextMixin):\n \"\"\"Set the menu_item and add dashboard information to the context data.\n\n Assumes self.project, self.module and self.component are set.\n \"\"\"\n\n menu_item = \"project\"\n component = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Workaround Djangos update behavior:\n # All fields from the POST data will be set on the view.object model\n # instance, regardless of validation errors.\n # Thus it is not reliable to check on empty fields on the view.object\n # but it has to be ensured that the model reflects the database.\n project = deepcopy(self.project)\n if project:\n project.refresh_from_db()\n\n dashboard = get_project_dashboard(project)\n\n context[\"dashboard_menu\"] = dashboard.get_menu(self.module, self.component)\n\n num_valid, num_required = dashboard.get_progress()\n project_num_valid, project_num_required = dashboard.get_project_progress()\n project_is_complete = project_num_valid == project_num_required\n context[\"project_progress\"] = {\n \"valid\": num_valid,\n \"required\": num_required,\n \"project_is_complete\": project_is_complete,\n }\n\n return context\n\n\nclass DashboardComponentFormSignalMixin(edit.FormMixin):\n def form_valid(self, form):\n response = super().form_valid(form)\n\n component = self.component\n if component.identifier in components.projects:\n signals.project_component_updated.send(\n sender=component.__class__,\n project=self.project,\n component=component,\n user=self.request.user,\n )\n else:\n signals.module_component_updated.send(\n sender=component.__class__,\n module=self.module,\n component=component,\n user=self.request.user,\n )\n return response\n\n\nclass DashboardComponentDeleteSignalMixin(edit.DeletionMixin):\n def delete(self, request, *args, **kwargs):\n # Project and module have to be stored before delete is called as\n # they may rely on the still existing db object.\n project = self.project\n module = self.module\n\n response = super().delete(request, *args, **kwargs)\n\n component = 
self.component\n if component.identifier in components.projects:\n signals.project_component_updated.send(\n sender=component.__class__,\n project=project,\n component=component,\n user=self.request.user,\n )\n else:\n signals.module_component_updated.send(\n sender=component.__class__,\n module=module,\n component=component,\n user=self.request.user,\n )\n return response\n\n\nclass DashboardProjectDuplicateMixin:\n def post(self, request, *args, **kwargs):\n if \"duplicate\" in request.POST:\n pk = int(request.POST[\"project_pk\"])\n project = get_object_or_404(project_models.Project, pk=pk)\n can_add = request.user.has_perm(\"a4projects.add_project\", project)\n\n if not can_add:\n raise PermissionDenied()\n\n project_clone = deepcopy(project)\n project_clone.pk = None\n if project_clone.tile_image:\n project_clone.tile_image.save(\n project.tile_image.name, project.tile_image, False\n )\n if project_clone.image:\n project_clone.image.save(project.image.name, project.image, False)\n project_clone.created = timezone.now()\n project_clone.is_draft = True\n project_clone.is_archived = False\n project_clone.save()\n signals.project_created.send(\n sender=None, project=project_clone, user=self.request.user\n )\n\n for moderator in project.moderators.all():\n project_clone.moderators.add(moderator)\n\n for module in project.module_set.all():\n module_clone = deepcopy(module)\n module_clone.project = project_clone\n module_clone.pk = None\n module_clone.save()\n signals.module_created.send(\n sender=None, module=module_clone, user=self.request.user\n )\n\n for phase in module.phase_set.all():\n phase_clone = deepcopy(phase)\n phase_clone.module = module_clone\n phase_clone.pk = None\n phase_clone.save()\n\n settings_instance = module.settings_instance\n if settings_instance:\n settings_instance_clone = deepcopy(settings_instance)\n settings_instance_clone.pk = None\n settings_instance_clone.module = module_clone\n settings_instance_clone.save()\n\n messages.success(request, _(\"Project successfully duplicated.\"))\n\n try:\n org_slug = project_clone.organisation.slug\n return redirect(\n \"a4dashboard:project-edit\",\n organisation_slug=org_slug,\n project_slug=project_clone.slug,\n )\n except NoReverseMatch:\n return redirect(\n \"a4dashboard:project-edit\", project_slug=project_clone.slug\n )\n else:\n return super().post(request, *args, **kwargs)\n", "path": "adhocracy4/dashboard/mixins.py"}], "after_files": [{"content": "from copy import deepcopy\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.urls import NoReverseMatch\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import base\nfrom django.views.generic import edit\n\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.rules import mixins as rules_mixins\n\nfrom . import components\nfrom . import get_project_dashboard\nfrom . 
import signals\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass DashboardBaseMixin(rules_mixins.PermissionRequiredMixin):\n organisation_lookup_field = \"slug\"\n organisation_url_kwarg = \"organisation_slug\"\n\n @property\n def organisation(self):\n if self.organisation_url_kwarg and self.organisation_url_kwarg in self.kwargs:\n lookup = {\n self.organisation_lookup_field: self.kwargs[self.organisation_url_kwarg]\n }\n return get_object_or_404(Organisation, **lookup)\n\n return self.project.organisation\n\n @property\n def other_organisations_of_user(self):\n user = self.request.user\n if self.organisation:\n initiator_orgs = user.organisation_set.all()\n if hasattr(Organisation, \"groups\") and user.groups.all():\n user_groups = user.groups.all().values_list(\"id\", flat=True)\n group_orgs = Organisation.objects.filter(groups__in=user_groups)\n orgs = initiator_orgs | group_orgs\n return orgs.distinct().exclude(pk=self.organisation.pk)\n return initiator_orgs.exclude(pk=self.organisation.pk)\n else:\n return None\n\n def get_permission_object(self):\n raise NotImplementedError(\"Set permission object.\")\n\n def get_success_url(self):\n return self.request.path\n\n\nclass BlueprintMixin:\n @property\n def blueprint(self):\n from .blueprints import get_blueprints\n\n return dict(get_blueprints())[self.blueprint_key]\n\n @property\n def blueprint_key(self):\n return self.kwargs[\"blueprint_slug\"]\n\n\nclass DashboardComponentMixin(base.ContextMixin):\n \"\"\"Set the menu_item and add dashboard information to the context data.\n\n Assumes self.project, self.module and self.component are set.\n \"\"\"\n\n menu_item = \"project\"\n component = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Workaround Djangos update behavior:\n # All fields from the POST data will be set on the view.object model\n # instance, regardless of validation errors.\n # Thus it is not reliable to check on empty fields on the view.object\n # but it has to be ensured that the model reflects the database.\n project = deepcopy(self.project)\n if project:\n project.refresh_from_db()\n\n dashboard = get_project_dashboard(project)\n\n context[\"dashboard_menu\"] = dashboard.get_menu(self.module, self.component)\n\n num_valid, num_required = dashboard.get_progress()\n project_num_valid, project_num_required = dashboard.get_project_progress()\n project_is_complete = project_num_valid == project_num_required\n context[\"project_progress\"] = {\n \"valid\": num_valid,\n \"required\": num_required,\n \"project_is_complete\": project_is_complete,\n }\n\n return context\n\n\nclass DashboardComponentFormSignalMixin(edit.FormMixin):\n def form_valid(self, form):\n response = super().form_valid(form)\n\n component = self.component\n if component.identifier in components.projects:\n signals.project_component_updated.send(\n sender=component.__class__,\n project=self.project,\n component=component,\n user=self.request.user,\n )\n else:\n signals.module_component_updated.send(\n sender=component.__class__,\n module=self.module,\n component=component,\n user=self.request.user,\n )\n return response\n\n\nclass DashboardComponentDeleteSignalMixin(edit.DeletionMixin):\n def delete(self, request, *args, **kwargs):\n # Project and module have to be stored before delete is called as\n # they may rely on the still existing db object.\n project = self.project\n module = self.module\n\n response = super().delete(request, *args, **kwargs)\n\n component = 
self.component\n if component.identifier in components.projects:\n signals.project_component_updated.send(\n sender=component.__class__,\n project=project,\n component=component,\n user=self.request.user,\n )\n else:\n signals.module_component_updated.send(\n sender=component.__class__,\n module=module,\n component=component,\n user=self.request.user,\n )\n return response\n\n\nclass DashboardProjectDuplicateMixin:\n def post(self, request, *args, **kwargs):\n if \"duplicate\" in request.POST:\n pk = int(request.POST[\"project_pk\"])\n project = get_object_or_404(project_models.Project, pk=pk)\n can_add = request.user.has_perm(\"a4projects.add_project\", project)\n\n if not can_add:\n raise PermissionDenied()\n\n project_clone = deepcopy(project)\n project_clone.pk = None\n project_clone.created = timezone.now()\n project_clone.is_draft = True\n project_clone.is_archived = False\n project_clone.save()\n signals.project_created.send(\n sender=None, project=project_clone, user=self.request.user\n )\n\n for moderator in project.moderators.all():\n project_clone.moderators.add(moderator)\n\n for module in project.module_set.all():\n module_clone = deepcopy(module)\n module_clone.project = project_clone\n module_clone.pk = None\n module_clone.save()\n signals.module_created.send(\n sender=None, module=module_clone, user=self.request.user\n )\n\n for phase in module.phase_set.all():\n phase_clone = deepcopy(phase)\n phase_clone.module = module_clone\n phase_clone.pk = None\n phase_clone.save()\n\n settings_instance = module.settings_instance\n if settings_instance:\n settings_instance_clone = deepcopy(settings_instance)\n settings_instance_clone.pk = None\n settings_instance_clone.module = module_clone\n settings_instance_clone.save()\n\n messages.success(request, _(\"Project successfully duplicated.\"))\n\n try:\n org_slug = project_clone.organisation.slug\n return redirect(\n \"a4dashboard:project-edit\",\n organisation_slug=org_slug,\n project_slug=project_clone.slug,\n )\n except NoReverseMatch:\n return redirect(\n \"a4dashboard:project-edit\", project_slug=project_clone.slug\n )\n else:\n return super().post(request, *args, **kwargs)\n", "path": "adhocracy4/dashboard/mixins.py"}]} | 2,533 | 157 |
gh_patches_debug_17701 | rasdani/github-patches | git_diff | boto__boto-1808 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation Issue?
I found that the documentation here:
http://boto.readthedocs.org/en/latest/ref/cloudwatch.html#boto.ec2.cloudwatch.alarm.MetricAlarm
Says 'dimensions' takes a list of dicts. Doing so I ran into issues creating an alarm:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/jenkins/.virtualenvs/warmachine/local/lib/python2.7/site-packages/boto/ec2/cloudwatch/__init__.py", line 529, in put_metric_alarm
self.build_dimension_param(alarm.dimensions, params)
File "/jenkins/.virtualenvs/warmachine/local/lib/python2.7/site-packages/boto/ec2/cloudwatch/__init__.py", line 126, in build_dimension_param
dim_value = dimension[dim_name]
TypeError: list indices must be integers, not dict
```
From looking at the source code it takes a dict. This seems to check out with the rest of the documentation on the page linked above.
Lastly the example seems really bad. It looks like:
Dimensions of alarm, such as: [{‘InstanceId’:[‘i-0123456,i-0123457’]}]
Which IMO should read:
Dimensions of alarm, such as: [{‘InstanceId’:‘i-0123456,i-0123457’}]
This makes it much more explicit that the value is a string. With an instance ID you can only specify one value; showing a list doesn't make that clear, and one might assume you could pass a second string with more instances.
--- END ISSUE ---
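For illustration, a sketch of creating an alarm with `dimensions` passed as a dict, which is what `build_dimension_param` expects; the region, metric values, and instance ID are placeholders, and AWS credentials are assumed to be configured:

```python
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm

conn = boto.ec2.cloudwatch.connect_to_region("us-east-1")

alarm = MetricAlarm(
    name="cpu-high",                      # placeholder names/values
    namespace="AWS/EC2",
    metric="CPUUtilization",
    statistic="Average",
    comparison=">=",
    threshold=90.0,
    period=300,
    evaluation_periods=2,
    # A dict keyed by dimension name, not a list of dicts.
    dimensions={"InstanceId": "i-0123456"},
)
conn.put_metric_alarm(alarm)
```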
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/ec2/cloudwatch/alarm.py`
Content:
```
1 # Copyright (c) 2010 Reza Lotun http://reza.lotun.name
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21 #
22
23 from datetime import datetime
24 from boto.resultset import ResultSet
25 from boto.ec2.cloudwatch.listelement import ListElement
26 from boto.ec2.cloudwatch.dimension import Dimension
27 from boto.compat import json
28
29
30 class MetricAlarms(list):
31 def __init__(self, connection=None):
32 """
33 Parses a list of MetricAlarms.
34 """
35 list.__init__(self)
36 self.connection = connection
37
38 def startElement(self, name, attrs, connection):
39 if name == 'member':
40 metric_alarm = MetricAlarm(connection)
41 self.append(metric_alarm)
42 return metric_alarm
43
44 def endElement(self, name, value, connection):
45 pass
46
47
48 class MetricAlarm(object):
49
50 OK = 'OK'
51 ALARM = 'ALARM'
52 INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'
53
54 _cmp_map = {
55 '>=': 'GreaterThanOrEqualToThreshold',
56 '>': 'GreaterThanThreshold',
57 '<': 'LessThanThreshold',
58 '<=': 'LessThanOrEqualToThreshold',
59 }
60 _rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())
61
62 def __init__(self, connection=None, name=None, metric=None,
63 namespace=None, statistic=None, comparison=None,
64 threshold=None, period=None, evaluation_periods=None,
65 unit=None, description='', dimensions=None,
66 alarm_actions=None, insufficient_data_actions=None,
67 ok_actions=None):
68 """
69 Creates a new Alarm.
70
71 :type name: str
72 :param name: Name of alarm.
73
74 :type metric: str
75 :param metric: Name of alarm's associated metric.
76
77 :type namespace: str
78 :param namespace: The namespace for the alarm's metric.
79
80 :type statistic: str
81 :param statistic: The statistic to apply to the alarm's associated
82 metric.
83 Valid values: SampleCount|Average|Sum|Minimum|Maximum
84
85 :type comparison: str
86 :param comparison: Comparison used to compare statistic with threshold.
87 Valid values: >= | > | < | <=
88
89 :type threshold: float
90 :param threshold: The value against which the specified statistic
91 is compared.
92
93 :type period: int
94 :param period: The period in seconds over which teh specified
95 statistic is applied.
96
97 :type evaluation_periods: int
98 :param evaluation_periods: The number of periods over which data is
99 compared to the specified threshold.
100
101 :type unit: str
102 :param unit: Allowed Values are:
103 Seconds|Microseconds|Milliseconds,
104 Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes,
105 Bits|Kilobits|Megabits|Gigabits|Terabits,
106 Percent|Count|
107 Bytes/Second|Kilobytes/Second|Megabytes/Second|
108 Gigabytes/Second|Terabytes/Second,
109 Bits/Second|Kilobits/Second|Megabits/Second,
110 Gigabits/Second|Terabits/Second|Count/Second|None
111
112 :type description: str
113 :param description: Description of MetricAlarm
114
115 :type dimensions: list of dicts
116 :param dimensions: Dimensions of alarm, such as:
117 [{'InstanceId':['i-0123456,i-0123457']}]
118
119 :type alarm_actions: list of strs
120 :param alarm_actions: A list of the ARNs of the actions to take in
121 ALARM state
122
123 :type insufficient_data_actions: list of strs
124 :param insufficient_data_actions: A list of the ARNs of the actions to
125 take in INSUFFICIENT_DATA state
126
127 :type ok_actions: list of strs
128 :param ok_actions: A list of the ARNs of the actions to take in OK state
129 """
130 self.name = name
131 self.connection = connection
132 self.metric = metric
133 self.namespace = namespace
134 self.statistic = statistic
135 if threshold is not None:
136 self.threshold = float(threshold)
137 else:
138 self.threshold = None
139 self.comparison = self._cmp_map.get(comparison)
140 if period is not None:
141 self.period = int(period)
142 else:
143 self.period = None
144 if evaluation_periods is not None:
145 self.evaluation_periods = int(evaluation_periods)
146 else:
147 self.evaluation_periods = None
148 self.actions_enabled = None
149 self.alarm_arn = None
150 self.last_updated = None
151 self.description = description
152 self.dimensions = dimensions
153 self.state_reason = None
154 self.state_value = None
155 self.unit = unit
156 self.alarm_actions = alarm_actions
157 self.insufficient_data_actions = insufficient_data_actions
158 self.ok_actions = ok_actions
159
160 def __repr__(self):
161 return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric,
162 self.statistic,
163 self.comparison,
164 self.threshold)
165
166 def startElement(self, name, attrs, connection):
167 if name == 'AlarmActions':
168 self.alarm_actions = ListElement()
169 return self.alarm_actions
170 elif name == 'InsufficientDataActions':
171 self.insufficient_data_actions = ListElement()
172 return self.insufficient_data_actions
173 elif name == 'OKActions':
174 self.ok_actions = ListElement()
175 return self.ok_actions
176 elif name == 'Dimensions':
177 self.dimensions = Dimension()
178 return self.dimensions
179 else:
180 pass
181
182 def endElement(self, name, value, connection):
183 if name == 'ActionsEnabled':
184 self.actions_enabled = value
185 elif name == 'AlarmArn':
186 self.alarm_arn = value
187 elif name == 'AlarmConfigurationUpdatedTimestamp':
188 self.last_updated = value
189 elif name == 'AlarmDescription':
190 self.description = value
191 elif name == 'AlarmName':
192 self.name = value
193 elif name == 'ComparisonOperator':
194 setattr(self, 'comparison', self._rev_cmp_map[value])
195 elif name == 'EvaluationPeriods':
196 self.evaluation_periods = int(value)
197 elif name == 'MetricName':
198 self.metric = value
199 elif name == 'Namespace':
200 self.namespace = value
201 elif name == 'Period':
202 self.period = int(value)
203 elif name == 'StateReason':
204 self.state_reason = value
205 elif name == 'StateValue':
206 self.state_value = value
207 elif name == 'Statistic':
208 self.statistic = value
209 elif name == 'Threshold':
210 self.threshold = float(value)
211 elif name == 'Unit':
212 self.unit = value
213 else:
214 setattr(self, name, value)
215
216 def set_state(self, value, reason, data=None):
217 """ Temporarily sets the state of an alarm.
218
219 :type value: str
220 :param value: OK | ALARM | INSUFFICIENT_DATA
221
222 :type reason: str
223 :param reason: Reason alarm set (human readable).
224
225 :type data: str
226 :param data: Reason data (will be jsonified).
227 """
228 return self.connection.set_alarm_state(self.name, reason, value, data)
229
230 def update(self):
231 return self.connection.update_alarm(self)
232
233 def enable_actions(self):
234 return self.connection.enable_alarm_actions([self.name])
235
236 def disable_actions(self):
237 return self.connection.disable_alarm_actions([self.name])
238
239 def describe_history(self, start_date=None, end_date=None, max_records=None,
240 history_item_type=None, next_token=None):
241 return self.connection.describe_alarm_history(self.name, start_date,
242 end_date, max_records,
243 history_item_type,
244 next_token)
245
246 def add_alarm_action(self, action_arn=None):
247 """
248 Adds an alarm action, represented as an SNS topic, to this alarm.
249 What do do when alarm is triggered.
250
251 :type action_arn: str
252 :param action_arn: SNS topics to which notification should be
253 sent if the alarm goes to state ALARM.
254 """
255 if not action_arn:
256 return # Raise exception instead?
257 self.actions_enabled = 'true'
258 self.alarm_actions.append(action_arn)
259
260 def add_insufficient_data_action(self, action_arn=None):
261 """
262 Adds an insufficient_data action, represented as an SNS topic, to
263 this alarm. What to do when the insufficient_data state is reached.
264
265 :type action_arn: str
266 :param action_arn: SNS topics to which notification should be
267 sent if the alarm goes to state INSUFFICIENT_DATA.
268 """
269 if not action_arn:
270 return
271 self.actions_enabled = 'true'
272 self.insufficient_data_actions.append(action_arn)
273
274 def add_ok_action(self, action_arn=None):
275 """
276 Adds an ok action, represented as an SNS topic, to this alarm. What
277 to do when the ok state is reached.
278
279 :type action_arn: str
280 :param action_arn: SNS topics to which notification should be
281 sent if the alarm goes to state INSUFFICIENT_DATA.
282 """
283 if not action_arn:
284 return
285 self.actions_enabled = 'true'
286 self.ok_actions.append(action_arn)
287
288 def delete(self):
289 self.connection.delete_alarms([self.name])
290
291 class AlarmHistoryItem(object):
292 def __init__(self, connection=None):
293 self.connection = connection
294
295 def __repr__(self):
296 return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)
297
298 def startElement(self, name, attrs, connection):
299 pass
300
301 def endElement(self, name, value, connection):
302 if name == 'AlarmName':
303 self.name = value
304 elif name == 'HistoryData':
305 self.data = json.loads(value)
306 elif name == 'HistoryItemType':
307 self.tem_type = value
308 elif name == 'HistorySummary':
309 self.summary = value
310 elif name == 'Timestamp':
311 try:
312 self.timestamp = datetime.strptime(value,
313 '%Y-%m-%dT%H:%M:%S.%fZ')
314 except ValueError:
315 self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
316
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boto/ec2/cloudwatch/alarm.py b/boto/ec2/cloudwatch/alarm.py
--- a/boto/ec2/cloudwatch/alarm.py
+++ b/boto/ec2/cloudwatch/alarm.py
@@ -112,9 +112,16 @@
:type description: str
:param description: Description of MetricAlarm
- :type dimensions: list of dicts
- :param dimensions: Dimensions of alarm, such as:
- [{'InstanceId':['i-0123456,i-0123457']}]
+ :type dimensions: dict
+ :param dimensions: A dictionary of dimension key/values where
+ the key is the dimension name and the value
+ is either a scalar value or an iterator
+ of values to be associated with that
+ dimension.
+ Example: {
+ 'InstanceId': ['i-0123456', 'i-0123457'],
+ 'LoadBalancerName': 'test-lb'
+ }
:type alarm_actions: list of strs
:param alarm_actions: A list of the ARNs of the actions to take in
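
For illustration only (not part of the patch above): a minimal sketch of building an alarm with `dimensions` passed as a dict, following the `MetricAlarm.__init__` signature shown in the file listing; the metric, threshold, and instance id are placeholders, and the resulting object would normally be handed to `put_metric_alarm` on a CloudWatch connection.

```python
from boto.ec2.cloudwatch.alarm import MetricAlarm

# Hypothetical alarm: `dimensions` maps a dimension name to a scalar value
# or an iterable of values, as the corrected docstring describes.
alarm = MetricAlarm(
    name='cpu-high',
    metric='CPUUtilization',
    namespace='AWS/EC2',
    statistic='Average',
    comparison='>=',
    threshold=80.0,
    period=300,
    evaluation_periods=2,
    dimensions={'InstanceId': 'i-0123456'},
)
```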
| {"golden_diff": "diff --git a/boto/ec2/cloudwatch/alarm.py b/boto/ec2/cloudwatch/alarm.py\n--- a/boto/ec2/cloudwatch/alarm.py\n+++ b/boto/ec2/cloudwatch/alarm.py\n@@ -112,9 +112,16 @@\n :type description: str\n :param description: Description of MetricAlarm\n \n- :type dimensions: list of dicts\n- :param dimensions: Dimensions of alarm, such as:\n- [{'InstanceId':['i-0123456,i-0123457']}]\n+ :type dimensions: dict\n+ :param dimensions: A dictionary of dimension key/values where\n+ the key is the dimension name and the value\n+ is either a scalar value or an iterator\n+ of values to be associated with that\n+ dimension.\n+ Example: {\n+ 'InstanceId': ['i-0123456', 'i-0123457'],\n+ 'LoadBalancerName': 'test-lb'\n+ }\n \n :type alarm_actions: list of strs\n :param alarm_actions: A list of the ARNs of the actions to take in\n", "issue": "Documentation Issue?\nI found that the documentation here:\n\nhttp://boto.readthedocs.org/en/latest/ref/cloudwatch.html#boto.ec2.cloudwatch.alarm.MetricAlarm\n\nSays 'dimensions' takes a list of dicts. Doing so I ran into issues creating an alarm:\n\n```\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/jenkins/.virtualenvs/warmachine/local/lib/python2.7/site-packages/boto/ec2/cloudwatch/__init__.py\", line 529, in put_metric_alarm\n self.build_dimension_param(alarm.dimensions, params)\n File \"/jenkins/.virtualenvs/warmachine/local/lib/python2.7/site-packages/boto/ec2/cloudwatch/__init__.py\", line 126, in build_dimension_param\n dim_value = dimension[dim_name]\nTypeError: list indices must be integers, not dict\n```\n\nFrom looking at the source code it takes a dict. This seems to check out with the rest of the documentation on the page linked above.\n\nLastly the example seems really bad. It looks like:\n\nDimensions of alarm, such as: [{\u2018InstanceId\u2019:[\u2018i-0123456,i-0123457\u2019]}]\n\nWhich IMO should read:\n\nDimensions of alarm, such as: [{\u2018InstanceId\u2019:\u2018i-0123456,i-0123457\u2019}]\n\nMaking it much more explicit that it is a string containing the values. With instance id you can only specify one value, showing a list doesn't make this very explicit... one might assume you could pass a second string with more instances.\n\n", "before_files": [{"content": "# Copyright (c) 2010 Reza Lotun http://reza.lotun.name\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nfrom datetime import datetime\nfrom boto.resultset import ResultSet\nfrom boto.ec2.cloudwatch.listelement import ListElement\nfrom boto.ec2.cloudwatch.dimension import Dimension\nfrom boto.compat import json\n\n\nclass MetricAlarms(list):\n def __init__(self, connection=None):\n \"\"\"\n Parses a list of MetricAlarms.\n \"\"\"\n list.__init__(self)\n self.connection = connection\n\n def startElement(self, name, attrs, connection):\n if name == 'member':\n metric_alarm = MetricAlarm(connection)\n self.append(metric_alarm)\n return metric_alarm\n\n def endElement(self, name, value, connection):\n pass\n\n\nclass MetricAlarm(object):\n\n OK = 'OK'\n ALARM = 'ALARM'\n INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'\n\n _cmp_map = {\n '>=': 'GreaterThanOrEqualToThreshold',\n '>': 'GreaterThanThreshold',\n '<': 'LessThanThreshold',\n '<=': 'LessThanOrEqualToThreshold',\n }\n _rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())\n\n def __init__(self, connection=None, name=None, metric=None,\n namespace=None, statistic=None, comparison=None,\n threshold=None, period=None, evaluation_periods=None,\n unit=None, description='', dimensions=None,\n alarm_actions=None, insufficient_data_actions=None,\n ok_actions=None):\n \"\"\"\n Creates a new Alarm.\n\n :type name: str\n :param name: Name of alarm.\n\n :type metric: str\n :param metric: Name of alarm's associated metric.\n\n :type namespace: str\n :param namespace: The namespace for the alarm's metric.\n\n :type statistic: str\n :param statistic: The statistic to apply to the alarm's associated\n metric.\n Valid values: SampleCount|Average|Sum|Minimum|Maximum\n\n :type comparison: str\n :param comparison: Comparison used to compare statistic with threshold.\n Valid values: >= | > | < | <=\n\n :type threshold: float\n :param threshold: The value against which the specified statistic\n is compared.\n\n :type period: int\n :param period: The period in seconds over which teh specified\n statistic is applied.\n\n :type evaluation_periods: int\n :param evaluation_periods: The number of periods over which data is\n compared to the specified threshold.\n\n :type unit: str\n :param unit: Allowed Values are:\n Seconds|Microseconds|Milliseconds,\n Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes,\n Bits|Kilobits|Megabits|Gigabits|Terabits,\n Percent|Count|\n Bytes/Second|Kilobytes/Second|Megabytes/Second|\n Gigabytes/Second|Terabytes/Second,\n Bits/Second|Kilobits/Second|Megabits/Second,\n Gigabits/Second|Terabits/Second|Count/Second|None\n\n :type description: str\n :param description: Description of MetricAlarm\n\n :type dimensions: list of dicts\n :param dimensions: Dimensions of alarm, such as:\n [{'InstanceId':['i-0123456,i-0123457']}]\n \n :type alarm_actions: list of strs\n :param alarm_actions: A list of the ARNs of the actions to take in\n ALARM state\n \n :type insufficient_data_actions: list of strs\n :param insufficient_data_actions: A list of the ARNs of the actions to\n take in INSUFFICIENT_DATA state\n \n :type ok_actions: list of strs\n :param ok_actions: A list of the ARNs of the actions to take in OK state\n \"\"\"\n self.name = name\n self.connection = connection\n self.metric = metric\n self.namespace = namespace\n self.statistic = statistic\n if threshold is not None:\n self.threshold = 
float(threshold)\n else:\n self.threshold = None\n self.comparison = self._cmp_map.get(comparison)\n if period is not None:\n self.period = int(period)\n else:\n self.period = None\n if evaluation_periods is not None:\n self.evaluation_periods = int(evaluation_periods)\n else:\n self.evaluation_periods = None\n self.actions_enabled = None\n self.alarm_arn = None\n self.last_updated = None\n self.description = description\n self.dimensions = dimensions\n self.state_reason = None\n self.state_value = None\n self.unit = unit\n self.alarm_actions = alarm_actions\n self.insufficient_data_actions = insufficient_data_actions\n self.ok_actions = ok_actions\n\n def __repr__(self):\n return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric,\n self.statistic,\n self.comparison,\n self.threshold)\n\n def startElement(self, name, attrs, connection):\n if name == 'AlarmActions':\n self.alarm_actions = ListElement()\n return self.alarm_actions\n elif name == 'InsufficientDataActions':\n self.insufficient_data_actions = ListElement()\n return self.insufficient_data_actions\n elif name == 'OKActions':\n self.ok_actions = ListElement()\n return self.ok_actions\n elif name == 'Dimensions':\n self.dimensions = Dimension()\n return self.dimensions\n else:\n pass\n\n def endElement(self, name, value, connection):\n if name == 'ActionsEnabled':\n self.actions_enabled = value\n elif name == 'AlarmArn':\n self.alarm_arn = value\n elif name == 'AlarmConfigurationUpdatedTimestamp':\n self.last_updated = value\n elif name == 'AlarmDescription':\n self.description = value\n elif name == 'AlarmName':\n self.name = value\n elif name == 'ComparisonOperator':\n setattr(self, 'comparison', self._rev_cmp_map[value])\n elif name == 'EvaluationPeriods':\n self.evaluation_periods = int(value)\n elif name == 'MetricName':\n self.metric = value\n elif name == 'Namespace':\n self.namespace = value\n elif name == 'Period':\n self.period = int(value)\n elif name == 'StateReason':\n self.state_reason = value\n elif name == 'StateValue':\n self.state_value = value\n elif name == 'Statistic':\n self.statistic = value\n elif name == 'Threshold':\n self.threshold = float(value)\n elif name == 'Unit':\n self.unit = value\n else:\n setattr(self, name, value)\n\n def set_state(self, value, reason, data=None):\n \"\"\" Temporarily sets the state of an alarm.\n\n :type value: str\n :param value: OK | ALARM | INSUFFICIENT_DATA\n\n :type reason: str\n :param reason: Reason alarm set (human readable).\n\n :type data: str\n :param data: Reason data (will be jsonified).\n \"\"\"\n return self.connection.set_alarm_state(self.name, reason, value, data)\n\n def update(self):\n return self.connection.update_alarm(self)\n\n def enable_actions(self):\n return self.connection.enable_alarm_actions([self.name])\n\n def disable_actions(self):\n return self.connection.disable_alarm_actions([self.name])\n\n def describe_history(self, start_date=None, end_date=None, max_records=None,\n history_item_type=None, next_token=None):\n return self.connection.describe_alarm_history(self.name, start_date,\n end_date, max_records,\n history_item_type,\n next_token)\n\n def add_alarm_action(self, action_arn=None):\n \"\"\"\n Adds an alarm action, represented as an SNS topic, to this alarm. 
\n What do do when alarm is triggered.\n\n :type action_arn: str\n :param action_arn: SNS topics to which notification should be \n sent if the alarm goes to state ALARM.\n \"\"\"\n if not action_arn:\n return # Raise exception instead?\n self.actions_enabled = 'true'\n self.alarm_actions.append(action_arn)\n\n def add_insufficient_data_action(self, action_arn=None):\n \"\"\"\n Adds an insufficient_data action, represented as an SNS topic, to\n this alarm. What to do when the insufficient_data state is reached.\n\n :type action_arn: str\n :param action_arn: SNS topics to which notification should be \n sent if the alarm goes to state INSUFFICIENT_DATA.\n \"\"\"\n if not action_arn:\n return\n self.actions_enabled = 'true'\n self.insufficient_data_actions.append(action_arn)\n \n def add_ok_action(self, action_arn=None):\n \"\"\"\n Adds an ok action, represented as an SNS topic, to this alarm. What\n to do when the ok state is reached.\n\n :type action_arn: str\n :param action_arn: SNS topics to which notification should be \n sent if the alarm goes to state INSUFFICIENT_DATA.\n \"\"\"\n if not action_arn:\n return\n self.actions_enabled = 'true'\n self.ok_actions.append(action_arn)\n\n def delete(self):\n self.connection.delete_alarms([self.name])\n\nclass AlarmHistoryItem(object):\n def __init__(self, connection=None):\n self.connection = connection\n\n def __repr__(self):\n return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'AlarmName':\n self.name = value\n elif name == 'HistoryData':\n self.data = json.loads(value)\n elif name == 'HistoryItemType':\n self.tem_type = value\n elif name == 'HistorySummary':\n self.summary = value\n elif name == 'Timestamp':\n try:\n self.timestamp = datetime.strptime(value,\n '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n\n", "path": "boto/ec2/cloudwatch/alarm.py"}], "after_files": [{"content": "# Copyright (c) 2010 Reza Lotun http://reza.lotun.name\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nfrom datetime import datetime\nfrom boto.resultset import ResultSet\nfrom boto.ec2.cloudwatch.listelement import ListElement\nfrom boto.ec2.cloudwatch.dimension import Dimension\nfrom boto.compat import json\n\n\nclass MetricAlarms(list):\n def __init__(self, connection=None):\n \"\"\"\n Parses a list of MetricAlarms.\n \"\"\"\n list.__init__(self)\n self.connection = connection\n\n def startElement(self, name, attrs, connection):\n if name == 'member':\n metric_alarm = MetricAlarm(connection)\n self.append(metric_alarm)\n return metric_alarm\n\n def endElement(self, name, value, connection):\n pass\n\n\nclass MetricAlarm(object):\n\n OK = 'OK'\n ALARM = 'ALARM'\n INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'\n\n _cmp_map = {\n '>=': 'GreaterThanOrEqualToThreshold',\n '>': 'GreaterThanThreshold',\n '<': 'LessThanThreshold',\n '<=': 'LessThanOrEqualToThreshold',\n }\n _rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())\n\n def __init__(self, connection=None, name=None, metric=None,\n namespace=None, statistic=None, comparison=None,\n threshold=None, period=None, evaluation_periods=None,\n unit=None, description='', dimensions=None,\n alarm_actions=None, insufficient_data_actions=None,\n ok_actions=None):\n \"\"\"\n Creates a new Alarm.\n\n :type name: str\n :param name: Name of alarm.\n\n :type metric: str\n :param metric: Name of alarm's associated metric.\n\n :type namespace: str\n :param namespace: The namespace for the alarm's metric.\n\n :type statistic: str\n :param statistic: The statistic to apply to the alarm's associated\n metric.\n Valid values: SampleCount|Average|Sum|Minimum|Maximum\n\n :type comparison: str\n :param comparison: Comparison used to compare statistic with threshold.\n Valid values: >= | > | < | <=\n\n :type threshold: float\n :param threshold: The value against which the specified statistic\n is compared.\n\n :type period: int\n :param period: The period in seconds over which teh specified\n statistic is applied.\n\n :type evaluation_periods: int\n :param evaluation_periods: The number of periods over which data is\n compared to the specified threshold.\n\n :type unit: str\n :param unit: Allowed Values are:\n Seconds|Microseconds|Milliseconds,\n Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes,\n Bits|Kilobits|Megabits|Gigabits|Terabits,\n Percent|Count|\n Bytes/Second|Kilobytes/Second|Megabytes/Second|\n Gigabytes/Second|Terabytes/Second,\n Bits/Second|Kilobits/Second|Megabits/Second,\n Gigabits/Second|Terabits/Second|Count/Second|None\n\n :type description: str\n :param description: Description of MetricAlarm\n\n :type dimensions: dict\n :param dimensions: A dictionary of dimension key/values where\n the key is the dimension name and the value\n is either a scalar value or an iterator\n of values to be associated with that\n dimension.\n Example: {\n 'InstanceId': ['i-0123456', 'i-0123457'],\n 'LoadBalancerName': 'test-lb'\n }\n \n :type alarm_actions: list of strs\n :param alarm_actions: A list of the ARNs of the actions to take in\n ALARM state\n \n :type insufficient_data_actions: list of strs\n :param insufficient_data_actions: A list of the ARNs of the actions to\n take in INSUFFICIENT_DATA state\n \n :type ok_actions: list of strs\n :param ok_actions: A list of the ARNs of the actions to take in OK 
state\n \"\"\"\n self.name = name\n self.connection = connection\n self.metric = metric\n self.namespace = namespace\n self.statistic = statistic\n if threshold is not None:\n self.threshold = float(threshold)\n else:\n self.threshold = None\n self.comparison = self._cmp_map.get(comparison)\n if period is not None:\n self.period = int(period)\n else:\n self.period = None\n if evaluation_periods is not None:\n self.evaluation_periods = int(evaluation_periods)\n else:\n self.evaluation_periods = None\n self.actions_enabled = None\n self.alarm_arn = None\n self.last_updated = None\n self.description = description\n self.dimensions = dimensions\n self.state_reason = None\n self.state_value = None\n self.unit = unit\n self.alarm_actions = alarm_actions\n self.insufficient_data_actions = insufficient_data_actions\n self.ok_actions = ok_actions\n\n def __repr__(self):\n return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric,\n self.statistic,\n self.comparison,\n self.threshold)\n\n def startElement(self, name, attrs, connection):\n if name == 'AlarmActions':\n self.alarm_actions = ListElement()\n return self.alarm_actions\n elif name == 'InsufficientDataActions':\n self.insufficient_data_actions = ListElement()\n return self.insufficient_data_actions\n elif name == 'OKActions':\n self.ok_actions = ListElement()\n return self.ok_actions\n elif name == 'Dimensions':\n self.dimensions = Dimension()\n return self.dimensions\n else:\n pass\n\n def endElement(self, name, value, connection):\n if name == 'ActionsEnabled':\n self.actions_enabled = value\n elif name == 'AlarmArn':\n self.alarm_arn = value\n elif name == 'AlarmConfigurationUpdatedTimestamp':\n self.last_updated = value\n elif name == 'AlarmDescription':\n self.description = value\n elif name == 'AlarmName':\n self.name = value\n elif name == 'ComparisonOperator':\n setattr(self, 'comparison', self._rev_cmp_map[value])\n elif name == 'EvaluationPeriods':\n self.evaluation_periods = int(value)\n elif name == 'MetricName':\n self.metric = value\n elif name == 'Namespace':\n self.namespace = value\n elif name == 'Period':\n self.period = int(value)\n elif name == 'StateReason':\n self.state_reason = value\n elif name == 'StateValue':\n self.state_value = value\n elif name == 'Statistic':\n self.statistic = value\n elif name == 'Threshold':\n self.threshold = float(value)\n elif name == 'Unit':\n self.unit = value\n else:\n setattr(self, name, value)\n\n def set_state(self, value, reason, data=None):\n \"\"\" Temporarily sets the state of an alarm.\n\n :type value: str\n :param value: OK | ALARM | INSUFFICIENT_DATA\n\n :type reason: str\n :param reason: Reason alarm set (human readable).\n\n :type data: str\n :param data: Reason data (will be jsonified).\n \"\"\"\n return self.connection.set_alarm_state(self.name, reason, value, data)\n\n def update(self):\n return self.connection.update_alarm(self)\n\n def enable_actions(self):\n return self.connection.enable_alarm_actions([self.name])\n\n def disable_actions(self):\n return self.connection.disable_alarm_actions([self.name])\n\n def describe_history(self, start_date=None, end_date=None, max_records=None,\n history_item_type=None, next_token=None):\n return self.connection.describe_alarm_history(self.name, start_date,\n end_date, max_records,\n history_item_type,\n next_token)\n\n def add_alarm_action(self, action_arn=None):\n \"\"\"\n Adds an alarm action, represented as an SNS topic, to this alarm. 
\n What do do when alarm is triggered.\n\n :type action_arn: str\n :param action_arn: SNS topics to which notification should be \n sent if the alarm goes to state ALARM.\n \"\"\"\n if not action_arn:\n return # Raise exception instead?\n self.actions_enabled = 'true'\n self.alarm_actions.append(action_arn)\n\n def add_insufficient_data_action(self, action_arn=None):\n \"\"\"\n Adds an insufficient_data action, represented as an SNS topic, to\n this alarm. What to do when the insufficient_data state is reached.\n\n :type action_arn: str\n :param action_arn: SNS topics to which notification should be \n sent if the alarm goes to state INSUFFICIENT_DATA.\n \"\"\"\n if not action_arn:\n return\n self.actions_enabled = 'true'\n self.insufficient_data_actions.append(action_arn)\n \n def add_ok_action(self, action_arn=None):\n \"\"\"\n Adds an ok action, represented as an SNS topic, to this alarm. What\n to do when the ok state is reached.\n\n :type action_arn: str\n :param action_arn: SNS topics to which notification should be \n sent if the alarm goes to state INSUFFICIENT_DATA.\n \"\"\"\n if not action_arn:\n return\n self.actions_enabled = 'true'\n self.ok_actions.append(action_arn)\n\n def delete(self):\n self.connection.delete_alarms([self.name])\n\nclass AlarmHistoryItem(object):\n def __init__(self, connection=None):\n self.connection = connection\n\n def __repr__(self):\n return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'AlarmName':\n self.name = value\n elif name == 'HistoryData':\n self.data = json.loads(value)\n elif name == 'HistoryItemType':\n self.tem_type = value\n elif name == 'HistorySummary':\n self.summary = value\n elif name == 'Timestamp':\n try:\n self.timestamp = datetime.strptime(value,\n '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n\n", "path": "boto/ec2/cloudwatch/alarm.py"}]} | 3,936 | 264 |
gh_patches_debug_28626 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1053 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Updating Server Example does not work
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* check the discussions forum https://github.com/riptideio/pymodbus/discussions
* prepare a short, runnable example that reproduces the issue with the latest development version of Pymodbus
-->
### Versions
* Python: 3.7, 3.8, 3.10
* OS: ubuntu bionic
* Pymodbus: 2.5.3
* Modbus Hardware (if used):
### Pymodbus Specific
* Server: tcp - sync/async
### Description
I try to run the example; the first error is:
`TypeError: ModbusDeviceIdentification.__init__() got an unexpected keyword argument 'info_name'`
When I remove that info, it ends with:
```
modbusupdater.py", line 81, in run_updating_server
loop.start(time, now=False) # initially delay by time
AttributeError: '_UnixSelectorEventLoop' object has no attribute 'start'
```
The async/sync servers show the same error with info_name but run once it is removed; however, I am unable to run the updating server example.
Thank you
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/common/updating_server.py`
Content:
```
1 #!/usr/bin/env python3
2 # pylint: disable=missing-any-param-doc,differing-param-doc
3 """Pymodbus Server With Updating Thread.
4
5 This is an example of having a background thread updating the
6 context while the server is operating. This can also be done with
7 a python thread::
8
9 from threading import Thread
10 Thread(target=updating_writer, args=(context,)).start()
11 """
12 import logging
13 import asyncio
14
15 from pymodbus.datastore import (
16 ModbusSequentialDataBlock,
17 ModbusServerContext,
18 ModbusSlaveContext,
19 )
20 from pymodbus.device import ModbusDeviceIdentification
21 from pymodbus.server.async_io import StartTcpServer
22 from pymodbus.version import version
23
24 # --------------------------------------------------------------------------- #
25 # configure the service logging
26 # --------------------------------------------------------------------------- #
27 log = logging.getLogger()
28 log.setLevel(logging.DEBUG)
29
30 # --------------------------------------------------------------------------- #
31 # define your callback process
32 # --------------------------------------------------------------------------- #
33
34
35 def updating_writer(extra):
36 """Run every so often,
37
38 and updates live values of the context. It should be noted
39 that there is a race condition for the update.
40
41 :param arguments: The input arguments to the call
42 """
43 log.debug("updating the context")
44 context = extra[0]
45 register = 3
46 slave_id = 0x00
47 address = 0x10
48 values = context[slave_id].getValues(register, address, count=5)
49 values = [v + 1 for v in values]
50 txt = f"new values: {str(values)}"
51 log.debug(txt)
52 context[slave_id].setValues(register, address, values)
53
54
55 def run_updating_server():
56 """Run updating server."""
57 # ----------------------------------------------------------------------- #
58 # initialize your data store
59 # ----------------------------------------------------------------------- #
60
61 store = ModbusSlaveContext(
62 di=ModbusSequentialDataBlock(0, [17] * 100),
63 co=ModbusSequentialDataBlock(0, [17] * 100),
64 hr=ModbusSequentialDataBlock(0, [17] * 100),
65 ir=ModbusSequentialDataBlock(0, [17] * 100),
66 )
67 context = ModbusServerContext(slaves=store, single=True)
68
69 # ----------------------------------------------------------------------- #
70 # initialize the server information
71 # ----------------------------------------------------------------------- #
72 identity = ModbusDeviceIdentification(
73 info_name={
74 "VendorName": "pymodbus",
75 "ProductCode": "PM",
76 "VendorUrl": "https://github.com/riptideio/pymodbus/",
77 "ProductName": "pymodbus Server",
78 "ModelName": "pymodbus Server",
79 "MajorMinorRevision": version.short(),
80 }
81 )
82
83 # ----------------------------------------------------------------------- #
84 # run the server you want
85 # ----------------------------------------------------------------------- #
86 time = 5 # 5 seconds delay
87 loop = asyncio.get_event_loop()
88 loop.start(time, now=False) # initially delay by time
89 StartTcpServer(context, identity=identity, address=("localhost", 5020))
90
91
92 if __name__ == "__main__":
93 run_updating_server()
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/common/updating_server.py b/examples/common/updating_server.py
--- a/examples/common/updating_server.py
+++ b/examples/common/updating_server.py
@@ -36,7 +36,7 @@
"""Run every so often,
and updates live values of the context. It should be noted
- that there is a race condition for the update.
+ that there is a lrace condition for the update.
:param arguments: The input arguments to the call
"""
@@ -52,7 +52,7 @@
context[slave_id].setValues(register, address, values)
-def run_updating_server():
+async def run_updating_server():
"""Run updating server."""
# ----------------------------------------------------------------------- #
# initialize your data store
@@ -83,11 +83,15 @@
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
- time = 5 # 5 seconds delay
- loop = asyncio.get_event_loop()
- loop.start(time, now=False) # initially delay by time
- StartTcpServer(context, identity=identity, address=("localhost", 5020))
+ log.debug("Start server")
+ await StartTcpServer(
+ context,
+ identity=identity,
+ address=("localhost", 5020),
+ defer_start=False
+ )
+ log.debug("Done")
if __name__ == "__main__":
- run_updating_server()
+ asyncio.run(run_updating_server())
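
As a side note (not part of the patch above): once `run_updating_server` is a coroutine, the periodic update the original example intended can be scheduled with plain asyncio in place of the broken `loop.start(time, now=False)` call; a rough sketch reusing the example's `updating_writer` and `context`:

```python
# Hypothetical periodic scheduler built only on asyncio primitives.
async def updating_task(context, interval=5):
    while True:
        await asyncio.sleep(interval)   # initial delay, then repeat every `interval` seconds
        updating_writer((context,))     # reuse the example's callback unchanged

# Inside run_updating_server(), before `await StartTcpServer(...)`:
#     asyncio.create_task(updating_task(context))
```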
| {"golden_diff": "diff --git a/examples/common/updating_server.py b/examples/common/updating_server.py\n--- a/examples/common/updating_server.py\n+++ b/examples/common/updating_server.py\n@@ -36,7 +36,7 @@\n \"\"\"Run every so often,\n \n and updates live values of the context. It should be noted\n- that there is a race condition for the update.\n+ that there is a lrace condition for the update.\n \n :param arguments: The input arguments to the call\n \"\"\"\n@@ -52,7 +52,7 @@\n context[slave_id].setValues(register, address, values)\n \n \n-def run_updating_server():\n+async def run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n@@ -83,11 +83,15 @@\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n- time = 5 # 5 seconds delay\n- loop = asyncio.get_event_loop()\n- loop.start(time, now=False) # initially delay by time\n- StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n+ log.debug(\"Start server\")\n+ await StartTcpServer(\n+ context,\n+ identity=identity,\n+ address=(\"localhost\", 5020),\n+ defer_start=False\n+ )\n+ log.debug(\"Done\")\n \n \n if __name__ == \"__main__\":\n- run_updating_server()\n+ asyncio.run(run_updating_server())\n", "issue": "Updating Server Example does not work\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * check the discussions forum https://github.com/riptideio/pymodbus/discussions\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.7, 3.8, 3,10\r\n* OS: ubuntu bionic\r\n* Pymodbus: 2.5.3\r\n* Modbus Hardware (if used): \r\n\r\n### Pymodbus Specific\r\n* Server: tcp - sync/async\r\n\r\n\r\n### Description\r\n\r\nI try to run example, first error is\r\n`TypeError: ModbusDeviceIdentification.__init__() got an unexpected keyword argument 'info_name'`\r\n\r\nwhen i remove that info, it ends on\r\n\r\n```\r\nmodbusupdater.py\", line 81, in run_updating_server\r\n loop.start(time, now=False) # initially delay by time\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute 'start'\r\n```\r\n\r\nasync/sync server have same error with info_name, but run when removed. But i am unable to run updating server example.\r\n\r\nThank you\n", "before_files": [{"content": "#!/usr/bin/env python3\n# pylint: disable=missing-any-param-doc,differing-param-doc\n\"\"\"Pymodbus Server With Updating Thread.\n\nThis is an example of having a background thread updating the\ncontext while the server is operating. 
This can also be done with\na python thread::\n\n from threading import Thread\n Thread(target=updating_writer, args=(context,)).start()\n\"\"\"\nimport logging\nimport asyncio\n\nfrom pymodbus.datastore import (\n ModbusSequentialDataBlock,\n ModbusServerContext,\n ModbusSlaveContext,\n)\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.server.async_io import StartTcpServer\nfrom pymodbus.version import version\n\n# --------------------------------------------------------------------------- #\n# configure the service logging\n# --------------------------------------------------------------------------- #\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n# --------------------------------------------------------------------------- #\n# define your callback process\n# --------------------------------------------------------------------------- #\n\n\ndef updating_writer(extra):\n \"\"\"Run every so often,\n\n and updates live values of the context. It should be noted\n that there is a race condition for the update.\n\n :param arguments: The input arguments to the call\n \"\"\"\n log.debug(\"updating the context\")\n context = extra[0]\n register = 3\n slave_id = 0x00\n address = 0x10\n values = context[slave_id].getValues(register, address, count=5)\n values = [v + 1 for v in values]\n txt = f\"new values: {str(values)}\"\n log.debug(txt)\n context[slave_id].setValues(register, address, values)\n\n\ndef run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n # ----------------------------------------------------------------------- #\n\n store = ModbusSlaveContext(\n di=ModbusSequentialDataBlock(0, [17] * 100),\n co=ModbusSequentialDataBlock(0, [17] * 100),\n hr=ModbusSequentialDataBlock(0, [17] * 100),\n ir=ModbusSequentialDataBlock(0, [17] * 100),\n )\n context = ModbusServerContext(slaves=store, single=True)\n\n # ----------------------------------------------------------------------- #\n # initialize the server information\n # ----------------------------------------------------------------------- #\n identity = ModbusDeviceIdentification(\n info_name={\n \"VendorName\": \"pymodbus\",\n \"ProductCode\": \"PM\",\n \"VendorUrl\": \"https://github.com/riptideio/pymodbus/\",\n \"ProductName\": \"pymodbus Server\",\n \"ModelName\": \"pymodbus Server\",\n \"MajorMinorRevision\": version.short(),\n }\n )\n\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n time = 5 # 5 seconds delay\n loop = asyncio.get_event_loop()\n loop.start(time, now=False) # initially delay by time\n StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n\n\nif __name__ == \"__main__\":\n run_updating_server()\n", "path": "examples/common/updating_server.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# pylint: disable=missing-any-param-doc,differing-param-doc\n\"\"\"Pymodbus Server With Updating Thread.\n\nThis is an example of having a background thread updating the\ncontext while the server is operating. 
This can also be done with\na python thread::\n\n from threading import Thread\n Thread(target=updating_writer, args=(context,)).start()\n\"\"\"\nimport logging\nimport asyncio\n\nfrom pymodbus.datastore import (\n ModbusSequentialDataBlock,\n ModbusServerContext,\n ModbusSlaveContext,\n)\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.server.async_io import StartTcpServer\nfrom pymodbus.version import version\n\n# --------------------------------------------------------------------------- #\n# configure the service logging\n# --------------------------------------------------------------------------- #\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n# --------------------------------------------------------------------------- #\n# define your callback process\n# --------------------------------------------------------------------------- #\n\n\ndef updating_writer(extra):\n \"\"\"Run every so often,\n\n and updates live values of the context. It should be noted\n that there is a lrace condition for the update.\n\n :param arguments: The input arguments to the call\n \"\"\"\n log.debug(\"updating the context\")\n context = extra[0]\n register = 3\n slave_id = 0x00\n address = 0x10\n values = context[slave_id].getValues(register, address, count=5)\n values = [v + 1 for v in values]\n txt = f\"new values: {str(values)}\"\n log.debug(txt)\n context[slave_id].setValues(register, address, values)\n\n\nasync def run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n # ----------------------------------------------------------------------- #\n\n store = ModbusSlaveContext(\n di=ModbusSequentialDataBlock(0, [17] * 100),\n co=ModbusSequentialDataBlock(0, [17] * 100),\n hr=ModbusSequentialDataBlock(0, [17] * 100),\n ir=ModbusSequentialDataBlock(0, [17] * 100),\n )\n context = ModbusServerContext(slaves=store, single=True)\n\n # ----------------------------------------------------------------------- #\n # initialize the server information\n # ----------------------------------------------------------------------- #\n identity = ModbusDeviceIdentification(\n info_name={\n \"VendorName\": \"pymodbus\",\n \"ProductCode\": \"PM\",\n \"VendorUrl\": \"https://github.com/riptideio/pymodbus/\",\n \"ProductName\": \"pymodbus Server\",\n \"ModelName\": \"pymodbus Server\",\n \"MajorMinorRevision\": version.short(),\n }\n )\n\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n log.debug(\"Start server\")\n await StartTcpServer(\n context,\n identity=identity,\n address=(\"localhost\", 5020),\n defer_start=False\n )\n log.debug(\"Done\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_updating_server())\n", "path": "examples/common/updating_server.py"}]} | 1,458 | 338 |
gh_patches_debug_30500 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1943 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CLIPScore demo code is incorrect
Page: [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html)
The demo imports CLIPScore as:
```python
from torchmetrics.multimodal import CLIPScore
```
But the correct import is:
```python
from torchmetrics.multimodal.clip_score import CLIPScore
```
--- END ISSUE ---
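
For reference, a minimal usage sketch with the corrected import path, mirroring the example in the class docstring below (it assumes `transformers` is installed and the CLIP weights can be downloaded):

```python
import torch
from torchmetrics.multimodal.clip_score import CLIPScore

# Score a random image tensor against a caption; CLIP score is bounded to [0, 100].
metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
score = metric(torch.randint(255, (3, 224, 224)), "a photo of a cat")
print(score.detach())
```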
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/multimodal/clip_score.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Any, List, Optional, Sequence, Union
16
17 import torch
18 from torch import Tensor
19 from typing_extensions import Literal
20
21 from torchmetrics import Metric
22 from torchmetrics.functional.multimodal.clip_score import _clip_score_update, _get_model_and_processor
23 from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
24 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE, _TRANSFORMERS_AVAILABLE
25 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
26
27 if not _MATPLOTLIB_AVAILABLE:
28 __doctest_skip__ = ["CLIPScore.plot"]
29
30 _DEFAULT_MODEL: str = "openai/clip-vit-large-patch14"
31
32 if _TRANSFORMERS_AVAILABLE:
33 from transformers import CLIPModel as _CLIPModel
34 from transformers import CLIPProcessor as _CLIPProcessor
35
36 def _download_clip() -> None:
37 _CLIPModel.from_pretrained(_DEFAULT_MODEL)
38 _CLIPProcessor.from_pretrained(_DEFAULT_MODEL)
39
40 if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
41 __doctest_skip__ = ["CLIPScore", "CLIPScore.plot"]
42 else:
43 __doctest_skip__ = ["CLIPScore", "CLIPScore.plot"]
44
45
46 class CLIPScore(Metric):
47 r"""Calculates `CLIP Score`_ which is a text-to-image similarity metric.
48
49 CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an
50 image and the actual content of the image. It has been found to be highly correlated with human judgement. The
51 metric is defined as:
52
53 .. math::
54 \text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)
55
56 which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and
57 textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer
58 to 100 the better.
59
60 .. note:: Metric is not scriptable
61
62 Args:
63 model_name_or_path: string indicating the version of the CLIP model to use. Available models are:
64
65 - `"openai/clip-vit-base-patch16"`
66 - `"openai/clip-vit-base-patch32"`
67 - `"openai/clip-vit-large-patch14-336"`
68 - `"openai/clip-vit-large-patch14"`
69
70 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
71
72 Raises:
73 ModuleNotFoundError:
74 If transformers package is not installed or version is lower than 4.10.0
75
76 Example:
77 >>> import torch
78 >>> _ = torch.manual_seed(42)
79 >>> from torchmetrics.multimodal import CLIPScore
80 >>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
81 >>> score = metric(torch.randint(255, (3, 224, 224)), "a photo of a cat")
82 >>> print(score.detach())
83 tensor(24.7691)
84
85 """
86
87 is_differentiable: bool = False
88 higher_is_better: bool = True
89 full_state_update: bool = True
90 plot_lower_bound: float = 0.0
91
92 score: Tensor
93 n_samples: Tensor
94 plot_upper_bound = 100.0
95
96 def __init__(
97 self,
98 model_name_or_path: Literal[
99 "openai/clip-vit-base-patch16",
100 "openai/clip-vit-base-patch32",
101 "openai/clip-vit-large-patch14-336",
102 "openai/clip-vit-large-patch14",
103 ] = _DEFAULT_MODEL, # type: ignore[assignment]
104 **kwargs: Any,
105 ) -> None:
106 super().__init__(**kwargs)
107 self.model, self.processor = _get_model_and_processor(model_name_or_path)
108 self.add_state("score", torch.tensor(0.0), dist_reduce_fx="sum")
109 self.add_state("n_samples", torch.tensor(0, dtype=torch.long), dist_reduce_fx="sum")
110
111 def update(self, images: Union[Tensor, List[Tensor]], text: Union[str, List[str]]) -> None:
112 """Update CLIP score on a batch of images and text.
113
114 Args:
115 images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors
116 text: Either a single caption or a list of captions
117
118 Raises:
119 ValueError:
120 If not all images have format [C, H, W]
121 ValueError:
122 If the number of images and captions do not match
123
124 """
125 score, n_samples = _clip_score_update(images, text, self.model, self.processor)
126 self.score += score.sum(0)
127 self.n_samples += n_samples
128
129 def compute(self) -> Tensor:
130 """Compute accumulated clip score."""
131 return torch.max(self.score / self.n_samples, torch.zeros_like(self.score))
132
133 def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
134 """Plot a single or multiple values from the metric.
135
136 Args:
137 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
138 If no value is provided, will automatically call `metric.compute` and plot that result.
139 ax: An matplotlib axis object. If provided will add plot to that axis
140
141 Returns:
142 Figure and Axes object
143
144 Raises:
145 ModuleNotFoundError:
146 If `matplotlib` is not installed
147
148 .. plot::
149 :scale: 75
150
151 >>> # Example plotting a single value
152 >>> import torch
153 >>> from torchmetrics.multimodal import CLIPScore
154 >>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
155 >>> metric.update(torch.randint(255, (3, 224, 224)), "a photo of a cat")
156 >>> fig_, ax_ = metric.plot()
157
158 .. plot::
159 :scale: 75
160
161 >>> # Example plotting multiple values
162 >>> import torch
163 >>> from torchmetrics.multimodal import CLIPScore
164 >>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
165 >>> values = [ ]
166 >>> for _ in range(10):
167 ... values.append(metric(torch.randint(255, (3, 224, 224)), "a photo of a cat"))
168 >>> fig_, ax_ = metric.plot(values)
169
170 """
171 return self._plot(val, ax)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/multimodal/clip_score.py b/src/torchmetrics/multimodal/clip_score.py
--- a/src/torchmetrics/multimodal/clip_score.py
+++ b/src/torchmetrics/multimodal/clip_score.py
@@ -76,7 +76,7 @@
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
- >>> from torchmetrics.multimodal import CLIPScore
+ >>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> score = metric(torch.randint(255, (3, 224, 224)), "a photo of a cat")
>>> print(score.detach())
@@ -150,7 +150,7 @@
>>> # Example plotting a single value
>>> import torch
- >>> from torchmetrics.multimodal import CLIPScore
+ >>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> metric.update(torch.randint(255, (3, 224, 224)), "a photo of a cat")
>>> fig_, ax_ = metric.plot()
@@ -160,7 +160,7 @@
>>> # Example plotting multiple values
>>> import torch
- >>> from torchmetrics.multimodal import CLIPScore
+ >>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> values = [ ]
>>> for _ in range(10):
| {"golden_diff": "diff --git a/src/torchmetrics/multimodal/clip_score.py b/src/torchmetrics/multimodal/clip_score.py\n--- a/src/torchmetrics/multimodal/clip_score.py\n+++ b/src/torchmetrics/multimodal/clip_score.py\n@@ -76,7 +76,7 @@\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n- >>> from torchmetrics.multimodal import CLIPScore\n+ >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> score = metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> print(score.detach())\n@@ -150,7 +150,7 @@\n \n >>> # Example plotting a single value\n >>> import torch\n- >>> from torchmetrics.multimodal import CLIPScore\n+ >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> metric.update(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> fig_, ax_ = metric.plot()\n@@ -160,7 +160,7 @@\n \n >>> # Example plotting multiple values\n >>> import torch\n- >>> from torchmetrics.multimodal import CLIPScore\n+ >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> values = [ ]\n >>> for _ in range(10):\n", "issue": "CLIPScore demo code is incorrect\n\r\nPage: [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html)\r\n\r\nDemo import CLIPScore as:\r\n```python\r\nfrom torchmetrics.multimodal import CLIPScore\r\n```\r\n\r\nBut the correct import is: \r\n```python\r\nfrom torchmetrics.multimodal.clip_score import CLIPScore\r\n```\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, List, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics import Metric\nfrom torchmetrics.functional.multimodal.clip_score import _clip_score_update, _get_model_and_processor\nfrom torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE, _TRANSFORMERS_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"CLIPScore.plot\"]\n\n_DEFAULT_MODEL: str = \"openai/clip-vit-large-patch14\"\n\nif _TRANSFORMERS_AVAILABLE:\n from transformers import CLIPModel as _CLIPModel\n from transformers import CLIPProcessor as _CLIPProcessor\n\n def _download_clip() -> None:\n _CLIPModel.from_pretrained(_DEFAULT_MODEL)\n _CLIPProcessor.from_pretrained(_DEFAULT_MODEL)\n\n if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):\n __doctest_skip__ = [\"CLIPScore\", \"CLIPScore.plot\"]\nelse:\n __doctest_skip__ = [\"CLIPScore\", \"CLIPScore.plot\"]\n\n\nclass CLIPScore(Metric):\n r\"\"\"Calculates `CLIP Score`_ which is a text-to-image similarity 
metric.\n\n CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an\n image and the actual content of the image. It has been found to be highly correlated with human judgement. The\n metric is defined as:\n\n .. math::\n \\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)\n\n which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and\n textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer\n to 100 the better.\n\n .. note:: Metric is not scriptable\n\n Args:\n model_name_or_path: string indicating the version of the CLIP model to use. Available models are:\n\n - `\"openai/clip-vit-base-patch16\"`\n - `\"openai/clip-vit-base-patch32\"`\n - `\"openai/clip-vit-large-patch14-336\"`\n - `\"openai/clip-vit-large-patch14\"`\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If transformers package is not installed or version is lower than 4.10.0\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics.multimodal import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> score = metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> print(score.detach())\n tensor(24.7691)\n\n \"\"\"\n\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = True\n plot_lower_bound: float = 0.0\n\n score: Tensor\n n_samples: Tensor\n plot_upper_bound = 100.0\n\n def __init__(\n self,\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = _DEFAULT_MODEL, # type: ignore[assignment]\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.model, self.processor = _get_model_and_processor(model_name_or_path)\n self.add_state(\"score\", torch.tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"n_samples\", torch.tensor(0, dtype=torch.long), dist_reduce_fx=\"sum\")\n\n def update(self, images: Union[Tensor, List[Tensor]], text: Union[str, List[str]]) -> None:\n \"\"\"Update CLIP score on a batch of images and text.\n\n Args:\n images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors\n text: Either a single caption or a list of captions\n\n Raises:\n ValueError:\n If not all images have format [C, H, W]\n ValueError:\n If the number of images and captions do not match\n\n \"\"\"\n score, n_samples = _clip_score_update(images, text, self.model, self.processor)\n self.score += score.sum(0)\n self.n_samples += n_samples\n\n def compute(self) -> Tensor:\n \"\"\"Compute accumulated clip score.\"\"\"\n return torch.max(self.score / self.n_samples, torch.zeros_like(self.score))\n\n def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.multimodal import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> metric.update(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.multimodal import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\"))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/multimodal/clip_score.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, List, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics import Metric\nfrom torchmetrics.functional.multimodal.clip_score import _clip_score_update, _get_model_and_processor\nfrom torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE, _TRANSFORMERS_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"CLIPScore.plot\"]\n\n_DEFAULT_MODEL: str = \"openai/clip-vit-large-patch14\"\n\nif _TRANSFORMERS_AVAILABLE:\n from transformers import CLIPModel as _CLIPModel\n from transformers import CLIPProcessor as _CLIPProcessor\n\n def _download_clip() -> None:\n _CLIPModel.from_pretrained(_DEFAULT_MODEL)\n _CLIPProcessor.from_pretrained(_DEFAULT_MODEL)\n\n if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):\n __doctest_skip__ = [\"CLIPScore\", \"CLIPScore.plot\"]\nelse:\n __doctest_skip__ = [\"CLIPScore\", \"CLIPScore.plot\"]\n\n\nclass CLIPScore(Metric):\n r\"\"\"Calculates `CLIP Score`_ which is a text-to-image similarity metric.\n\n CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an\n image and the actual content of the image. It has been found to be highly correlated with human judgement. The\n metric is defined as:\n\n .. math::\n \\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)\n\n which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and\n textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer\n to 100 the better.\n\n .. note:: Metric is not scriptable\n\n Args:\n model_name_or_path: string indicating the version of the CLIP model to use. 
Available models are:\n\n - `\"openai/clip-vit-base-patch16\"`\n - `\"openai/clip-vit-base-patch32\"`\n - `\"openai/clip-vit-large-patch14-336\"`\n - `\"openai/clip-vit-large-patch14\"`\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If transformers package is not installed or version is lower than 4.10.0\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> score = metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> print(score.detach())\n tensor(24.7691)\n\n \"\"\"\n\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = True\n plot_lower_bound: float = 0.0\n\n score: Tensor\n n_samples: Tensor\n plot_upper_bound = 100.0\n\n def __init__(\n self,\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = _DEFAULT_MODEL, # type: ignore[assignment]\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.model, self.processor = _get_model_and_processor(model_name_or_path)\n self.add_state(\"score\", torch.tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"n_samples\", torch.tensor(0, dtype=torch.long), dist_reduce_fx=\"sum\")\n\n def update(self, images: Union[Tensor, List[Tensor]], text: Union[str, List[str]]) -> None:\n \"\"\"Update CLIP score on a batch of images and text.\n\n Args:\n images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors\n text: Either a single caption or a list of captions\n\n Raises:\n ValueError:\n If not all images have format [C, H, W]\n ValueError:\n If the number of images and captions do not match\n\n \"\"\"\n score, n_samples = _clip_score_update(images, text, self.model, self.processor)\n self.score += score.sum(0)\n self.n_samples += n_samples\n\n def compute(self) -> Tensor:\n \"\"\"Compute accumulated clip score.\"\"\"\n return torch.max(self.score / self.n_samples, torch.zeros_like(self.score))\n\n def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> metric.update(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> values = [ ]\n >>> for _ in range(10):\n ... 
values.append(metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\"))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/multimodal/clip_score.py"}]} | 2,443 | 405 |
gh_patches_debug_34843 | rasdani/github-patches | git_diff | ARM-DOE__ACT-678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handling Incorrect ARM DQRs when applied with ACT function
We have implemented a function to query the ARM Data Quality Report (DQR) database to return time periods when data is flagged. There are three levels of flagging within the DQRs, two of which can replace the variable values with NaN. ARM has a lot of DQRs where the list of variables is not accurate and over-lists the affected variables.
For example, a DQR may indicate the instrument is malfunctioning and select all variables in the netCDF file. While the data is incorrect, the location, time, and QC variables are correct. Currently we loop over the variables and apply the DQR to each variable listed in the DQR. Some of the listed variables are scalars, which causes an exception that we catch and handle; handling the exception prints a message indicating that the scalar variable could not be indexed.
We should discuss a better way to handle these variables, as the print statement may confuse users when there is nothing wrong with the scalar variables. A couple of options include:
1) Correctly handling the replacement of data values with NaN for the scalar variables
2) Setting a keyword to enact the print statement, but set to False as default
3) Skipping over these most likely incorrectly listed variables.
An example output for sgpaosccn2colaE13.b1 on 20170903 using D170905.3
```
Skipping 'eta_lookup_table' DQR application because of IndexError
Skipping 'base_time' DQR application because of IndexError
Skipping 'alt' DQR application because of IndexError
Skipping 'droplet_size_bounds' DQR application because of IndexError
Skipping 'lon' DQR application because of IndexError
Skipping 'lat' DQR application because of IndexError
```
--- END ISSUE ---
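A minimal sketch of option 3, assuming the data live in an xarray Dataset with a `time` dimension: variables that do not share that dimension (scalars such as `lat`, `lon`, and `alt`, or lookup tables) can be filtered out before any time-based indexing is attempted. The helper below is illustrative only and is not part of ACT.

```python
# Illustrative only: keep just the variables a time-based DQR index can apply to.
import xarray as xr


def time_varying_variables(ds: xr.Dataset) -> list:
    """Return the names of data variables that have a 'time' dimension."""
    return [name for name, da in ds.data_vars.items() if "time" in da.dims]
```

With such a filter, the scalar variables from the example output above would simply be skipped instead of triggering the IndexError message.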
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/qc/arm.py`
Content:
```
1 """
2 Functions specifically for working with QC/DQRs from
3 the Atmospheric Radiation Measurement Program (ARM).
4
5 """
6
7 import datetime as dt
8 import numpy as np
9 import requests
10
11 from act.config import DEFAULT_DATASTREAM_NAME
12
13
14 def add_dqr_to_qc(
15 ds,
16 variable=None,
17 assessment='incorrect,suspect',
18 exclude=None,
19 include=None,
20 normalize_assessment=True,
21 cleanup_qc=True,
22 dqr_link=False,
23 ):
24 """
25 Function to query the ARM DQR web service for reports and
26 add as a new quality control test to ancillary quality control
27 variable. If no anicllary quality control variable exist a new
28 one will be created and lined to the data variable through
29 ancillary_variables attribure.
30
31 See online documentation from ARM Data
32 Quality Office on the use of the DQR web service.
33
34 https://code.arm.gov/docs/dqrws-examples/wikis/home
35
36 Information about the DQR web-service avaible at
37 https://adc.arm.gov/dqrws/
38
39 Parameters
40 ----------
41 ds : xarray.Dataset
42 Xarray dataset
43 variable : string, or list of str, or None
44 Variables to check DQR web service. If set to None will
45 attempt to update all variables.
46 assessment : string
47 assessment type to get DQRs. Current options include
48 'missing', 'suspect', 'incorrect' or any combination separated
49 by a comma.
50 exclude : list of strings
51 DQR IDs to exclude from adding into QC
52 include : list of strings
53 List of DQR IDs to include in flagging of data. Any other DQR IDs
54 will be ignored.
55 normalize_assessment : boolean
56 The DQR assessment term is different than the embedded QC
57 term. Embedded QC uses "Bad" and "Indeterminate" while
58 DQRs use "Incorrect" and "Suspect". Setting this will ensure
59 the same terms are used for both.
60 cleanup_qc : boolean
61 Call clean.cleanup() method to convert to standardized ancillary
62 quality control variables. Has a little bit of overhead so
63 if the Dataset has already been cleaned up, no need to run.
64 dqr_link : boolean
65 Prints out a link for each DQR to read the full DQR. Defaults to False
66
67 Returns
68 -------
69 ds : xarray.Dataset
70 Xarray dataset containing new quality control variables
71
72 Examples
73 --------
74 .. code-block:: python
75
76 from act.qc.arm import add_dqr_to_qc
77 ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
78
79
80 """
81
82 # DQR Webservice goes off datastreams, pull from the dataset
83 if 'datastream' in ds.attrs:
84 datastream = ds.attrs['datastream']
85 elif '_datastream' in ds.attrs:
86 datastream = ds.attrs['_datastream']
87 else:
88 raise ValueError('Dataset does not have datastream attribute')
89
90 if datastream == DEFAULT_DATASTREAM_NAME:
91 raise ValueError("'datastream' name required for DQR service set to default value "
92 f"{datastream}. Unable to perform DQR service query.")
93
94 # Clean up QC to conform to CF conventions
95 if cleanup_qc:
96 ds.clean.cleanup()
97
98 # In order to properly flag data, get all variables if None. Exclude QC variables.
99 if variable is None:
100 variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
101
102 # Check to ensure variable is list
103 if not isinstance(variable, (list, tuple)):
104 variable = [variable]
105
106 # Loop through each variable and call web service for that variable
107 for var_name in variable:
108 # Create URL
109 url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
110 url += datastream
111 url += '&varname=' + var_name
112 url += ''.join(
113 [
114 '&searchmetric=',
115 assessment,
116 '&dqrfields=dqrid,starttime,endtime,metric,subject',
117 ]
118 )
119
120 # Call web service
121 req = requests.get(url)
122
123 # Check status values and raise error if not successful
124 status = req.status_code
125 if status == 400:
126 raise ValueError('Check parameters')
127 if status == 500:
128 raise ValueError('DQR Webservice Temporarily Down')
129
130 # Get data and run through each dqr
131 dqrs = req.text.splitlines()
132 time = ds['time'].values
133 dqr_results = {}
134 for line in dqrs:
135 line = line.split('|')
136 dqr_no = line[0]
137
138 # Exclude DQRs if in list
139 if exclude is not None and dqr_no in exclude:
140 continue
141
142 # Only include if in include list
143 if include is not None and dqr_no not in include:
144 continue
145
146 starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
147 endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
148 ind = np.where((time >= starttime) & (time <= endtime))
149 if ind[0].size == 0:
150 continue
151
152 if dqr_no in dqr_results.keys():
153 dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
154 else:
155 dqr_results[dqr_no] = {
156 'index': ind,
157 'test_assessment': line[3],
158 'test_meaning': ': '.join([dqr_no, line[-1]]),
159 }
160 if dqr_link:
161 print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
162 print(dqr_no, '-', line[3], ':', print_url)
163 for key, value in dqr_results.items():
164 try:
165 ds.qcfilter.add_test(
166 var_name,
167 index=value['index'],
168 test_meaning=value['test_meaning'],
169 test_assessment=value['test_assessment'],
170 )
171 except IndexError:
172 print(f"Skipping '{var_name}' DQR application because of IndexError")
173
174 if normalize_assessment:
175 ds.clean.normalize_assessment(variables=var_name)
176
177 return ds
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/qc/arm.py b/act/qc/arm.py
--- a/act/qc/arm.py
+++ b/act/qc/arm.py
@@ -20,6 +20,7 @@
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
+ skip_location_vars=False,
):
"""
Function to query the ARM DQR web service for reports and
@@ -63,6 +64,9 @@
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
+ skip_location_vars : boolean
+ Does not apply DQRs to location variables. This can be useful in the event
+ the submitter has erroneously selected all variables.
Returns
-------
@@ -104,7 +108,11 @@
variable = [variable]
# Loop through each variable and call web service for that variable
+ loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']
for var_name in variable:
+ if skip_location_vars:
+ if var_name in loc_vars:
+ continue
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
@@ -146,9 +154,13 @@
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
+
if ind[0].size == 0:
continue
+ if 'time' not in ds[var_name].dims:
+ ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))
+
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
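Assuming the patch above is applied, the new keyword would be used roughly as follows; this is a sketch of the call rather than the project's documented example, and `ds` is assumed to be an already-loaded xarray Dataset whose `datastream` attribute is set.

```python
from act.qc.arm import add_dqr_to_qc

# `ds` is an existing xarray Dataset produced elsewhere (assumption for this sketch).
ds = add_dqr_to_qc(
    ds,
    assessment="incorrect,suspect",
    skip_location_vars=True,  # leave lat/lon/alt untouched even if a DQR lists them
)
```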
| {"golden_diff": "diff --git a/act/qc/arm.py b/act/qc/arm.py\n--- a/act/qc/arm.py\n+++ b/act/qc/arm.py\n@@ -20,6 +20,7 @@\n normalize_assessment=True,\n cleanup_qc=True,\n dqr_link=False,\n+ skip_location_vars=False,\n ):\n \"\"\"\n Function to query the ARM DQR web service for reports and\n@@ -63,6 +64,9 @@\n if the Dataset has already been cleaned up, no need to run.\n dqr_link : boolean\n Prints out a link for each DQR to read the full DQR. Defaults to False\n+ skip_location_vars : boolean\n+ Does not apply DQRs to location variables. This can be useful in the event\n+ the submitter has erroneously selected all variables.\n \n Returns\n -------\n@@ -104,7 +108,11 @@\n variable = [variable]\n \n # Loop through each variable and call web service for that variable\n+ loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']\n for var_name in variable:\n+ if skip_location_vars:\n+ if var_name in loc_vars:\n+ continue\n # Create URL\n url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='\n url += datastream\n@@ -146,9 +154,13 @@\n starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))\n endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))\n ind = np.where((time >= starttime) & (time <= endtime))\n+\n if ind[0].size == 0:\n continue\n \n+ if 'time' not in ds[var_name].dims:\n+ ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))\n+\n if dqr_no in dqr_results.keys():\n dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)\n else:\n", "issue": "Handling Incorrect ARM DQRs when applied with ACT function\nWe have implemented a function to query the ARM Data Quality Report database to return time periods when data is flagged. There are three levels of flagging within the DQRs, with two of them able to replace the variable values with NaN. ARM has a lot of DQRs where the list of variables is not accurate, and is over listing the affected variables.\r\n\r\nFor example a DQR may indicate the instrument is malfunctioning and select all variables in the netCDF file. While the data is incorrect the location, time, and QC variables are correct. Currently we loop over variables and apply the DQR to variables listed in the DQR. Some variables listed are scalars which is causing an exception that we catch and handle. Handling the exception has a print statement indicating the scalar variable could not be indexed.\r\n\r\nWe should discuss a better way to handle these variables as the print statement may confuse users when there is nothing wrong with the scalar variables. 
A couple of options include:\r\n\r\n1) Correctly handling the replacement of data values with NaN for the scalar variables\r\n2) Setting a keyword to enact the print statement, but set to False as default\r\n3) Skip over these most likely incorrectly listed variables.\r\n\r\nAn example output for sgpaosccn2colaE13.b1 on 20170903 using D170905.3\r\n```\r\nSkipping 'eta_lookup_table' DQR application because of IndexError\r\nSkipping 'base_time' DQR application because of IndexError\r\nSkipping 'alt' DQR application because of IndexError\r\nSkipping 'droplet_size_bounds' DQR application because of IndexError\r\nSkipping 'lon' DQR application because of IndexError\r\nSkipping 'lat' DQR application because of IndexError\r\n```\n", "before_files": [{"content": "\"\"\"\nFunctions specifically for working with QC/DQRs from\nthe Atmospheric Radiation Measurement Program (ARM).\n\n\"\"\"\n\nimport datetime as dt\nimport numpy as np\nimport requests\n\nfrom act.config import DEFAULT_DATASTREAM_NAME\n\n\ndef add_dqr_to_qc(\n ds,\n variable=None,\n assessment='incorrect,suspect',\n exclude=None,\n include=None,\n normalize_assessment=True,\n cleanup_qc=True,\n dqr_link=False,\n):\n \"\"\"\n Function to query the ARM DQR web service for reports and\n add as a new quality control test to ancillary quality control\n variable. If no anicllary quality control variable exist a new\n one will be created and lined to the data variable through\n ancillary_variables attribure.\n\n See online documentation from ARM Data\n Quality Office on the use of the DQR web service.\n\n https://code.arm.gov/docs/dqrws-examples/wikis/home\n\n Information about the DQR web-service avaible at\n https://adc.arm.gov/dqrws/\n\n Parameters\n ----------\n ds : xarray.Dataset\n Xarray dataset\n variable : string, or list of str, or None\n Variables to check DQR web service. If set to None will\n attempt to update all variables.\n assessment : string\n assessment type to get DQRs. Current options include\n 'missing', 'suspect', 'incorrect' or any combination separated\n by a comma.\n exclude : list of strings\n DQR IDs to exclude from adding into QC\n include : list of strings\n List of DQR IDs to include in flagging of data. Any other DQR IDs\n will be ignored.\n normalize_assessment : boolean\n The DQR assessment term is different than the embedded QC\n term. Embedded QC uses \"Bad\" and \"Indeterminate\" while\n DQRs use \"Incorrect\" and \"Suspect\". Setting this will ensure\n the same terms are used for both.\n cleanup_qc : boolean\n Call clean.cleanup() method to convert to standardized ancillary\n quality control variables. Has a little bit of overhead so\n if the Dataset has already been cleaned up, no need to run.\n dqr_link : boolean\n Prints out a link for each DQR to read the full DQR. Defaults to False\n\n Returns\n -------\n ds : xarray.Dataset\n Xarray dataset containing new quality control variables\n\n Examples\n --------\n .. code-block:: python\n\n from act.qc.arm import add_dqr_to_qc\n ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])\n\n\n \"\"\"\n\n # DQR Webservice goes off datastreams, pull from the dataset\n if 'datastream' in ds.attrs:\n datastream = ds.attrs['datastream']\n elif '_datastream' in ds.attrs:\n datastream = ds.attrs['_datastream']\n else:\n raise ValueError('Dataset does not have datastream attribute')\n\n if datastream == DEFAULT_DATASTREAM_NAME:\n raise ValueError(\"'datastream' name required for DQR service set to default value \"\n f\"{datastream}. 
Unable to perform DQR service query.\")\n\n # Clean up QC to conform to CF conventions\n if cleanup_qc:\n ds.clean.cleanup()\n\n # In order to properly flag data, get all variables if None. Exclude QC variables.\n if variable is None:\n variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))\n\n # Check to ensure variable is list\n if not isinstance(variable, (list, tuple)):\n variable = [variable]\n\n # Loop through each variable and call web service for that variable\n for var_name in variable:\n # Create URL\n url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='\n url += datastream\n url += '&varname=' + var_name\n url += ''.join(\n [\n '&searchmetric=',\n assessment,\n '&dqrfields=dqrid,starttime,endtime,metric,subject',\n ]\n )\n\n # Call web service\n req = requests.get(url)\n\n # Check status values and raise error if not successful\n status = req.status_code\n if status == 400:\n raise ValueError('Check parameters')\n if status == 500:\n raise ValueError('DQR Webservice Temporarily Down')\n\n # Get data and run through each dqr\n dqrs = req.text.splitlines()\n time = ds['time'].values\n dqr_results = {}\n for line in dqrs:\n line = line.split('|')\n dqr_no = line[0]\n\n # Exclude DQRs if in list\n if exclude is not None and dqr_no in exclude:\n continue\n\n # Only include if in include list\n if include is not None and dqr_no not in include:\n continue\n\n starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))\n endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))\n ind = np.where((time >= starttime) & (time <= endtime))\n if ind[0].size == 0:\n continue\n\n if dqr_no in dqr_results.keys():\n dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)\n else:\n dqr_results[dqr_no] = {\n 'index': ind,\n 'test_assessment': line[3],\n 'test_meaning': ': '.join([dqr_no, line[-1]]),\n }\n if dqr_link:\n print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)\n print(dqr_no, '-', line[3], ':', print_url)\n for key, value in dqr_results.items():\n try:\n ds.qcfilter.add_test(\n var_name,\n index=value['index'],\n test_meaning=value['test_meaning'],\n test_assessment=value['test_assessment'],\n )\n except IndexError:\n print(f\"Skipping '{var_name}' DQR application because of IndexError\")\n\n if normalize_assessment:\n ds.clean.normalize_assessment(variables=var_name)\n\n return ds\n", "path": "act/qc/arm.py"}], "after_files": [{"content": "\"\"\"\nFunctions specifically for working with QC/DQRs from\nthe Atmospheric Radiation Measurement Program (ARM).\n\n\"\"\"\n\nimport datetime as dt\nimport numpy as np\nimport requests\n\nfrom act.config import DEFAULT_DATASTREAM_NAME\n\n\ndef add_dqr_to_qc(\n ds,\n variable=None,\n assessment='incorrect,suspect',\n exclude=None,\n include=None,\n normalize_assessment=True,\n cleanup_qc=True,\n dqr_link=False,\n skip_location_vars=False,\n):\n \"\"\"\n Function to query the ARM DQR web service for reports and\n add as a new quality control test to ancillary quality control\n variable. 
If no anicllary quality control variable exist a new\n one will be created and lined to the data variable through\n ancillary_variables attribure.\n\n See online documentation from ARM Data\n Quality Office on the use of the DQR web service.\n\n https://code.arm.gov/docs/dqrws-examples/wikis/home\n\n Information about the DQR web-service avaible at\n https://adc.arm.gov/dqrws/\n\n Parameters\n ----------\n ds : xarray.Dataset\n Xarray dataset\n variable : string, or list of str, or None\n Variables to check DQR web service. If set to None will\n attempt to update all variables.\n assessment : string\n assessment type to get DQRs. Current options include\n 'missing', 'suspect', 'incorrect' or any combination separated\n by a comma.\n exclude : list of strings\n DQR IDs to exclude from adding into QC\n include : list of strings\n List of DQR IDs to include in flagging of data. Any other DQR IDs\n will be ignored.\n normalize_assessment : boolean\n The DQR assessment term is different than the embedded QC\n term. Embedded QC uses \"Bad\" and \"Indeterminate\" while\n DQRs use \"Incorrect\" and \"Suspect\". Setting this will ensure\n the same terms are used for both.\n cleanup_qc : boolean\n Call clean.cleanup() method to convert to standardized ancillary\n quality control variables. Has a little bit of overhead so\n if the Dataset has already been cleaned up, no need to run.\n dqr_link : boolean\n Prints out a link for each DQR to read the full DQR. Defaults to False\n skip_location_vars : boolean\n Does not apply DQRs to location variables. This can be useful in the event\n the submitter has erroneously selected all variables.\n\n Returns\n -------\n ds : xarray.Dataset\n Xarray dataset containing new quality control variables\n\n Examples\n --------\n .. code-block:: python\n\n from act.qc.arm import add_dqr_to_qc\n ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])\n\n\n \"\"\"\n\n # DQR Webservice goes off datastreams, pull from the dataset\n if 'datastream' in ds.attrs:\n datastream = ds.attrs['datastream']\n elif '_datastream' in ds.attrs:\n datastream = ds.attrs['_datastream']\n else:\n raise ValueError('Dataset does not have datastream attribute')\n\n if datastream == DEFAULT_DATASTREAM_NAME:\n raise ValueError(\"'datastream' name required for DQR service set to default value \"\n f\"{datastream}. Unable to perform DQR service query.\")\n\n # Clean up QC to conform to CF conventions\n if cleanup_qc:\n ds.clean.cleanup()\n\n # In order to properly flag data, get all variables if None. 
Exclude QC variables.\n if variable is None:\n variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))\n\n # Check to ensure variable is list\n if not isinstance(variable, (list, tuple)):\n variable = [variable]\n\n # Loop through each variable and call web service for that variable\n loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']\n for var_name in variable:\n if skip_location_vars:\n if var_name in loc_vars:\n continue\n # Create URL\n url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='\n url += datastream\n url += '&varname=' + var_name\n url += ''.join(\n [\n '&searchmetric=',\n assessment,\n '&dqrfields=dqrid,starttime,endtime,metric,subject',\n ]\n )\n\n # Call web service\n req = requests.get(url)\n\n # Check status values and raise error if not successful\n status = req.status_code\n if status == 400:\n raise ValueError('Check parameters')\n if status == 500:\n raise ValueError('DQR Webservice Temporarily Down')\n\n # Get data and run through each dqr\n dqrs = req.text.splitlines()\n time = ds['time'].values\n dqr_results = {}\n for line in dqrs:\n line = line.split('|')\n dqr_no = line[0]\n\n # Exclude DQRs if in list\n if exclude is not None and dqr_no in exclude:\n continue\n\n # Only include if in include list\n if include is not None and dqr_no not in include:\n continue\n\n starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))\n endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))\n ind = np.where((time >= starttime) & (time <= endtime))\n\n if ind[0].size == 0:\n continue\n\n if 'time' not in ds[var_name].dims:\n ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))\n\n if dqr_no in dqr_results.keys():\n dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)\n else:\n dqr_results[dqr_no] = {\n 'index': ind,\n 'test_assessment': line[3],\n 'test_meaning': ': '.join([dqr_no, line[-1]]),\n }\n if dqr_link:\n print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)\n print(dqr_no, '-', line[3], ':', print_url)\n for key, value in dqr_results.items():\n try:\n ds.qcfilter.add_test(\n var_name,\n index=value['index'],\n test_meaning=value['test_meaning'],\n test_assessment=value['test_assessment'],\n )\n except IndexError:\n print(f\"Skipping '{var_name}' DQR application because of IndexError\")\n\n if normalize_assessment:\n ds.clean.normalize_assessment(variables=var_name)\n\n return ds\n", "path": "act/qc/arm.py"}]} | 2,432 | 475 |
gh_patches_debug_24209 | rasdani/github-patches | git_diff | nilearn__nilearn-3969 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] order of contributors
Adding the citation.cff file makes it possible to "cite" nilearn from the GitHub UI in BibTeX or APA format.
See below.
By default it will list contributors in the order they appear in the citation.cff file, so alphabetically for now.
Opening an issue to discuss whether we want a different order and, if so, which one.
```bibtex
@software{Chamma_nilearn,
author = {Chamma, Ahmad and Frau-Pascual, Aina and Rothberg, Alex and Abadie, Alexandre and Abraham, Alexandre and Gramfort, Alexandre and Savio, Alexandre and Thual, Alexis and Kanaan, Amadeus and Pinho, Ana Luisa and Idrobo, Andrés Hoyos and Kieslinger, Anne-Sophie and Rokem, Ariel and Mensch, Arthur and Duran, Audrey and Cipollini, Ben and Thirion, Bertrand and Nguyen, Binh and Cakan, Caglar and Gorgolewski, Chris and Markiewicz, Chris and Horea, Christian and Gerloff, Christian and Reininger, Colin and Lane, Connor and Delettre, Céline and Gale, Dan and Gomez, Daniel and Bzdok, Danilo and Ellis, David G and Wassermann, Demian and Pisner, Derek and Orfanos, Dimitri Papadopoulos and DuPre, Elizabeth and Dohmatob, Elvis and Larson, Eric and Edmond, Evan and Pedregosa, Fabian and Liem, Franz and Varoquaux, Gael and Hollander, Gilles de and Kiar, Greg and Gilmore, Greydon and Lemaitre, Guillaume and Wang, Hao-Ting and Aggarwal, Himanshu and Abenes, Ian and Vogel, Jake and Margeta, Jan and Grobler, Jaques and Gors, Jason and Rasero, Javier and Kossaifi, Jean and King, Jean-Rémi and Dalenberg, Jelle Roelof and Lefort-Besnard, Jeremy and Dockes, Jerome and Chevalier, Jerome-Alexis and Wiesner, Johannes and Gorrono, Jon Haitz Legarreta and Sassenhagen, Jona and Teves, Joshua and Huntenburg, Julia and Peraza, Julio A and Daddy, Kamalakar Reddy and Sitek, Kevin and Helwegen, Koen and Shmelkov, Konstantin and Chawla, Kshitij and CHEN, Kun and Sasse, Leonard and Estève, Loic and Tetrel, Loic and Paz, Luz and Pietrantoni, Manon and Perez-Guevara, Martin and Wegrzyn, Martin and Goncalves, Mathias and Ekman, Matthias and Joulot, Matthieu and Rahim, Mehdi and Eickenberg, Michael and Hanke, Michael and Notter, Michael and Waskom, Michael and Wang, Michelle and Torabi, Mohammad and Boos, Moritz and Song, Myeong Seop and Clarke, Natasha and Shah, Neelay and Gensollen, Nicolas and Esteban, Oscar and Bogdan, Paul and Sanz-Leon, Paula and Herholz, Peer and Gervais, Philippe and Bellec, Pierre and Glaser, Pierre and Quirion, Pierre-Olivier and Raamana, Pradeep Reddy and Meudec, Raphael and Luke, Robert and Williamson, Robert and Guidotti, Roberto and Phlypo, Ronald and Hammonds, Ryan and Gau, Rémi and Patalasingh, Sachin and Hahn, Sage and Bougacha, Salma and Johnson, Sam Buck and Jawhar, Sami and Steinkamp, Simon and Singh, Sourav and Meisler, Steven and Lan, Sylvain and Takerkart, Sylvain and Samanta, Tarun and Salo, Taylor and Bazeille, Thomas and Vanasse, Tom and Diogo, Vasco and Michel, Vincent and Fritsch, Virgile and Halchenko, Yaroslav and Mzayek, Yasmin and Baratz, Zvi and Nájera, Óscar},
license = {BSD-4-Clause},
title = {{nilearn}},
url = {https://github.com/nilearn/nilearn}
}
```
--- END ISSUE ---
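One possible convention, sketched below, is to keep the individual authors sorted alphabetically but put a collective "Nilearn contributors" entry first, so the generated citation leads with the project rather than with whichever contributor happens to sort first. The snippet only illustrates the idea with two names taken from the list above; it is not the project's actual tooling.

```python
# Sketch: sort individuals by given name, then lead with a collective entry.
authors = [
    {"given-names": "Bertrand", "family-names": "Thirion"},
    {"given-names": "Alexandre", "family-names": "Abraham"},
]
authors.sort(key=lambda a: a["given-names"])
authors = [{"family-names": "Nilearn contributors"}] + authors
print([a.get("family-names") for a in authors])
# ['Nilearn contributors', 'Abraham', 'Thirion']
```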
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maint_tools/citation_cff_maint.py`
Content:
```
1 """Update AUTHORS and names from CITATION.cff file."""
2 from __future__ import annotations
3
4 from pathlib import Path
5 from typing import Any
6
7 import ruamel.yaml
8
9 yaml = ruamel.yaml.YAML()
10 yaml.indent(mapping=2, sequence=4, offset=2)
11
12 CORE_DEVS = [
13 "Alexis Thual",
14 "Bertrand Thirion",
15 "Elizabeth DuPre",
16 "Hao-Ting Wang",
17 "Jerome Dockes",
18 "Nicolas Gensollen",
19 "Rémi Gau",
20 "Taylor Salo",
21 "Yasmin Mzayek",
22 ]
23
24
25 def root_dir() -> Path:
26 """Return path to root directory."""
27 return Path(__file__).parent.parent
28
29
30 def names_rst() -> Path:
31 """Return path to names.rst file."""
32 return root_dir() / "doc" / "changes" / "names.rst"
33
34
35 def citation_file() -> Path:
36 """Return path to CITATIONS.cff file."""
37 return root_dir() / "CITATION.cff"
38
39
40 def authors_file() -> Path:
41 """Return path to AUTHORS.rst file."""
42 return root_dir() / "AUTHORS.rst"
43
44
45 def read_citation_cff() -> dict[str, Any]:
46 """Read CITATION.cff file."""
47 print(f"Reading file: {citation_file()}")
48 with open(citation_file(), encoding="utf8") as f:
49 citation = yaml.load(f)
50 return citation
51
52
53 def write_citation_cff(citation: dict[str, Any]) -> None:
54 """Write CITATION.cff file."""
55 print(f"Writing file: {citation_file()}")
56 with open(citation_file(), "w", encoding="utf8") as f:
57 yaml.dump(citation, f)
58
59
60 def write_names_rst(citation: list[dict[str, str]]) -> None:
61 """Write names.rst file."""
62 print(f"Writing file: {names_rst()}")
63 with open(names_rst(), "w", encoding="utf8") as f:
64 header = """.. This file is automatically generated.
65 Do not edit manually.
66 If you want to add to add yourself to the list of authors,
67 please edit CITATION.cff and run maint_tools/citation_cff_maint.py.
68
69 """
70 print(header, file=f)
71
72 for i, author in enumerate(citation["authors"]):
73 line = (
74 f'.. _{author["given-names"]} {author["family-names"]}: '
75 f'{author["website"]}'
76 )
77 print(line, file=f)
78 if i < len(citation["authors"]) - 1:
79 print("", file=f)
80
81
82 def read_authors_file() -> list[str]:
83 """Read AUTHORS.rst file."""
84 print(f"Reading file: {authors_file()}")
85 with open(authors_file(), encoding="utf8") as f:
86 authors_file_content = f.readlines()
87 return authors_file_content
88
89
90 def write_authors_file(authors: list[dict[str, str]]) -> None:
91 """Write AUTHORS.rst file."""
92 authors_file_content = read_authors_file()
93 print(f"Writing file: {authors_file()}")
94 with open(authors_file(), "w", encoding="utf8") as f:
95 writing_team_section = False
96 for line in authors_file_content:
97 if ".. CORE DEV SECTION STARTS HERE" in line:
98 writing_team_section = True
99 write_team_section(f, authors)
100 if "Funding" in line:
101 writing_team_section = False
102 if not writing_team_section:
103 f.write(line)
104
105
106 def write_team_section(f, authors: list[dict[str, str]]) -> None:
107 """Write team section."""
108 print(" Updating team section")
109 f.write(
110 """.. CORE DEV SECTION STARTS HERE
111 The Core developers section is added automatically
112 and should not be edited manually.
113
114 .. _core_devs:
115
116 Core developers
117 ...............
118
119 The nilearn core developers are:
120
121 """
122 )
123
124 write_core_devs(f)
125
126 f.write(
127 """.. CORE DEV SECTION ENDS HERE
128 """
129 )
130
131 f.write(
132 """
133 .. OTHER CONTRIBUTION SECTION STARTS HERE
134 The Other contributors section is added automatically
135 and should not be edited manually.
136
137 Other contributors
138 ..................
139
140 Some other past or present contributors are:
141
142 """
143 )
144 for author_ in authors:
145 f.write(f"* `{author_['given-names']} {author_['family-names']}`_")
146 if author_.get("affiliation"):
147 f.write(f": {author_['affiliation']}")
148 f.write("\n")
149
150 f.write(
151 """
152 .. OTHER CONTRIBUTION SECTION ENDS HERE
153
154 """
155 )
156
157
158 def write_core_devs(f):
159 """Add core devs."""
160 for dev in CORE_DEVS:
161 f.write(f"* `{dev}`_\n")
162 f.write("\n")
163
164
165 def sort_authors(authors: list[dict[str, str]]) -> list[dict[str, str]]:
166 """Sort authors by given name."""
167 print(" Sorting authors by given name")
168 authors.sort(key=lambda x: x["given-names"])
169 return authors
170
171
172 def count_authors() -> int:
173 """Count authors in names.rst."""
174 nb_authors = 0
175 with open(names_rst(), encoding="utf8") as f:
176 # count authors
177 lines = f.readlines()
178 for line in lines:
179 if line.startswith(".. _"):
180 nb_authors += 1
181 return nb_authors
182
183
184 def main():
185 """Update names.rst and AUTHORS.rst files."""
186 citation = read_citation_cff()
187 citation["authors"] = sort_authors(citation["authors"])
188
189 nb_authors = count_authors()
190 write_names_rst(citation)
191 new_nb_authors = count_authors()
192 # Sanity check to make sure we have not lost anyone
193 assert nb_authors <= new_nb_authors
194
195 write_citation_cff(citation)
196
197 write_authors_file(citation["authors"])
198
199
200 if __name__ == "__main__":
201 main()
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maint_tools/citation_cff_maint.py b/maint_tools/citation_cff_maint.py
--- a/maint_tools/citation_cff_maint.py
+++ b/maint_tools/citation_cff_maint.py
@@ -181,9 +181,25 @@
return nb_authors
+def remove_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:
+ """Remove consortium from authors."""
+ authors = [
+ author
+ for author in authors
+ if author["family-names"] != "Nilearn contributors"
+ ]
+ return authors
+
+
+def add_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:
+ """Add consortium to authors."""
+ return [{"family-names": "Nilearn contributors"}] + authors
+
+
def main():
"""Update names.rst and AUTHORS.rst files."""
citation = read_citation_cff()
+ citation["authors"] = remove_consortium(citation["authors"])
citation["authors"] = sort_authors(citation["authors"])
nb_authors = count_authors()
@@ -192,10 +208,11 @@
# Sanity check to make sure we have not lost anyone
assert nb_authors <= new_nb_authors
- write_citation_cff(citation)
-
write_authors_file(citation["authors"])
+ citation["authors"] = add_consortium(citation["authors"])
+ write_citation_cff(citation)
+
if __name__ == "__main__":
main()
| {"golden_diff": "diff --git a/maint_tools/citation_cff_maint.py b/maint_tools/citation_cff_maint.py\n--- a/maint_tools/citation_cff_maint.py\n+++ b/maint_tools/citation_cff_maint.py\n@@ -181,9 +181,25 @@\n return nb_authors\n \n \n+def remove_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n+ \"\"\"Remove consortium from authors.\"\"\"\n+ authors = [\n+ author\n+ for author in authors\n+ if author[\"family-names\"] != \"Nilearn contributors\"\n+ ]\n+ return authors\n+\n+\n+def add_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n+ \"\"\"Add consortium to authors.\"\"\"\n+ return [{\"family-names\": \"Nilearn contributors\"}] + authors\n+\n+\n def main():\n \"\"\"Update names.rst and AUTHORS.rst files.\"\"\"\n citation = read_citation_cff()\n+ citation[\"authors\"] = remove_consortium(citation[\"authors\"])\n citation[\"authors\"] = sort_authors(citation[\"authors\"])\n \n nb_authors = count_authors()\n@@ -192,10 +208,11 @@\n # Sanity check to make sure we have not lost anyone\n assert nb_authors <= new_nb_authors\n \n- write_citation_cff(citation)\n-\n write_authors_file(citation[\"authors\"])\n \n+ citation[\"authors\"] = add_consortium(citation[\"authors\"])\n+ write_citation_cff(citation)\n+\n \n if __name__ == \"__main__\":\n main()\n", "issue": "[DOC] order of contributors\nAdding the citation.cff allows to \"cite\" nilearn from the github UI in bibtex or APA format. \r\nSee below.\r\n\r\nBy default it will list contributors in the order they appear in the citation.cff file: so alphabetically for now.\r\n\r\nOpening an issue to discuss if we want a different order and if so which one.\r\n\r\n\r\n```bibtex\r\n@software{Chamma_nilearn,\r\nauthor = {Chamma, Ahmad and Frau-Pascual, Aina and Rothberg, Alex and Abadie, Alexandre and Abraham, Alexandre and Gramfort, Alexandre and Savio, Alexandre and Thual, Alexis and Kanaan, Amadeus and Pinho, Ana Luisa and Idrobo, Andr\u00e9s Hoyos and Kieslinger, Anne-Sophie and Rokem, Ariel and Mensch, Arthur and Duran, Audrey and Cipollini, Ben and Thirion, Bertrand and Nguyen, Binh and Cakan, Caglar and Gorgolewski, Chris and Markiewicz, Chris and Horea, Christian and Gerloff, Christian and Reininger, Colin and Lane, Connor and Delettre, C\u00e9line and Gale, Dan and Gomez, Daniel and Bzdok, Danilo and Ellis, David G and Wassermann, Demian and Pisner, Derek and Orfanos, Dimitri Papadopoulos and DuPre, Elizabeth and Dohmatob, Elvis and Larson, Eric and Edmond, Evan and Pedregosa, Fabian and Liem, Franz and Varoquaux, Gael and Hollander, Gilles de and Kiar, Greg and Gilmore, Greydon and Lemaitre, Guillaume and Wang, Hao-Ting and Aggarwal, Himanshu and Abenes, Ian and Vogel, Jake and Margeta, Jan and Grobler, Jaques and Gors, Jason and Rasero, Javier and Kossaifi, Jean and King, Jean-R\u00e9mi and Dalenberg, Jelle Roelof and Lefort-Besnard, Jeremy and Dockes, Jerome and Chevalier, Jerome-Alexis and Wiesner, Johannes and Gorrono, Jon Haitz Legarreta and Sassenhagen, Jona and Teves, Joshua and Huntenburg, Julia and Peraza, Julio A and Daddy, Kamalakar Reddy and Sitek, Kevin and Helwegen, Koen and Shmelkov, Konstantin and Chawla, Kshitij and CHEN, Kun and Sasse, Leonard and Est\u00e8ve, Loic and Tetrel, Loic and Paz, Luz and Pietrantoni, Manon and Perez-Guevara, Martin and Wegrzyn, Martin and Goncalves, Mathias and Ekman, Matthias and Joulot, Matthieu and Rahim, Mehdi and Eickenberg, Michael and Hanke, Michael and Notter, Michael and Waskom, Michael and Wang, Michelle and Torabi, Mohammad and Boos, Moritz and Song, 
Myeong Seop and Clarke, Natasha and Shah, Neelay and Gensollen, Nicolas and Esteban, Oscar and Bogdan, Paul and Sanz-Leon, Paula and Herholz, Peer and Gervais, Philippe and Bellec, Pierre and Glaser, Pierre and Quirion, Pierre-Olivier and Raamana, Pradeep Reddy and Meudec, Raphael and Luke, Robert and Williamson, Robert and Guidotti, Roberto and Phlypo, Ronald and Hammonds, Ryan and Gau, R\u00e9mi and Patalasingh, Sachin and Hahn, Sage and Bougacha, Salma and Johnson, Sam Buck and Jawhar, Sami and Steinkamp, Simon and Singh, Sourav and Meisler, Steven and Lan, Sylvain and Takerkart, Sylvain and Samanta, Tarun and Salo, Taylor and Bazeille, Thomas and Vanasse, Tom and Diogo, Vasco and Michel, Vincent and Fritsch, Virgile and Halchenko, Yaroslav and Mzayek, Yasmin and Baratz, Zvi and N\u00e1jera, \u00d3scar},\r\nlicense = {BSD-4-Clause},\r\ntitle = {{nilearn}},\r\nurl = {https://github.com/nilearn/nilearn}\r\n}\r\n```\n", "before_files": [{"content": "\"\"\"Update AUTHORS and names from CITATION.cff file.\"\"\"\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport ruamel.yaml\n\nyaml = ruamel.yaml.YAML()\nyaml.indent(mapping=2, sequence=4, offset=2)\n\nCORE_DEVS = [\n \"Alexis Thual\",\n \"Bertrand Thirion\",\n \"Elizabeth DuPre\",\n \"Hao-Ting Wang\",\n \"Jerome Dockes\",\n \"Nicolas Gensollen\",\n \"R\u00e9mi Gau\",\n \"Taylor Salo\",\n \"Yasmin Mzayek\",\n]\n\n\ndef root_dir() -> Path:\n \"\"\"Return path to root directory.\"\"\"\n return Path(__file__).parent.parent\n\n\ndef names_rst() -> Path:\n \"\"\"Return path to names.rst file.\"\"\"\n return root_dir() / \"doc\" / \"changes\" / \"names.rst\"\n\n\ndef citation_file() -> Path:\n \"\"\"Return path to CITATIONS.cff file.\"\"\"\n return root_dir() / \"CITATION.cff\"\n\n\ndef authors_file() -> Path:\n \"\"\"Return path to AUTHORS.rst file.\"\"\"\n return root_dir() / \"AUTHORS.rst\"\n\n\ndef read_citation_cff() -> dict[str, Any]:\n \"\"\"Read CITATION.cff file.\"\"\"\n print(f\"Reading file: {citation_file()}\")\n with open(citation_file(), encoding=\"utf8\") as f:\n citation = yaml.load(f)\n return citation\n\n\ndef write_citation_cff(citation: dict[str, Any]) -> None:\n \"\"\"Write CITATION.cff file.\"\"\"\n print(f\"Writing file: {citation_file()}\")\n with open(citation_file(), \"w\", encoding=\"utf8\") as f:\n yaml.dump(citation, f)\n\n\ndef write_names_rst(citation: list[dict[str, str]]) -> None:\n \"\"\"Write names.rst file.\"\"\"\n print(f\"Writing file: {names_rst()}\")\n with open(names_rst(), \"w\", encoding=\"utf8\") as f:\n header = \"\"\".. This file is automatically generated.\n Do not edit manually.\n If you want to add to add yourself to the list of authors,\n please edit CITATION.cff and run maint_tools/citation_cff_maint.py.\n\n\"\"\"\n print(header, file=f)\n\n for i, author in enumerate(citation[\"authors\"]):\n line = (\n f'.. 
_{author[\"given-names\"]} {author[\"family-names\"]}: '\n f'{author[\"website\"]}'\n )\n print(line, file=f)\n if i < len(citation[\"authors\"]) - 1:\n print(\"\", file=f)\n\n\ndef read_authors_file() -> list[str]:\n \"\"\"Read AUTHORS.rst file.\"\"\"\n print(f\"Reading file: {authors_file()}\")\n with open(authors_file(), encoding=\"utf8\") as f:\n authors_file_content = f.readlines()\n return authors_file_content\n\n\ndef write_authors_file(authors: list[dict[str, str]]) -> None:\n \"\"\"Write AUTHORS.rst file.\"\"\"\n authors_file_content = read_authors_file()\n print(f\"Writing file: {authors_file()}\")\n with open(authors_file(), \"w\", encoding=\"utf8\") as f:\n writing_team_section = False\n for line in authors_file_content:\n if \".. CORE DEV SECTION STARTS HERE\" in line:\n writing_team_section = True\n write_team_section(f, authors)\n if \"Funding\" in line:\n writing_team_section = False\n if not writing_team_section:\n f.write(line)\n\n\ndef write_team_section(f, authors: list[dict[str, str]]) -> None:\n \"\"\"Write team section.\"\"\"\n print(\" Updating team section\")\n f.write(\n \"\"\".. CORE DEV SECTION STARTS HERE\n The Core developers section is added automatically\n and should not be edited manually.\n\n.. _core_devs:\n\nCore developers\n...............\n\nThe nilearn core developers are:\n\n\"\"\"\n )\n\n write_core_devs(f)\n\n f.write(\n \"\"\".. CORE DEV SECTION ENDS HERE\n\"\"\"\n )\n\n f.write(\n \"\"\"\n.. OTHER CONTRIBUTION SECTION STARTS HERE\n The Other contributors section is added automatically\n and should not be edited manually.\n\nOther contributors\n..................\n\nSome other past or present contributors are:\n\n\"\"\"\n )\n for author_ in authors:\n f.write(f\"* `{author_['given-names']} {author_['family-names']}`_\")\n if author_.get(\"affiliation\"):\n f.write(f\": {author_['affiliation']}\")\n f.write(\"\\n\")\n\n f.write(\n \"\"\"\n.. OTHER CONTRIBUTION SECTION ENDS HERE\n\n\"\"\"\n )\n\n\ndef write_core_devs(f):\n \"\"\"Add core devs.\"\"\"\n for dev in CORE_DEVS:\n f.write(f\"* `{dev}`_\\n\")\n f.write(\"\\n\")\n\n\ndef sort_authors(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n \"\"\"Sort authors by given name.\"\"\"\n print(\" Sorting authors by given name\")\n authors.sort(key=lambda x: x[\"given-names\"])\n return authors\n\n\ndef count_authors() -> int:\n \"\"\"Count authors in names.rst.\"\"\"\n nb_authors = 0\n with open(names_rst(), encoding=\"utf8\") as f:\n # count authors\n lines = f.readlines()\n for line in lines:\n if line.startswith(\".. 
_\"):\n nb_authors += 1\n return nb_authors\n\n\ndef main():\n \"\"\"Update names.rst and AUTHORS.rst files.\"\"\"\n citation = read_citation_cff()\n citation[\"authors\"] = sort_authors(citation[\"authors\"])\n\n nb_authors = count_authors()\n write_names_rst(citation)\n new_nb_authors = count_authors()\n # Sanity check to make sure we have not lost anyone\n assert nb_authors <= new_nb_authors\n\n write_citation_cff(citation)\n\n write_authors_file(citation[\"authors\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "maint_tools/citation_cff_maint.py"}], "after_files": [{"content": "\"\"\"Update AUTHORS and names from CITATION.cff file.\"\"\"\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport ruamel.yaml\n\nyaml = ruamel.yaml.YAML()\nyaml.indent(mapping=2, sequence=4, offset=2)\n\nCORE_DEVS = [\n \"Alexis Thual\",\n \"Bertrand Thirion\",\n \"Elizabeth DuPre\",\n \"Hao-Ting Wang\",\n \"Jerome Dockes\",\n \"Nicolas Gensollen\",\n \"R\u00e9mi Gau\",\n \"Taylor Salo\",\n \"Yasmin Mzayek\",\n]\n\n\ndef root_dir() -> Path:\n \"\"\"Return path to root directory.\"\"\"\n return Path(__file__).parent.parent\n\n\ndef names_rst() -> Path:\n \"\"\"Return path to names.rst file.\"\"\"\n return root_dir() / \"doc\" / \"changes\" / \"names.rst\"\n\n\ndef citation_file() -> Path:\n \"\"\"Return path to CITATIONS.cff file.\"\"\"\n return root_dir() / \"CITATION.cff\"\n\n\ndef authors_file() -> Path:\n \"\"\"Return path to AUTHORS.rst file.\"\"\"\n return root_dir() / \"AUTHORS.rst\"\n\n\ndef read_citation_cff() -> dict[str, Any]:\n \"\"\"Read CITATION.cff file.\"\"\"\n print(f\"Reading file: {citation_file()}\")\n with open(citation_file(), encoding=\"utf8\") as f:\n citation = yaml.load(f)\n return citation\n\n\ndef write_citation_cff(citation: dict[str, Any]) -> None:\n \"\"\"Write CITATION.cff file.\"\"\"\n print(f\"Writing file: {citation_file()}\")\n with open(citation_file(), \"w\", encoding=\"utf8\") as f:\n yaml.dump(citation, f)\n\n\ndef write_names_rst(citation: list[dict[str, str]]) -> None:\n \"\"\"Write names.rst file.\"\"\"\n print(f\"Writing file: {names_rst()}\")\n with open(names_rst(), \"w\", encoding=\"utf8\") as f:\n header = \"\"\".. This file is automatically generated.\n Do not edit manually.\n If you want to add to add yourself to the list of authors,\n please edit CITATION.cff and run maint_tools/citation_cff_maint.py.\n\n\"\"\"\n print(header, file=f)\n\n for i, author in enumerate(citation[\"authors\"]):\n line = (\n f'.. _{author[\"given-names\"]} {author[\"family-names\"]}: '\n f'{author[\"website\"]}'\n )\n print(line, file=f)\n if i < len(citation[\"authors\"]) - 1:\n print(\"\", file=f)\n\n\ndef read_authors_file() -> list[str]:\n \"\"\"Read AUTHORS.rst file.\"\"\"\n print(f\"Reading file: {authors_file()}\")\n with open(authors_file(), encoding=\"utf8\") as f:\n authors_file_content = f.readlines()\n return authors_file_content\n\n\ndef write_authors_file(authors: list[dict[str, str]]) -> None:\n \"\"\"Write AUTHORS.rst file.\"\"\"\n authors_file_content = read_authors_file()\n print(f\"Writing file: {authors_file()}\")\n with open(authors_file(), \"w\", encoding=\"utf8\") as f:\n writing_team_section = False\n for line in authors_file_content:\n if \".. 
CORE DEV SECTION STARTS HERE\" in line:\n writing_team_section = True\n write_team_section(f, authors)\n if \"Funding\" in line:\n writing_team_section = False\n if not writing_team_section:\n f.write(line)\n\n\ndef write_team_section(f, authors: list[dict[str, str]]) -> None:\n \"\"\"Write team section.\"\"\"\n print(\" Updating team section\")\n f.write(\n \"\"\".. CORE DEV SECTION STARTS HERE\n The Core developers section is added automatically\n and should not be edited manually.\n\n.. _core_devs:\n\nCore developers\n...............\n\nThe nilearn core developers are:\n\n\"\"\"\n )\n\n write_core_devs(f)\n\n f.write(\n \"\"\".. CORE DEV SECTION ENDS HERE\n\"\"\"\n )\n\n f.write(\n \"\"\"\n.. OTHER CONTRIBUTION SECTION STARTS HERE\n The Other contributors section is added automatically\n and should not be edited manually.\n\nOther contributors\n..................\n\nSome other past or present contributors are:\n\n\"\"\"\n )\n for author_ in authors:\n f.write(f\"* `{author_['given-names']} {author_['family-names']}`_\")\n if author_.get(\"affiliation\"):\n f.write(f\": {author_['affiliation']}\")\n f.write(\"\\n\")\n\n f.write(\n \"\"\"\n.. OTHER CONTRIBUTION SECTION ENDS HERE\n\n\"\"\"\n )\n\n\ndef write_core_devs(f):\n \"\"\"Add core devs.\"\"\"\n for dev in CORE_DEVS:\n f.write(f\"* `{dev}`_\\n\")\n f.write(\"\\n\")\n\n\ndef sort_authors(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n \"\"\"Sort authors by given name.\"\"\"\n print(\" Sorting authors by given name\")\n authors.sort(key=lambda x: x[\"given-names\"])\n return authors\n\n\ndef count_authors() -> int:\n \"\"\"Count authors in names.rst.\"\"\"\n nb_authors = 0\n with open(names_rst(), encoding=\"utf8\") as f:\n # count authors\n lines = f.readlines()\n for line in lines:\n if line.startswith(\".. _\"):\n nb_authors += 1\n return nb_authors\n\n\ndef remove_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n \"\"\"Remove consortium from authors.\"\"\"\n authors = [\n author\n for author in authors\n if author[\"family-names\"] != \"Nilearn contributors\"\n ]\n return authors\n\n\ndef add_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n \"\"\"Add consortium to authors.\"\"\"\n return [{\"family-names\": \"Nilearn contributors\"}] + authors\n\n\ndef main():\n \"\"\"Update names.rst and AUTHORS.rst files.\"\"\"\n citation = read_citation_cff()\n citation[\"authors\"] = remove_consortium(citation[\"authors\"])\n citation[\"authors\"] = sort_authors(citation[\"authors\"])\n\n nb_authors = count_authors()\n write_names_rst(citation)\n new_nb_authors = count_authors()\n # Sanity check to make sure we have not lost anyone\n assert nb_authors <= new_nb_authors\n\n write_authors_file(citation[\"authors\"])\n\n citation[\"authors\"] = add_consortium(citation[\"authors\"])\n write_citation_cff(citation)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "maint_tools/citation_cff_maint.py"}]} | 3,012 | 363 |
gh_patches_debug_3003 | rasdani/github-patches | git_diff | lightly-ai__lightly-656 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect inputsize for BarlowTwins Lightning Example Code
Should the input_size in [1] be `32` instead of `224`?
In [2], we use `input_size=32`.
[1] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch_lightning/barlowtwins.py#L44
[2] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch/barlowtwins.py#L35
--- END ISSUE ---
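For reference, CIFAR-10 images are 32x32 pixels, so a crop size of 224 would upscale far beyond the source resolution; `input_size=32` matches the pure-PyTorch example referenced as [2]. A minimal sketch of the corrected collate function, with the rest of the script assumed unchanged:

```python
from lightly.data import ImageCollateFunction

# CIFAR-10 images are 32x32, so the augmentation/crop size should match.
collate_fn = ImageCollateFunction(input_size=32)
```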
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/pytorch_lightning/barlowtwins.py`
Content:
```
1 import torch
2 from torch import nn
3 import torchvision
4 import pytorch_lightning as pl
5
6 from lightly.data import LightlyDataset
7 from lightly.data import ImageCollateFunction
8 from lightly.loss import BarlowTwinsLoss
9 from lightly.models.modules import BarlowTwinsProjectionHead
10
11
12 class BarlowTwins(pl.LightningModule):
13 def __init__(self):
14 super().__init__()
15 resnet = torchvision.models.resnet18()
16 self.backbone = nn.Sequential(*list(resnet.children())[:-1])
17 self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)
18 self.criterion = BarlowTwinsLoss()
19
20 def forward(self, x):
21 x = self.backbone(x).flatten(start_dim=1)
22 z = self.projection_head(x)
23 return z
24
25 def training_step(self, batch, batch_index):
26 (x0, x1), _, _ = batch
27 z0 = self.forward(x0)
28 z1 = self.forward(x1)
29 loss = self.criterion(z0, z1)
30 return loss
31
32 def configure_optimizers(self):
33 optim = torch.optim.SGD(self.parameters(), lr=0.06)
34 return optim
35
36
37 model = BarlowTwins()
38
39 cifar10 = torchvision.datasets.CIFAR10("datasets/cifar10", download=True)
40 dataset = LightlyDataset.from_torch_dataset(cifar10)
41 # or create a dataset from a folder containing images or videos:
42 # dataset = LightlyDataset("path/to/folder")
43
44 collate_fn = ImageCollateFunction(input_size=224)
45
46 dataloader = torch.utils.data.DataLoader(
47 dataset,
48 batch_size=256,
49 collate_fn=collate_fn,
50 shuffle=True,
51 drop_last=True,
52 num_workers=8,
53 )
54
55 gpus = 1 if torch.cuda.is_available() else 0
56
57 trainer = pl.Trainer(max_epochs=10, gpus=gpus)
58 trainer.fit(model=model, train_dataloaders=dataloader)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/pytorch_lightning/barlowtwins.py b/examples/pytorch_lightning/barlowtwins.py
--- a/examples/pytorch_lightning/barlowtwins.py
+++ b/examples/pytorch_lightning/barlowtwins.py
@@ -41,7 +41,7 @@
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder")
-collate_fn = ImageCollateFunction(input_size=224)
+collate_fn = ImageCollateFunction(input_size=32)
dataloader = torch.utils.data.DataLoader(
dataset,
| {"golden_diff": "diff --git a/examples/pytorch_lightning/barlowtwins.py b/examples/pytorch_lightning/barlowtwins.py\n--- a/examples/pytorch_lightning/barlowtwins.py\n+++ b/examples/pytorch_lightning/barlowtwins.py\n@@ -41,7 +41,7 @@\n # or create a dataset from a folder containing images or videos:\n # dataset = LightlyDataset(\"path/to/folder\")\n \n-collate_fn = ImageCollateFunction(input_size=224)\n+collate_fn = ImageCollateFunction(input_size=32)\n \n dataloader = torch.utils.data.DataLoader(\n dataset,\n", "issue": "Incorrect inputsize for BarlowTwins Lightning Example Code\nShould the input_size in [1] be `32` instead of `224`?\r\nIn [2], we use `input_size=32`.\r\n\r\n[1] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch_lightning/barlowtwins.py#L44\r\n[2] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch/barlowtwins.py#L35\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom torch import nn\nimport torchvision\nimport pytorch_lightning as pl\n\nfrom lightly.data import LightlyDataset\nfrom lightly.data import ImageCollateFunction\nfrom lightly.loss import BarlowTwinsLoss\nfrom lightly.models.modules import BarlowTwinsProjectionHead\n\n\nclass BarlowTwins(pl.LightningModule):\n def __init__(self):\n super().__init__()\n resnet = torchvision.models.resnet18()\n self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)\n self.criterion = BarlowTwinsLoss()\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z\n\n def training_step(self, batch, batch_index):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(self.parameters(), lr=0.06)\n return optim\n\n\nmodel = BarlowTwins()\n\ncifar10 = torchvision.datasets.CIFAR10(\"datasets/cifar10\", download=True)\ndataset = LightlyDataset.from_torch_dataset(cifar10)\n# or create a dataset from a folder containing images or videos:\n# dataset = LightlyDataset(\"path/to/folder\")\n\ncollate_fn = ImageCollateFunction(input_size=224)\n\ndataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=256,\n collate_fn=collate_fn,\n shuffle=True,\n drop_last=True,\n num_workers=8,\n)\n\ngpus = 1 if torch.cuda.is_available() else 0\n\ntrainer = pl.Trainer(max_epochs=10, gpus=gpus)\ntrainer.fit(model=model, train_dataloaders=dataloader)\n", "path": "examples/pytorch_lightning/barlowtwins.py"}], "after_files": [{"content": "import torch\nfrom torch import nn\nimport torchvision\nimport pytorch_lightning as pl\n\nfrom lightly.data import LightlyDataset\nfrom lightly.data import ImageCollateFunction\nfrom lightly.loss import BarlowTwinsLoss\nfrom lightly.models.modules import BarlowTwinsProjectionHead\n\n\nclass BarlowTwins(pl.LightningModule):\n def __init__(self):\n super().__init__()\n resnet = torchvision.models.resnet18()\n self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)\n self.criterion = BarlowTwinsLoss()\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z\n\n def training_step(self, batch, batch_index):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(self.parameters(), lr=0.06)\n 
return optim\n\n\nmodel = BarlowTwins()\n\ncifar10 = torchvision.datasets.CIFAR10(\"datasets/cifar10\", download=True)\ndataset = LightlyDataset.from_torch_dataset(cifar10)\n# or create a dataset from a folder containing images or videos:\n# dataset = LightlyDataset(\"path/to/folder\")\n\ncollate_fn = ImageCollateFunction(input_size=32)\n\ndataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=256,\n collate_fn=collate_fn,\n shuffle=True,\n drop_last=True,\n num_workers=8,\n)\n\ngpus = 1 if torch.cuda.is_available() else 0\n\ntrainer = pl.Trainer(max_epochs=10, gpus=gpus)\ntrainer.fit(model=model, train_dataloaders=dataloader)\n", "path": "examples/pytorch_lightning/barlowtwins.py"}]} | 934 | 135 |
gh_patches_debug_23350 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5768 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation formatter is turning "--" into "–" (en dash) in the description field
In the [CLI Command Reference](https://www.checkov.io/2.Basics/CLI%20Command%20Reference.html) documentation, all the examples that use `--` options in the "Description" column are coming through as `–` (en dash). This makes it impossible to actually copy these examples for use. They are correct in [the Markdown source file](https://github.com/bridgecrewio/checkov/blob/main/docs/2.Basics/CLI%20Command%20Reference.md). To verify, use the browser search for `--output-file-path` and you'll find only one search result instead of two.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/common/parallelizer/parallel_runner.py`
Content:
```
1 from __future__ import annotations
2
3 import concurrent.futures
4 import logging
5 import multiprocessing
6 import os
7 import platform
8 from collections.abc import Iterator, Iterable
9 from multiprocessing.pool import Pool
10 from typing import Any, List, Generator, Callable, Optional, TypeVar, TYPE_CHECKING
11
12 from checkov.common.models.enums import ParallelizationType
13
14 if TYPE_CHECKING:
15 from multiprocessing.connection import Connection
16
17 _T = TypeVar("_T")
18
19
20 class ParallelRunner:
21 def __init__(
22 self, workers_number: int | None = None, parallelization_type: ParallelizationType = ParallelizationType.FORK
23 ) -> None:
24 self.workers_number = (workers_number if workers_number else os.cpu_count()) or 1
25 self.os = platform.system()
26 self.type: str | ParallelizationType = parallelization_type
27
28 custom_type = os.getenv("CHECKOV_PARALLELIZATION_TYPE")
29 if custom_type:
30 self.type = custom_type.lower()
31
32 if not custom_type and os.getenv("PYCHARM_HOSTED") == "1":
33 # PYCHARM_HOSTED env variable equals 1 when debugging via jetbrains IDE.
34 # To prevent JetBrains IDE from crashing on debug run sequentially
35 self.type = ParallelizationType.NONE
36 elif self.os == "Windows":
37 # 'fork' mode is not supported on 'Windows'
38 # 'spawn' mode results in a strange error, which needs to be investigated on an actual Windows machine
39 self.type = ParallelizationType.THREAD
40
41 def run_function(
42 self,
43 func: Callable[..., _T],
44 items: List[Any],
45 group_size: Optional[int] = None,
46 ) -> Iterable[_T]:
47 if self.type == ParallelizationType.THREAD:
48 return self._run_function_multithreaded(func, items)
49 elif self.type == ParallelizationType.FORK:
50 return self._run_function_multiprocess_fork(func, items, group_size)
51 elif self.type == ParallelizationType.SPAWN:
52 return self._run_function_multiprocess_spawn(func, items, group_size)
53 else:
54 return self._run_function_sequential(func, items)
55
56 def _run_function_multiprocess_fork(
57 self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]
58 ) -> Generator[_T, None, Iterable[_T]]:
59 if multiprocessing.current_process().daemon:
60 # can't fork, when already inside a pool
61 return self._run_function_multithreaded(func, items) # noqa: B901
62
63 if not group_size:
64 group_size = int(len(items) / self.workers_number) + 1
65 groups_of_items = [items[i : i + group_size] for i in range(0, len(items), group_size)]
66
67 def func_wrapper(original_func: Callable[[Any], _T], items_group: List[Any], connection: Connection) -> None:
68 for item in items_group:
69 try:
70 if isinstance(item, tuple):
71 # unpack a tuple to pass multiple arguments to the target function
72 result = original_func(*item)
73 else:
74 result = original_func(item)
75 except Exception:
76 logging.error(
77 f"Failed to invoke function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with {item}",
78 exc_info=True,
79 )
80 result = None
81
82 connection.send(result)
83 connection.close()
84
85 logging.debug(
86 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'fork'"
87 )
88 processes = []
89 for group_of_items in groups_of_items:
90 parent_conn, child_conn = multiprocessing.Pipe(duplex=False)
91 process = multiprocessing.get_context("fork").Process(
92 target=func_wrapper, args=(func, group_of_items, child_conn)
93 )
94 processes.append((process, parent_conn, len(group_of_items)))
95 process.start()
96
97 for _, parent_conn, group_len in processes:
98 for _ in range(group_len):
99 try:
100 yield parent_conn.recv()
101 except EOFError:
102 pass
103
104 return []
105
106 def _run_function_multiprocess_spawn(
107 self, func: Callable[[Any], _T], items: list[Any], group_size: int | None
108 ) -> Iterable[_T]:
109 if multiprocessing.current_process().daemon:
110 # can't create a new pool, when already inside a pool
111 return self._run_function_multithreaded(func, items)
112
113 if not group_size:
114 group_size = int(len(items) / self.workers_number) + 1
115
116 logging.debug(
117 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'spawn'"
118 )
119 with Pool(processes=self.workers_number, context=multiprocessing.get_context("spawn")) as p:
120 if items and isinstance(items[0], tuple):
121 # need to use 'starmap' to pass multiple arguments to the target function
122 return p.starmap(func, items, chunksize=group_size)
123
124 return p.map(func, items, chunksize=group_size)
125
126 def _run_function_multithreaded(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:
127 logging.debug(
128 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'thread'"
129 )
130 with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers_number) as executor:
131 if items and isinstance(items[0], tuple):
132 # split a list of tuple into tuples of the positioned values of the tuple
133 return executor.map(func, *list(zip(*items))) # noqa[B905] # no need to set 'strict' otherwise 'mypy' complains
134
135 return executor.map(func, items)
136
137 def _run_function_sequential(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:
138 logging.debug(
139 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'none'"
140 )
141 if items and isinstance(items[0], tuple):
142 # unpack a tuple to pass multiple arguments to the target function
143 return (func(*item) for item in items)
144
145 return (func(item) for item in items)
146
147
148 parallel_runner = ParallelRunner()
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/common/parallelizer/parallel_runner.py b/checkov/common/parallelizer/parallel_runner.py
--- a/checkov/common/parallelizer/parallel_runner.py
+++ b/checkov/common/parallelizer/parallel_runner.py
@@ -55,10 +55,12 @@
def _run_function_multiprocess_fork(
self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]
- ) -> Generator[_T, None, Iterable[_T]]:
+ ) -> Generator[_T, None, None]:
if multiprocessing.current_process().daemon:
# can't fork, when already inside a pool
- return self._run_function_multithreaded(func, items) # noqa: B901
+ for result in self._run_function_multithreaded(func, items):
+ yield result
+ return
if not group_size:
group_size = int(len(items) / self.workers_number) + 1
@@ -101,8 +103,6 @@
except EOFError:
pass
- return []
-
def _run_function_multiprocess_spawn(
self, func: Callable[[Any], _T], items: list[Any], group_size: int | None
) -> Iterable[_T]:
| {"golden_diff": "diff --git a/checkov/common/parallelizer/parallel_runner.py b/checkov/common/parallelizer/parallel_runner.py\n--- a/checkov/common/parallelizer/parallel_runner.py\n+++ b/checkov/common/parallelizer/parallel_runner.py\n@@ -55,10 +55,12 @@\n \n def _run_function_multiprocess_fork(\n self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]\n- ) -> Generator[_T, None, Iterable[_T]]:\n+ ) -> Generator[_T, None, None]:\n if multiprocessing.current_process().daemon:\n # can't fork, when already inside a pool\n- return self._run_function_multithreaded(func, items) # noqa: B901\n+ for result in self._run_function_multithreaded(func, items):\n+ yield result\n+ return\n \n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n@@ -101,8 +103,6 @@\n except EOFError:\n pass\n \n- return []\n-\n def _run_function_multiprocess_spawn(\n self, func: Callable[[Any], _T], items: list[Any], group_size: int | None\n ) -> Iterable[_T]:\n", "issue": "Documentation formatter is turning \"--\" into \"\u2013\" (en dash) in the description field\nIn the [CLI Command Reference](https://www.checkov.io/2.Basics/CLI%20Command%20Reference.html) documentation, all the examples that use `--` options in the \"Description\" column are coming through as `\u2013` (en dash). This makes it impossible to actually copy these examples for use. They are correct in [the Markdown source file](https://github.com/bridgecrewio/checkov/blob/main/docs/2.Basics/CLI%20Command%20Reference.md). To verify, use the browser search for `--output-file-path` and you'll find only one search result instead of two.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\nimport platform\nfrom collections.abc import Iterator, Iterable\nfrom multiprocessing.pool import Pool\nfrom typing import Any, List, Generator, Callable, Optional, TypeVar, TYPE_CHECKING\n\nfrom checkov.common.models.enums import ParallelizationType\n\nif TYPE_CHECKING:\n from multiprocessing.connection import Connection\n\n_T = TypeVar(\"_T\")\n\n\nclass ParallelRunner:\n def __init__(\n self, workers_number: int | None = None, parallelization_type: ParallelizationType = ParallelizationType.FORK\n ) -> None:\n self.workers_number = (workers_number if workers_number else os.cpu_count()) or 1\n self.os = platform.system()\n self.type: str | ParallelizationType = parallelization_type\n\n custom_type = os.getenv(\"CHECKOV_PARALLELIZATION_TYPE\")\n if custom_type:\n self.type = custom_type.lower()\n\n if not custom_type and os.getenv(\"PYCHARM_HOSTED\") == \"1\":\n # PYCHARM_HOSTED env variable equals 1 when debugging via jetbrains IDE.\n # To prevent JetBrains IDE from crashing on debug run sequentially\n self.type = ParallelizationType.NONE\n elif self.os == \"Windows\":\n # 'fork' mode is not supported on 'Windows'\n # 'spawn' mode results in a strange error, which needs to be investigated on an actual Windows machine\n self.type = ParallelizationType.THREAD\n\n def run_function(\n self,\n func: Callable[..., _T],\n items: List[Any],\n group_size: Optional[int] = None,\n ) -> Iterable[_T]:\n if self.type == ParallelizationType.THREAD:\n return self._run_function_multithreaded(func, items)\n elif self.type == ParallelizationType.FORK:\n return self._run_function_multiprocess_fork(func, items, group_size)\n elif self.type == ParallelizationType.SPAWN:\n return self._run_function_multiprocess_spawn(func, items, group_size)\n else:\n 
return self._run_function_sequential(func, items)\n\n def _run_function_multiprocess_fork(\n self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]\n ) -> Generator[_T, None, Iterable[_T]]:\n if multiprocessing.current_process().daemon:\n # can't fork, when already inside a pool\n return self._run_function_multithreaded(func, items) # noqa: B901\n\n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n groups_of_items = [items[i : i + group_size] for i in range(0, len(items), group_size)]\n\n def func_wrapper(original_func: Callable[[Any], _T], items_group: List[Any], connection: Connection) -> None:\n for item in items_group:\n try:\n if isinstance(item, tuple):\n # unpack a tuple to pass multiple arguments to the target function\n result = original_func(*item)\n else:\n result = original_func(item)\n except Exception:\n logging.error(\n f\"Failed to invoke function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with {item}\",\n exc_info=True,\n )\n result = None\n\n connection.send(result)\n connection.close()\n\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'fork'\"\n )\n processes = []\n for group_of_items in groups_of_items:\n parent_conn, child_conn = multiprocessing.Pipe(duplex=False)\n process = multiprocessing.get_context(\"fork\").Process(\n target=func_wrapper, args=(func, group_of_items, child_conn)\n )\n processes.append((process, parent_conn, len(group_of_items)))\n process.start()\n\n for _, parent_conn, group_len in processes:\n for _ in range(group_len):\n try:\n yield parent_conn.recv()\n except EOFError:\n pass\n\n return []\n\n def _run_function_multiprocess_spawn(\n self, func: Callable[[Any], _T], items: list[Any], group_size: int | None\n ) -> Iterable[_T]:\n if multiprocessing.current_process().daemon:\n # can't create a new pool, when already inside a pool\n return self._run_function_multithreaded(func, items)\n\n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'spawn'\"\n )\n with Pool(processes=self.workers_number, context=multiprocessing.get_context(\"spawn\")) as p:\n if items and isinstance(items[0], tuple):\n # need to use 'starmap' to pass multiple arguments to the target function\n return p.starmap(func, items, chunksize=group_size)\n\n return p.map(func, items, chunksize=group_size)\n\n def _run_function_multithreaded(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'thread'\"\n )\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers_number) as executor:\n if items and isinstance(items[0], tuple):\n # split a list of tuple into tuples of the positioned values of the tuple\n return executor.map(func, *list(zip(*items))) # noqa[B905] # no need to set 'strict' otherwise 'mypy' complains\n\n return executor.map(func, items)\n\n def _run_function_sequential(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'none'\"\n )\n if items and isinstance(items[0], tuple):\n # unpack a tuple to pass multiple arguments to the target function\n return (func(*item) for item in 
items)\n\n return (func(item) for item in items)\n\n\nparallel_runner = ParallelRunner()\n", "path": "checkov/common/parallelizer/parallel_runner.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\nimport platform\nfrom collections.abc import Iterator, Iterable\nfrom multiprocessing.pool import Pool\nfrom typing import Any, List, Generator, Callable, Optional, TypeVar, TYPE_CHECKING\n\nfrom checkov.common.models.enums import ParallelizationType\n\nif TYPE_CHECKING:\n from multiprocessing.connection import Connection\n\n_T = TypeVar(\"_T\")\n\n\nclass ParallelRunner:\n def __init__(\n self, workers_number: int | None = None, parallelization_type: ParallelizationType = ParallelizationType.FORK\n ) -> None:\n self.workers_number = (workers_number if workers_number else os.cpu_count()) or 1\n self.os = platform.system()\n self.type: str | ParallelizationType = parallelization_type\n\n custom_type = os.getenv(\"CHECKOV_PARALLELIZATION_TYPE\")\n if custom_type:\n self.type = custom_type.lower()\n\n if not custom_type and os.getenv(\"PYCHARM_HOSTED\") == \"1\":\n # PYCHARM_HOSTED env variable equals 1 when debugging via jetbrains IDE.\n # To prevent JetBrains IDE from crashing on debug run sequentially\n self.type = ParallelizationType.NONE\n elif self.os == \"Windows\":\n # 'fork' mode is not supported on 'Windows'\n # 'spawn' mode results in a strange error, which needs to be investigated on an actual Windows machine\n self.type = ParallelizationType.THREAD\n\n def run_function(\n self,\n func: Callable[..., _T],\n items: List[Any],\n group_size: Optional[int] = None,\n ) -> Iterable[_T]:\n if self.type == ParallelizationType.THREAD:\n return self._run_function_multithreaded(func, items)\n elif self.type == ParallelizationType.FORK:\n return self._run_function_multiprocess_fork(func, items, group_size)\n elif self.type == ParallelizationType.SPAWN:\n return self._run_function_multiprocess_spawn(func, items, group_size)\n else:\n return self._run_function_sequential(func, items)\n\n def _run_function_multiprocess_fork(\n self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]\n ) -> Generator[_T, None, None]:\n if multiprocessing.current_process().daemon:\n # can't fork, when already inside a pool\n for result in self._run_function_multithreaded(func, items):\n yield result\n return\n\n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n groups_of_items = [items[i : i + group_size] for i in range(0, len(items), group_size)]\n\n def func_wrapper(original_func: Callable[[Any], _T], items_group: List[Any], connection: Connection) -> None:\n for item in items_group:\n try:\n if isinstance(item, tuple):\n # unpack a tuple to pass multiple arguments to the target function\n result = original_func(*item)\n else:\n result = original_func(item)\n except Exception:\n logging.error(\n f\"Failed to invoke function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with {item}\",\n exc_info=True,\n )\n result = None\n\n connection.send(result)\n connection.close()\n\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'fork'\"\n )\n processes = []\n for group_of_items in groups_of_items:\n parent_conn, child_conn = multiprocessing.Pipe(duplex=False)\n process = multiprocessing.get_context(\"fork\").Process(\n target=func_wrapper, args=(func, group_of_items, child_conn)\n )\n 
processes.append((process, parent_conn, len(group_of_items)))\n process.start()\n\n for _, parent_conn, group_len in processes:\n for _ in range(group_len):\n try:\n yield parent_conn.recv()\n except EOFError:\n pass\n\n def _run_function_multiprocess_spawn(\n self, func: Callable[[Any], _T], items: list[Any], group_size: int | None\n ) -> Iterable[_T]:\n if multiprocessing.current_process().daemon:\n # can't create a new pool, when already inside a pool\n return self._run_function_multithreaded(func, items)\n\n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'spawn'\"\n )\n with Pool(processes=self.workers_number, context=multiprocessing.get_context(\"spawn\")) as p:\n if items and isinstance(items[0], tuple):\n # need to use 'starmap' to pass multiple arguments to the target function\n return p.starmap(func, items, chunksize=group_size)\n\n return p.map(func, items, chunksize=group_size)\n\n def _run_function_multithreaded(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'thread'\"\n )\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers_number) as executor:\n if items and isinstance(items[0], tuple):\n # split a list of tuple into tuples of the positioned values of the tuple\n return executor.map(func, *list(zip(*items))) # noqa[B905] # no need to set 'strict' otherwise 'mypy' complains\n\n return executor.map(func, items)\n\n def _run_function_sequential(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'none'\"\n )\n if items and isinstance(items[0], tuple):\n # unpack a tuple to pass multiple arguments to the target function\n return (func(*item) for item in items)\n\n return (func(item) for item in items)\n\n\nparallel_runner = ParallelRunner()\n", "path": "checkov/common/parallelizer/parallel_runner.py"}]} | 2,149 | 287 |
gh_patches_debug_924 | rasdani/github-patches | git_diff | joke2k__faker-993 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
text-unidecode is released under the Artistic license
`text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is also of concern to commercial users of faker too:
> 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.
Not being able to charge a fee for the software is problematic for those of us who are contractors, for example.
I realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import io
5 import os
6
7 from setuptools import find_packages, setup
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:
11 README = fp.read()
12
13 with io.open(os.path.join(here, 'VERSION')) as version_file:
14 VERSION = version_file.read().strip()
15
16
17 # this module can be zip-safe if the zipimporter implements iter_modules or if
18 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
19 try:
20 import pkgutil
21 import zipimport
22 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
23 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
24 except (ImportError, AttributeError):
25 zip_safe = False
26
27 setup(
28 name='Faker',
29 version=VERSION,
30 description="Faker is a Python package that generates fake data for you.",
31 long_description=README,
32 entry_points={
33 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
34 },
35 classifiers=[
36 # See https://pypi.org/pypi?%3Aaction=list_classifiers
37 'Development Status :: 5 - Production/Stable',
38 'Environment :: Console',
39 'Intended Audience :: Developers',
40 'Programming Language :: Python',
41 'Programming Language :: Python :: 2',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.4',
45 'Programming Language :: Python :: 3.5',
46 'Programming Language :: Python :: 3.6',
47 'Programming Language :: Python :: 3.7',
48 'Programming Language :: Python :: Implementation :: CPython',
49 'Programming Language :: Python :: Implementation :: PyPy',
50 'Topic :: Software Development :: Libraries :: Python Modules',
51 'Topic :: Software Development :: Testing',
52 'Topic :: Utilities',
53 'License :: OSI Approved :: MIT License',
54 ],
55 keywords='faker fixtures data test mock generator',
56 author='joke2k',
57 author_email='[email protected]',
58 url='https://github.com/joke2k/faker',
59 license='MIT License',
60 packages=find_packages(exclude=["docs", "tests", "tests.*"]),
61 platforms=["any"],
62 test_suite='tests',
63 zip_safe=zip_safe,
64 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
65 setup_requires=["pytest-runner"],
66 install_requires=[
67 "python-dateutil>=2.4",
68 "six>=1.10",
69 "text-unidecode==1.2",
70 ],
71 tests_require=[
72 "validators>=0.13.0",
73 "ukpostcodeparser>=1.1.1",
74 "mock ; python_version < '3.3'",
75 "pytest>=3.8.0,<3.9",
76 "more-itertools<6.0.0 ; python_version < '3.0'",
77 # restricted because they may drop python2 support in future versions
78 # https://github.com/joke2k/faker/issues/970
79 "random2<1.1",
80 "freezegun<0.4",
81 ],
82 extras_require={
83 ':python_version<"3.3"': [
84 'ipaddress',
85 ],
86 },
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
install_requires=[
"python-dateutil>=2.4",
"six>=1.10",
- "text-unidecode==1.2",
+ "text-unidecode==1.3",
],
tests_require=[
"validators>=0.13.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n- \"text-unidecode==1.2\",\n+ \"text-unidecode==1.3\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n", "issue": "text-unidecode is released under the Artistic license\n`text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is also of concern to commercial users of faker too:\r\n\r\n> 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.\r\n\r\nNot being able to charge a fee for the software is problematic for those of us who are contractors, for example.\r\n\r\nI realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\n\nwith io.open(os.path.join(here, 'VERSION')) as version_file:\n VERSION = version_file.read().strip()\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=[\"docs\", \"tests\", \"tests.*\"]),\n platforms=[\"any\"],\n test_suite='tests',\n zip_safe=zip_safe,\n 
python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n setup_requires=[\"pytest-runner\"],\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n \"text-unidecode==1.2\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n \"ukpostcodeparser>=1.1.1\",\n \"mock ; python_version < '3.3'\",\n \"pytest>=3.8.0,<3.9\",\n \"more-itertools<6.0.0 ; python_version < '3.0'\",\n # restricted because they may drop python2 support in future versions\n # https://github.com/joke2k/faker/issues/970\n \"random2<1.1\",\n \"freezegun<0.4\",\n ],\n extras_require={\n ':python_version<\"3.3\"': [\n 'ipaddress',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\n\nwith io.open(os.path.join(here, 'VERSION')) as version_file:\n VERSION = version_file.read().strip()\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=[\"docs\", \"tests\", \"tests.*\"]),\n platforms=[\"any\"],\n test_suite='tests',\n zip_safe=zip_safe,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n setup_requires=[\"pytest-runner\"],\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n \"text-unidecode==1.3\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n \"ukpostcodeparser>=1.1.1\",\n \"mock ; python_version < '3.3'\",\n \"pytest>=3.8.0,<3.9\",\n \"more-itertools<6.0.0 ; python_version < '3.0'\",\n # restricted because they may drop python2 support in future versions\n # https://github.com/joke2k/faker/issues/970\n \"random2<1.1\",\n \"freezegun<0.4\",\n ],\n extras_require={\n ':python_version<\"3.3\"': [\n 'ipaddress',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,404 | 96 |
gh_patches_debug_22236 | rasdani/github-patches | git_diff | onnx__onnx-4386 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make C++ and Python checker API consistent
Python checker API supports `full_check` arg:
https://github.com/onnx/onnx/blob/fa6f8cfdce3d86346e8a7494f3062b98416c85fb/onnx/checker.py#L94
C++ does not.
It'd be nice for them to be consistent.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onnx/checker.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 """onnx checker
3
4 This implements graphalities that allows us to check whether a serialized
5 proto is legal.
6 """
7
8 import functools
9
10 from onnx import (ValueInfoProto,
11 AttributeProto,
12 TensorProto,
13 SparseTensorProto,
14 NodeProto,
15 ModelProto,
16 GraphProto,
17 IR_VERSION)
18 import onnx.onnx_cpp2py_export.checker as C
19 import onnx.defs
20 from google.protobuf.message import Message
21 from typing import TypeVar, Callable, Any, Type, cast, Union
22 import onnx.shape_inference
23 import sys
24
25
26 # Limitation of single protobuf file is 2GB
27 MAXIMUM_PROTOBUF = 2000000000
28
29 # TODO: This thing where we reserialize the protobuf back into the
30 # string, only to deserialize it at the call site, is really goofy.
31 # Stop doing that.
32
33
34 # NB: Please don't edit this context!
35 DEFAULT_CONTEXT = C.CheckerContext()
36 DEFAULT_CONTEXT.ir_version = IR_VERSION
37 # TODO: Maybe ONNX-ML should also be defaulted?
38 DEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}
39
40
41 FuncType = TypeVar('FuncType', bound=Callable[..., Any])
42
43
44 # TODO: This really doesn't seem worth the metaprogramming...
45 def _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:
46 def decorator(py_func: FuncType) -> FuncType:
47 @functools.wraps(py_func)
48 def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:
49 if not isinstance(proto, proto_type):
50 raise RuntimeError(
51 'You cannot pass an object that is not of type {}'.format(
52 proto_type.__name__))
53 return getattr(C, py_func.__name__)(
54 proto.SerializeToString(), ctx)
55 return cast(FuncType, checker)
56 return decorator
57
58
59 @_create_checker(ValueInfoProto)
60 def check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
61 pass
62
63
64 @_create_checker(TensorProto)
65 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
66 pass
67
68
69 @_create_checker(AttributeProto)
70 def check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
71 pass
72
73
74 @_create_checker(NodeProto)
75 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
76 pass
77
78
79 @_create_checker(GraphProto)
80 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
81 pass
82
83
84 def check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
85 C.check_sparse_tensor(sparse.SerializeToString(), ctx)
86
87
88 def check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:
89 """Check the consistency of a model. An exception is raised if the test fails.
90
91 Arguments:
92 model (ModelProto): model to check
93 full_check (bool): if True, the function checks shapes can be inferred
94 """
95 # If model is a path instead of ModelProto
96 if isinstance(model, str):
97 C.check_model_path(model)
98 if full_check:
99 onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
100 else:
101 protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()
102 # If the protobuf is larger than 2GB,
103 # remind users should use the model path to check
104 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
105 raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
106 C.check_model(protobuf_string)
107 if full_check:
108 onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
109
110
111 ValidationError = C.ValidationError
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onnx/checker.py b/onnx/checker.py
--- a/onnx/checker.py
+++ b/onnx/checker.py
@@ -94,18 +94,14 @@
"""
# If model is a path instead of ModelProto
if isinstance(model, str):
- C.check_model_path(model)
- if full_check:
- onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
+ C.check_model_path(model, full_check)
else:
protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()
# If the protobuf is larger than 2GB,
# remind users should use the model path to check
if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
- C.check_model(protobuf_string)
- if full_check:
- onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
+ C.check_model(protobuf_string, full_check)
ValidationError = C.ValidationError
| {"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -94,18 +94,14 @@\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n- C.check_model_path(model)\n- if full_check:\n- onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)\n+ C.check_model_path(model, full_check)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n- C.check_model(protobuf_string)\n- if full_check:\n- onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)\n+ C.check_model(protobuf_string, full_check)\n \n \n ValidationError = C.ValidationError\n", "issue": "Make C++ and Python checker API consistent\nPython checker API supports `full_check` arg:\r\nhttps://github.com/onnx/onnx/blob/fa6f8cfdce3d86346e8a7494f3062b98416c85fb/onnx/checker.py#L94\r\n\r\nC++ does not.\r\nIt'd be nice for them to be consistent.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n\"\"\"onnx checker\n\nThis implements graphalities that allows us to check whether a serialized\nproto is legal.\n\"\"\"\n\nimport functools\n\nfrom onnx import (ValueInfoProto,\n AttributeProto,\n TensorProto,\n SparseTensorProto,\n NodeProto,\n ModelProto,\n GraphProto,\n IR_VERSION)\nimport onnx.onnx_cpp2py_export.checker as C\nimport onnx.defs\nfrom google.protobuf.message import Message\nfrom typing import TypeVar, Callable, Any, Type, cast, Union\nimport onnx.shape_inference\nimport sys\n\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar('FuncType', bound=Callable[..., Any])\n\n\n# TODO: This really doesn't seem worth the metaprogramming...\ndef _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:\n def decorator(py_func: FuncType) -> FuncType:\n @functools.wraps(py_func)\n def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:\n if not isinstance(proto, proto_type):\n raise RuntimeError(\n 'You cannot pass an object that is not of type {}'.format(\n proto_type.__name__))\n return getattr(C, py_func.__name__)(\n proto.SerializeToString(), ctx)\n return cast(FuncType, checker)\n return decorator\n\n\n@_create_checker(ValueInfoProto)\ndef check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(TensorProto)\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(AttributeProto)\ndef check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(NodeProto)\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(GraphProto)\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = 
DEFAULT_CONTEXT) -> None:\n pass\n\n\ndef check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:\n \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n\n Arguments:\n model (ModelProto): model to check\n full_check (bool): if True, the function checks shapes can be inferred\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n C.check_model_path(model)\n if full_check:\n onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n C.check_model(protobuf_string)\n if full_check:\n onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n\"\"\"onnx checker\n\nThis implements graphalities that allows us to check whether a serialized\nproto is legal.\n\"\"\"\n\nimport functools\n\nfrom onnx import (ValueInfoProto,\n AttributeProto,\n TensorProto,\n SparseTensorProto,\n NodeProto,\n ModelProto,\n GraphProto,\n IR_VERSION)\nimport onnx.onnx_cpp2py_export.checker as C\nimport onnx.defs\nfrom google.protobuf.message import Message\nfrom typing import TypeVar, Callable, Any, Type, cast, Union\nimport onnx.shape_inference\nimport sys\n\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar('FuncType', bound=Callable[..., Any])\n\n\n# TODO: This really doesn't seem worth the metaprogramming...\ndef _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:\n def decorator(py_func: FuncType) -> FuncType:\n @functools.wraps(py_func)\n def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:\n if not isinstance(proto, proto_type):\n raise RuntimeError(\n 'You cannot pass an object that is not of type {}'.format(\n proto_type.__name__))\n return getattr(C, py_func.__name__)(\n proto.SerializeToString(), ctx)\n return cast(FuncType, checker)\n return decorator\n\n\n@_create_checker(ValueInfoProto)\ndef check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(TensorProto)\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(AttributeProto)\ndef check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(NodeProto)\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(GraphProto)\ndef check_graph(graph: 
GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\ndef check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:\n \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n\n Arguments:\n model (ModelProto): model to check\n full_check (bool): if True, the function checks shapes can be inferred\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n C.check_model_path(model, full_check)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n C.check_model(protobuf_string, full_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]} | 1,450 | 249 |
gh_patches_debug_37440 | rasdani/github-patches | git_diff | arviz-devs__arviz-636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow normalization in plot_parallel
It would be great if plot_parallel had a keyword arg `normalize` (or standardize), that centers and rescales the variables before plotting. That would make it easier to see things if some posteriors are much more tight than others:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/parallelplot.py`
Content:
```
1 """Parallel coordinates plot showing posterior points with and without divergences marked."""
2 import matplotlib.pyplot as plt
3 import numpy as np
4
5 from ..data import convert_to_dataset
6 from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
7 from ..utils import _var_names
8
9
10 def plot_parallel(
11 data,
12 var_names=None,
13 coords=None,
14 figsize=None,
15 textsize=None,
16 legend=True,
17 colornd="k",
18 colord="C1",
19 shadend=0.025,
20 ax=None,
21 ):
22 """
23 Plot parallel coordinates plot showing posterior points with and without divergences.
24
25 Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen
26
27 Parameters
28 ----------
29 data : obj
30 Any object that can be converted to an az.InferenceData object
31 Refer to documentation of az.convert_to_dataset for details
32 var_names : list of variable names
33 Variables to be plotted, if None all variable are plotted. Can be used to change the order
34 of the plotted variables
35 coords : mapping, optional
36 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
37 figsize : tuple
38 Figure size. If None it will be defined automatically.
39 textsize: float
40 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
41 on figsize.
42 legend : bool
43 Flag for plotting legend (defaults to True)
44 colornd : valid matplotlib color
45 color for non-divergent points. Defaults to 'k'
46 colord : valid matplotlib color
47 color for divergent points. Defaults to 'C1'
48 shadend : float
49 Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).
50 Defaults to .025
51 ax : axes
52 Matplotlib axes.
53
54 Returns
55 -------
56 ax : matplotlib axes
57 """
58 if coords is None:
59 coords = {}
60
61 # Get diverging draws and combine chains
62 divergent_data = convert_to_dataset(data, group="sample_stats")
63 _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=("diverging",), combined=True)
64 diverging_mask = np.squeeze(diverging_mask)
65
66 # Get posterior draws and combine chains
67 posterior_data = convert_to_dataset(data, group="posterior")
68 var_names = _var_names(var_names, posterior_data)
69 var_names, _posterior = xarray_to_ndarray(
70 get_coords(posterior_data, coords), var_names=var_names, combined=True
71 )
72
73 if len(var_names) < 2:
74 raise ValueError("This plot needs at least two variables")
75
76 figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
77
78 if ax is None:
79 _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
80
81 ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)
82
83 if np.any(diverging_mask):
84 ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)
85
86 ax.tick_params(labelsize=textsize)
87 ax.set_xticks(range(len(var_names)))
88 ax.set_xticklabels(var_names)
89
90 if legend:
91 ax.plot([], color=colornd, label="non-divergent")
92 if np.any(diverging_mask):
93 ax.plot([], color=colord, label="divergent")
94 ax.legend(fontsize=xt_labelsize)
95
96 return ax
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py
--- a/arviz/plots/parallelplot.py
+++ b/arviz/plots/parallelplot.py
@@ -2,6 +2,7 @@
import matplotlib.pyplot as plt
import numpy as np
+from scipy.stats.mstats import rankdata
from ..data import convert_to_dataset
from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
from ..utils import _var_names
@@ -18,6 +19,7 @@
colord="C1",
shadend=0.025,
ax=None,
+ norm_method=None,
):
"""
Plot parallel coordinates plot showing posterior points with and without divergences.
@@ -50,10 +52,33 @@
Defaults to .025
ax : axes
Matplotlib axes.
+ norm_method : str
+ Method for normalizing the data. Methods include normal, minmax and rank.
+ Defaults to none.
Returns
-------
ax : matplotlib axes
+
+ Examples
+ --------
+ Plot default parallel plot
+
+ .. plot::
+ :context: close-figs
+
+ >>> import arviz as az
+ >>> data = az.load_arviz_data('centered_eight')
+ >>> az.plot_parallel(data, var_names=["mu", "tau"])
+
+
+ Plot parallel plot with normalization
+
+ .. plot::
+ :context: close-figs
+
+ >>> az.plot_parallel(data, var_names=["mu", "tau"], norm_method='normal')
+
"""
if coords is None:
coords = {}
@@ -69,9 +94,23 @@
var_names, _posterior = xarray_to_ndarray(
get_coords(posterior_data, coords), var_names=var_names, combined=True
)
-
if len(var_names) < 2:
raise ValueError("This plot needs at least two variables")
+ if norm_method is not None:
+ if norm_method == "normal":
+ mean = np.mean(_posterior, axis=1)
+ standard_deviation = np.std(_posterior, axis=1)
+ for i in range(0, np.shape(mean)[0]):
+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]
+ elif norm_method == "minmax":
+ min_elem = np.min(_posterior, axis=1)
+ max_elem = np.max(_posterior, axis=1)
+ for i in range(0, np.shape(min_elem)[0]):
+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])
+ elif norm_method == "rank":
+ _posterior = rankdata(_posterior, axis=1)
+ else:
+ raise ValueError("{} is not supported. Use normal, minmax or rank.".format(norm_method))
figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
| {"golden_diff": "diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py\n--- a/arviz/plots/parallelplot.py\n+++ b/arviz/plots/parallelplot.py\n@@ -2,6 +2,7 @@\n import matplotlib.pyplot as plt\n import numpy as np\n \n+from scipy.stats.mstats import rankdata\n from ..data import convert_to_dataset\n from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\n from ..utils import _var_names\n@@ -18,6 +19,7 @@\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n+ norm_method=None,\n ):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n@@ -50,10 +52,33 @@\n Defaults to .025\n ax : axes\n Matplotlib axes.\n+ norm_method : str\n+ Method for normalizing the data. Methods include normal, minmax and rank.\n+ Defaults to none.\n \n Returns\n -------\n ax : matplotlib axes\n+\n+ Examples\n+ --------\n+ Plot default parallel plot\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> import arviz as az\n+ >>> data = az.load_arviz_data('centered_eight')\n+ >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"])\n+\n+\n+ Plot parallel plot with normalization\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"], norm_method='normal')\n+\n \"\"\"\n if coords is None:\n coords = {}\n@@ -69,9 +94,23 @@\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n-\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n+ if norm_method is not None:\n+ if norm_method == \"normal\":\n+ mean = np.mean(_posterior, axis=1)\n+ standard_deviation = np.std(_posterior, axis=1)\n+ for i in range(0, np.shape(mean)[0]):\n+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]\n+ elif norm_method == \"minmax\":\n+ min_elem = np.min(_posterior, axis=1)\n+ max_elem = np.max(_posterior, axis=1)\n+ for i in range(0, np.shape(min_elem)[0]):\n+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])\n+ elif norm_method == \"rank\":\n+ _posterior = rankdata(_posterior, axis=1)\n+ else:\n+ raise ValueError(\"{} is not supported. Use normal, minmax or rank.\".format(norm_method))\n \n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n", "issue": "Allow normalization in plot_parallel\nIt would be great if plot_parallel had a keyword arg `normalize` (or standardize), that centers and rescales the variables before plotting. 
That would make it easier to see things if some posteriors are much more tight than others:\r\n\n", "before_files": [{"content": "\"\"\"Parallel coordinates plot showing posterior points with and without divergences marked.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\nfrom ..utils import _var_names\n\n\ndef plot_parallel(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n legend=True,\n colornd=\"k\",\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n\n Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, if None all variable are plotted. Can be used to change the order\n of the plotted variables\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n legend : bool\n Flag for plotting legend (defaults to True)\n colornd : valid matplotlib color\n color for non-divergent points. Defaults to 'k'\n colord : valid matplotlib color\n color for divergent points. Defaults to 'C1'\n shadend : float\n Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).\n Defaults to .025\n ax : axes\n Matplotlib axes.\n\n Returns\n -------\n ax : matplotlib axes\n \"\"\"\n if coords is None:\n coords = {}\n\n # Get diverging draws and combine chains\n divergent_data = convert_to_dataset(data, group=\"sample_stats\")\n _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(\"diverging\",), combined=True)\n diverging_mask = np.squeeze(diverging_mask)\n\n # Get posterior draws and combine chains\n posterior_data = convert_to_dataset(data, group=\"posterior\")\n var_names = _var_names(var_names, posterior_data)\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n\n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)\n\n if np.any(diverging_mask):\n ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)\n\n ax.tick_params(labelsize=textsize)\n ax.set_xticks(range(len(var_names)))\n ax.set_xticklabels(var_names)\n\n if legend:\n ax.plot([], color=colornd, label=\"non-divergent\")\n if np.any(diverging_mask):\n ax.plot([], color=colord, label=\"divergent\")\n ax.legend(fontsize=xt_labelsize)\n\n return ax\n", "path": "arviz/plots/parallelplot.py"}], "after_files": [{"content": "\"\"\"Parallel coordinates plot showing posterior points with and without divergences marked.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom scipy.stats.mstats import rankdata\nfrom ..data import convert_to_dataset\nfrom .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\nfrom ..utils import 
_var_names\n\n\ndef plot_parallel(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n legend=True,\n colornd=\"k\",\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n norm_method=None,\n):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n\n Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, if None all variable are plotted. Can be used to change the order\n of the plotted variables\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n legend : bool\n Flag for plotting legend (defaults to True)\n colornd : valid matplotlib color\n color for non-divergent points. Defaults to 'k'\n colord : valid matplotlib color\n color for divergent points. Defaults to 'C1'\n shadend : float\n Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).\n Defaults to .025\n ax : axes\n Matplotlib axes.\n norm_method : str\n Method for normalizing the data. Methods include normal, minmax and rank.\n Defaults to none.\n\n Returns\n -------\n ax : matplotlib axes\n\n Examples\n --------\n Plot default parallel plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"])\n\n\n Plot parallel plot with normalization\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"], norm_method='normal')\n\n \"\"\"\n if coords is None:\n coords = {}\n\n # Get diverging draws and combine chains\n divergent_data = convert_to_dataset(data, group=\"sample_stats\")\n _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(\"diverging\",), combined=True)\n diverging_mask = np.squeeze(diverging_mask)\n\n # Get posterior draws and combine chains\n posterior_data = convert_to_dataset(data, group=\"posterior\")\n var_names = _var_names(var_names, posterior_data)\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n if norm_method is not None:\n if norm_method == \"normal\":\n mean = np.mean(_posterior, axis=1)\n standard_deviation = np.std(_posterior, axis=1)\n for i in range(0, np.shape(mean)[0]):\n _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]\n elif norm_method == \"minmax\":\n min_elem = np.min(_posterior, axis=1)\n max_elem = np.max(_posterior, axis=1)\n for i in range(0, np.shape(min_elem)[0]):\n _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])\n elif norm_method == \"rank\":\n _posterior = rankdata(_posterior, axis=1)\n else:\n raise ValueError(\"{} is not supported. 
Use normal, minmax or rank.\".format(norm_method))\n\n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)\n\n if np.any(diverging_mask):\n ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)\n\n ax.tick_params(labelsize=textsize)\n ax.set_xticks(range(len(var_names)))\n ax.set_xticklabels(var_names)\n\n if legend:\n ax.plot([], color=colornd, label=\"non-divergent\")\n if np.any(diverging_mask):\n ax.plot([], color=colord, label=\"divergent\")\n ax.legend(fontsize=xt_labelsize)\n\n return ax\n", "path": "arviz/plots/parallelplot.py"}]} | 1,352 | 702 |
gh_patches_debug_30482 | rasdani/github-patches | git_diff | SeldonIO__MLServer-605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add docker build option to not use cache
To ensure fresh environment and while potentially developing a new model version users will not want to use cached layers in the docker build for their image.
In docker this is the --no-cache option, I would make it a boolean option within the cli and also add the --rm option (although default is true might be worth making sure) to remove any intermediate containers after success to again ensure a clean environment in CI/CD as well as locally.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/cli/build.py`
Content:
```
1 import subprocess
2 import os
3
4 from tempfile import TemporaryDirectory
5
6 from .. import __version__
7 from ..logging import logger
8
9 from .constants import (
10 DockerfileName,
11 DockerfileTemplate,
12 DockerignoreName,
13 Dockerignore,
14 )
15
16
17 def generate_dockerfile() -> str:
18 return DockerfileTemplate.format(version=__version__)
19
20
21 def write_dockerfile(
22 folder: str, dockerfile: str, include_dockerignore: bool = True
23 ) -> str:
24 dockerfile_path = os.path.join(folder, DockerfileName)
25 with open(dockerfile_path, "w") as dockerfile_handler:
26 logger.info(f"Writing Dockerfile in {dockerfile_path}")
27 dockerfile_handler.write(dockerfile)
28
29 if include_dockerignore:
30 # Point to our own .dockerignore
31 # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file
32 dockerignore_path = dockerfile_path + DockerignoreName
33 with open(dockerignore_path, "w") as dockerignore_handler:
34 logger.info(f"Writing .dockerignore in {dockerignore_path}")
35 dockerignore_handler.write(Dockerignore)
36
37 return dockerfile_path
38
39
40 def build_image(folder: str, dockerfile: str, image_tag: str) -> str:
41 logger.info(f"Building Docker image with tag {image_tag}")
42 with TemporaryDirectory() as tmp_dir:
43 dockerfile_path = write_dockerfile(tmp_dir, dockerfile)
44
45 build_cmd = f"docker build {folder} -f {dockerfile_path} -t {image_tag}"
46 build_env = os.environ.copy()
47 build_env["DOCKER_BUILDKIT"] = "1"
48 subprocess.run(build_cmd, check=True, shell=True, env=build_env)
49
50 return image_tag
51
```
Path: `mlserver/cli/main.py`
Content:
```
1 """
2 Command-line interface to manage MLServer models.
3 """
4 import click
5 import asyncio
6
7 from functools import wraps
8
9 from ..server import MLServer
10 from ..logging import logger, configure_logger
11 from ..utils import install_uvloop_event_loop
12
13 from .build import generate_dockerfile, build_image, write_dockerfile
14 from .serve import load_settings
15
16
17 def click_async(f):
18 @wraps(f)
19 def wrapper(*args, **kwargs):
20 return asyncio.run(f(*args, **kwargs))
21
22 return wrapper
23
24
25 @click.group()
26 @click.version_option()
27 def root():
28 """
29 Command-line interface to manage MLServer models.
30 """
31 pass
32
33
34 @root.command("start")
35 @click.argument("folder", nargs=1)
36 @click_async
37 async def start(folder: str):
38 """
39 Start serving a machine learning model with MLServer.
40 """
41 settings, models_settings = await load_settings(folder)
42
43 server = MLServer(settings)
44 await server.start(models_settings)
45
46
47 @root.command("build")
48 @click.argument("folder", nargs=1)
49 @click.option("-t", "--tag", type=str)
50 @click_async
51 async def build(folder: str, tag: str):
52 """
53 Build a Docker image for a custom MLServer runtime.
54 """
55 dockerfile = generate_dockerfile()
56 build_image(folder, dockerfile, tag)
57 logger.info(f"Successfully built custom Docker image with tag {tag}")
58
59
60 @root.command("dockerfile")
61 @click.argument("folder", nargs=1)
62 @click.option("-i", "--include-dockerignore", is_flag=True)
63 @click_async
64 async def dockerfile(folder: str, include_dockerignore: bool):
65 """
66 Generate a Dockerfile
67 """
68 dockerfile = generate_dockerfile()
69 dockerfile_path = write_dockerfile(
70 folder, dockerfile, include_dockerignore=include_dockerignore
71 )
72 logger.info(f"Successfully written Dockerfile in {dockerfile_path}")
73
74
75 def main():
76 configure_logger()
77 install_uvloop_event_loop()
78 root()
79
80
81 if __name__ == "__main__":
82 main()
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/cli/build.py b/mlserver/cli/build.py
--- a/mlserver/cli/build.py
+++ b/mlserver/cli/build.py
@@ -37,12 +37,18 @@
return dockerfile_path
-def build_image(folder: str, dockerfile: str, image_tag: str) -> str:
+def build_image(
+ folder: str, dockerfile: str, image_tag: str, no_cache: bool = False
+) -> str:
logger.info(f"Building Docker image with tag {image_tag}")
+ _docker_command_prefix = "docker build --rm "
with TemporaryDirectory() as tmp_dir:
dockerfile_path = write_dockerfile(tmp_dir, dockerfile)
-
- build_cmd = f"docker build {folder} -f {dockerfile_path} -t {image_tag}"
+ _docker_command_suffix = f"{folder} -f {dockerfile_path} -t {image_tag}"
+ if no_cache:
+ build_cmd = _docker_command_prefix + "--no-cache " + _docker_command_suffix
+ else:
+ build_cmd = _docker_command_prefix + _docker_command_suffix
build_env = os.environ.copy()
build_env["DOCKER_BUILDKIT"] = "1"
subprocess.run(build_cmd, check=True, shell=True, env=build_env)
diff --git a/mlserver/cli/main.py b/mlserver/cli/main.py
--- a/mlserver/cli/main.py
+++ b/mlserver/cli/main.py
@@ -47,13 +47,14 @@
@root.command("build")
@click.argument("folder", nargs=1)
@click.option("-t", "--tag", type=str)
[email protected]("--no-cache", default=False, is_flag=True)
@click_async
-async def build(folder: str, tag: str):
+async def build(folder: str, tag: str, no_cache: bool = False):
"""
Build a Docker image for a custom MLServer runtime.
"""
dockerfile = generate_dockerfile()
- build_image(folder, dockerfile, tag)
+ build_image(folder, dockerfile, tag, no_cache=no_cache)
logger.info(f"Successfully built custom Docker image with tag {tag}")
| {"golden_diff": "diff --git a/mlserver/cli/build.py b/mlserver/cli/build.py\n--- a/mlserver/cli/build.py\n+++ b/mlserver/cli/build.py\n@@ -37,12 +37,18 @@\n return dockerfile_path\n \n \n-def build_image(folder: str, dockerfile: str, image_tag: str) -> str:\n+def build_image(\n+ folder: str, dockerfile: str, image_tag: str, no_cache: bool = False\n+) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n+ _docker_command_prefix = \"docker build --rm \"\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n-\n- build_cmd = f\"docker build {folder} -f {dockerfile_path} -t {image_tag}\"\n+ _docker_command_suffix = f\"{folder} -f {dockerfile_path} -t {image_tag}\"\n+ if no_cache:\n+ build_cmd = _docker_command_prefix + \"--no-cache \" + _docker_command_suffix\n+ else:\n+ build_cmd = _docker_command_prefix + _docker_command_suffix\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\ndiff --git a/mlserver/cli/main.py b/mlserver/cli/main.py\n--- a/mlserver/cli/main.py\n+++ b/mlserver/cli/main.py\n@@ -47,13 +47,14 @@\n @root.command(\"build\")\n @click.argument(\"folder\", nargs=1)\n @click.option(\"-t\", \"--tag\", type=str)\[email protected](\"--no-cache\", default=False, is_flag=True)\n @click_async\n-async def build(folder: str, tag: str):\n+async def build(folder: str, tag: str, no_cache: bool = False):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n- build_image(folder, dockerfile, tag)\n+ build_image(folder, dockerfile, tag, no_cache=no_cache)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n", "issue": "Add docker build option to not use cache\nTo ensure fresh environment and while potentially developing a new model version users will not want to use cached layers in the docker build for their image.\r\n\r\nIn docker this is the --no-cache option, I would make it a boolean option within the cli and also add the --rm option (although default is true might be worth making sure) to remove any intermediate containers after success to again ensure a clean environment in CI/CD as well as locally. \n", "before_files": [{"content": "import subprocess\nimport os\n\nfrom tempfile import TemporaryDirectory\n\nfrom .. 
import __version__\nfrom ..logging import logger\n\nfrom .constants import (\n DockerfileName,\n DockerfileTemplate,\n DockerignoreName,\n Dockerignore,\n)\n\n\ndef generate_dockerfile() -> str:\n return DockerfileTemplate.format(version=__version__)\n\n\ndef write_dockerfile(\n folder: str, dockerfile: str, include_dockerignore: bool = True\n) -> str:\n dockerfile_path = os.path.join(folder, DockerfileName)\n with open(dockerfile_path, \"w\") as dockerfile_handler:\n logger.info(f\"Writing Dockerfile in {dockerfile_path}\")\n dockerfile_handler.write(dockerfile)\n\n if include_dockerignore:\n # Point to our own .dockerignore\n # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file\n dockerignore_path = dockerfile_path + DockerignoreName\n with open(dockerignore_path, \"w\") as dockerignore_handler:\n logger.info(f\"Writing .dockerignore in {dockerignore_path}\")\n dockerignore_handler.write(Dockerignore)\n\n return dockerfile_path\n\n\ndef build_image(folder: str, dockerfile: str, image_tag: str) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n\n build_cmd = f\"docker build {folder} -f {dockerfile_path} -t {image_tag}\"\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\n\n return image_tag\n", "path": "mlserver/cli/build.py"}, {"content": "\"\"\"\nCommand-line interface to manage MLServer models.\n\"\"\"\nimport click\nimport asyncio\n\nfrom functools import wraps\n\nfrom ..server import MLServer\nfrom ..logging import logger, configure_logger\nfrom ..utils import install_uvloop_event_loop\n\nfrom .build import generate_dockerfile, build_image, write_dockerfile\nfrom .serve import load_settings\n\n\ndef click_async(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n return asyncio.run(f(*args, **kwargs))\n\n return wrapper\n\n\[email protected]()\[email protected]_option()\ndef root():\n \"\"\"\n Command-line interface to manage MLServer models.\n \"\"\"\n pass\n\n\[email protected](\"start\")\[email protected](\"folder\", nargs=1)\n@click_async\nasync def start(folder: str):\n \"\"\"\n Start serving a machine learning model with MLServer.\n \"\"\"\n settings, models_settings = await load_settings(folder)\n\n server = MLServer(settings)\n await server.start(models_settings)\n\n\[email protected](\"build\")\[email protected](\"folder\", nargs=1)\[email protected](\"-t\", \"--tag\", type=str)\n@click_async\nasync def build(folder: str, tag: str):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n build_image(folder, dockerfile, tag)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n\n\[email protected](\"dockerfile\")\[email protected](\"folder\", nargs=1)\[email protected](\"-i\", \"--include-dockerignore\", is_flag=True)\n@click_async\nasync def dockerfile(folder: str, include_dockerignore: bool):\n \"\"\"\n Generate a Dockerfile\n \"\"\"\n dockerfile = generate_dockerfile()\n dockerfile_path = write_dockerfile(\n folder, dockerfile, include_dockerignore=include_dockerignore\n )\n logger.info(f\"Successfully written Dockerfile in {dockerfile_path}\")\n\n\ndef main():\n configure_logger()\n install_uvloop_event_loop()\n root()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mlserver/cli/main.py"}], "after_files": [{"content": "import subprocess\nimport 
os\n\nfrom tempfile import TemporaryDirectory\n\nfrom .. import __version__\nfrom ..logging import logger\n\nfrom .constants import (\n DockerfileName,\n DockerfileTemplate,\n DockerignoreName,\n Dockerignore,\n)\n\n\ndef generate_dockerfile() -> str:\n return DockerfileTemplate.format(version=__version__)\n\n\ndef write_dockerfile(\n folder: str, dockerfile: str, include_dockerignore: bool = True\n) -> str:\n dockerfile_path = os.path.join(folder, DockerfileName)\n with open(dockerfile_path, \"w\") as dockerfile_handler:\n logger.info(f\"Writing Dockerfile in {dockerfile_path}\")\n dockerfile_handler.write(dockerfile)\n\n if include_dockerignore:\n # Point to our own .dockerignore\n # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file\n dockerignore_path = dockerfile_path + DockerignoreName\n with open(dockerignore_path, \"w\") as dockerignore_handler:\n logger.info(f\"Writing .dockerignore in {dockerignore_path}\")\n dockerignore_handler.write(Dockerignore)\n\n return dockerfile_path\n\n\ndef build_image(\n folder: str, dockerfile: str, image_tag: str, no_cache: bool = False\n) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n _docker_command_prefix = \"docker build --rm \"\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n _docker_command_suffix = f\"{folder} -f {dockerfile_path} -t {image_tag}\"\n if no_cache:\n build_cmd = _docker_command_prefix + \"--no-cache \" + _docker_command_suffix\n else:\n build_cmd = _docker_command_prefix + _docker_command_suffix\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\n\n return image_tag\n", "path": "mlserver/cli/build.py"}, {"content": "\"\"\"\nCommand-line interface to manage MLServer models.\n\"\"\"\nimport click\nimport asyncio\n\nfrom functools import wraps\n\nfrom ..server import MLServer\nfrom ..logging import logger, configure_logger\nfrom ..utils import install_uvloop_event_loop\n\nfrom .build import generate_dockerfile, build_image, write_dockerfile\nfrom .serve import load_settings\n\n\ndef click_async(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n return asyncio.run(f(*args, **kwargs))\n\n return wrapper\n\n\[email protected]()\[email protected]_option()\ndef root():\n \"\"\"\n Command-line interface to manage MLServer models.\n \"\"\"\n pass\n\n\[email protected](\"start\")\[email protected](\"folder\", nargs=1)\n@click_async\nasync def start(folder: str):\n \"\"\"\n Start serving a machine learning model with MLServer.\n \"\"\"\n settings, models_settings = await load_settings(folder)\n\n server = MLServer(settings)\n await server.start(models_settings)\n\n\[email protected](\"build\")\[email protected](\"folder\", nargs=1)\[email protected](\"-t\", \"--tag\", type=str)\[email protected](\"--no-cache\", default=False, is_flag=True)\n@click_async\nasync def build(folder: str, tag: str, no_cache: bool = False):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n build_image(folder, dockerfile, tag, no_cache=no_cache)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n\n\[email protected](\"dockerfile\")\[email protected](\"folder\", nargs=1)\[email protected](\"-i\", \"--include-dockerignore\", is_flag=True)\n@click_async\nasync def dockerfile(folder: str, include_dockerignore: bool):\n \"\"\"\n Generate a Dockerfile\n \"\"\"\n dockerfile = 
generate_dockerfile()\n dockerfile_path = write_dockerfile(\n folder, dockerfile, include_dockerignore=include_dockerignore\n )\n logger.info(f\"Successfully written Dockerfile in {dockerfile_path}\")\n\n\ndef main():\n configure_logger()\n install_uvloop_event_loop()\n root()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mlserver/cli/main.py"}]} | 1,461 | 479 |
gh_patches_debug_7147 | rasdani/github-patches | git_diff | streamlink__streamlink-5854 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.vimeo: live event URL triggers ValidationError / DASH errors
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
6.4.2 on windows 10
### Description
A live event on vimeo fails while being launched as event or video page
### Debug log
```text
D:\_Downloads\TV+Streaming\youtubedl-streamlink\bin> .\streamlink.exe https://vimeo.com/event/3924129
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.6
[cli][debug] OpenSSL: OpenSSL 3.0.11 19 Sep 2023
[cli][debug] Streamlink: 6.4.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.11.17
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.3
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.19.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.8.0
[cli][debug] urllib3: 2.0.6
[cli][debug] websocket-client: 1.6.3
[cli][debug] Arguments:
[cli][debug] url=https://vimeo.com/event/3924129
[cli][debug] --loglevel=debug
[cli][debug] --player=D:\Program Files\VideoLAN\VLC\vlc.exe
[cli][debug] --player-args=--qt-minimal-view
[cli][debug] --default-stream=['720p', '720p_alt', '720p60', 'best']
[cli][debug] --retry-max=2
[cli][debug] --stream-segment-threads=3
[cli][debug] --stream-timeout=20.0
[cli][debug] --hls-playlist-reload-attempts=3
[cli][debug] --ffmpeg-ffmpeg="D:\_Downloads\TV+Streaming\youtubedl-streamlink\ffmpeg\ffmpeg.exe"
[cli][debug] --generic-playlist-max=3
[cli][debug] --generic-ignore-same-url=True
[cli][info] Found matching plugin vimeo for URL https://vimeo.com/event/3924129
[cli][error] Unable to validate response text: ValidationError(dict):
Key 'uri' not found in <{'type': 'video', 'version': '1.0', 'provider_name': 'V...>
[cli][info] Waiting for streams, retrying every 1 second(s)
[cli][error] Unable to validate response text: ValidationError(dict):
Key 'uri' not found in <{'type': 'video', 'version': '1.0', 'provider_name': 'V...>
[cli][error] Unable to validate response text: ValidationError(dict):
Key 'uri' not found in <{'type': 'video', 'version': '1.0', 'provider_name': 'V...>
error: No playable streams found on this URL: https://vimeo.com/event/3924129
=========================
D:\_Downloads\TV+Streaming\youtubedl-streamlink\bin> .\streamlink.exe https://vimeo.com/890064882
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.6
[cli][debug] OpenSSL: OpenSSL 3.0.11 19 Sep 2023
[cli][debug] Streamlink: 6.4.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.11.17
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.3
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.19.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.8.0
[cli][debug] urllib3: 2.0.6
[cli][debug] websocket-client: 1.6.3
[cli][debug] Arguments:
[cli][debug] url=https://vimeo.com/890064882
[cli][debug] --loglevel=debug
[cli][debug] --player=D:\Program Files\VideoLAN\VLC\vlc.exe
[cli][debug] --player-args=--qt-minimal-view
[cli][debug] --default-stream=['720p', '720p_alt', '720p60', 'best']
[cli][debug] --retry-max=2
[cli][debug] --stream-segment-threads=3
[cli][debug] --stream-timeout=20.0
[cli][debug] --hls-playlist-reload-attempts=3
[cli][debug] --ffmpeg-ffmpeg="D:\_Downloads\TV+Streaming\youtubedl-streamlink\ffmpeg\ffmpeg.exe"
[cli][debug] --generic-playlist-max=3
[cli][debug] --generic-ignore-same-url=True
[cli][info] Found matching plugin vimeo for URL https://vimeo.com/890064882
[utils.l10n][debug] Language code: en_US
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\bin\streamlink.exe\__main__.py", line 18, in <module>
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\pkgs\streamlink_cli\main.py", line 929, in main
handle_url()
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\pkgs\streamlink_cli\main.py", line 549, in handle_url
streams = fetch_streams_with_retry(plugin, retry_streams, retry_max)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\pkgs\streamlink_cli\main.py", line 458, in fetch_streams_with_retry
streams = fetch_streams(plugin)
^^^^^^^^^^^^^^^^^^^^^
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\pkgs\streamlink_cli\main.py", line 449, in fetch_streams
return plugin.streams(stream_types=args.stream_types,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\pkgs\streamlink\plugin\plugin.py", line 375, in streams
ostreams = list(ostreams)
^^^^^^^^^^^^^^
File "D:\_Downloads\TV+Streaming\youtubedl-streamlink\pkgs\streamlink\plugins\vimeo.py", line 178, in _get_streams
for url in dash.values():
^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'values'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/vimeo.py`
Content:
```
1 """
2 $description Global live-streaming and video hosting social platform.
3 $url vimeo.com
4 $type live, vod
5 $metadata id
6 $metadata author
7 $metadata title
8 $notes Password protected streams are not supported
9 """
10
11 import logging
12 import re
13 from urllib.parse import urljoin, urlparse
14
15 from streamlink.plugin import Plugin, pluginmatcher
16 from streamlink.plugin.api import validate
17 from streamlink.stream.dash import DASHStream
18 from streamlink.stream.ffmpegmux import MuxedStream
19 from streamlink.stream.hls import HLSStream
20 from streamlink.stream.http import HTTPStream
21 from streamlink.utils.url import update_scheme
22
23
24 log = logging.getLogger(__name__)
25
26
27 @pluginmatcher(
28 name="default",
29 pattern=re.compile(r"https?://(?:www\.)?vimeo\.com/.+"),
30 )
31 @pluginmatcher(
32 name="player",
33 pattern=re.compile(r"https?://player\.vimeo\.com/video/\d+"),
34 )
35 class Vimeo(Plugin):
36 VIEWER_URL = "https://vimeo.com/_next/viewer"
37 OEMBED_URL = "https://vimeo.com/api/oembed.json"
38
39 @staticmethod
40 def _schema_config(config):
41 schema_cdns = validate.all(
42 {
43 "cdns": {
44 str: validate.all(
45 {"url": validate.url()},
46 validate.get("url"),
47 ),
48 },
49 },
50 validate.get("cdns"),
51 )
52 schema_config = validate.Schema(
53 {
54 "request": {
55 "files": {
56 validate.optional("hls"): schema_cdns,
57 validate.optional("dash"): schema_cdns,
58 validate.optional("progressive"): [
59 validate.all(
60 {
61 "url": validate.url(),
62 "quality": str,
63 },
64 validate.union_get("quality", "url"),
65 ),
66 ],
67 },
68 validate.optional("text_tracks"): [
69 validate.all(
70 {
71 "url": str,
72 "lang": str,
73 },
74 validate.union_get("lang", "url"),
75 ),
76 ],
77 },
78 validate.optional("video"): validate.none_or_all(
79 {
80 "id": int,
81 "title": str,
82 "owner": {
83 "name": str,
84 },
85 },
86 validate.union_get(
87 "id",
88 ("owner", "name"),
89 "title",
90 ),
91 ),
92 },
93 validate.union_get(
94 ("request", "files", "hls"),
95 ("request", "files", "dash"),
96 ("request", "files", "progressive"),
97 ("request", "text_tracks"),
98 "video",
99 ),
100 )
101
102 return schema_config.validate(config)
103
104 def _get_dash_url(self, url):
105 return self.session.http.get(url, schema=validate.Schema(
106 validate.parse_json(),
107 {"url": validate.url()},
108 validate.get("url"),
109 ))
110
111 def _query_player(self):
112 return self.session.http.get(self.url, schema=validate.Schema(
113 re.compile(r"playerConfig\s*=\s*({.+?})\s*var"),
114 validate.none_or_all(
115 validate.get(1),
116 validate.parse_json(),
117 validate.transform(self._schema_config),
118 ),
119 ))
120
121 def _query_api(self):
122 jwt, api_url = self.session.http.get(
123 self.VIEWER_URL,
124 schema=validate.Schema(
125 validate.parse_json(),
126 {
127 "jwt": str,
128 "apiUrl": str,
129 },
130 validate.union_get("jwt", "apiUrl"),
131 ),
132 )
133 uri = self.session.http.get(
134 self.OEMBED_URL,
135 params={"url": self.url},
136 schema=validate.Schema(
137 validate.parse_json(),
138 {"uri": str},
139 validate.get("uri"),
140 ),
141 )
142 player_config_url = urljoin(update_scheme("https://", api_url), uri)
143 config_url = self.session.http.get(
144 player_config_url,
145 params={"fields": "config_url"},
146 headers={"Authorization": f"jwt {jwt}"},
147 schema=validate.Schema(
148 validate.parse_json(),
149 {"config_url": validate.url()},
150 validate.get("config_url"),
151 ),
152 )
153
154 return self.session.http.get(config_url, schema=validate.Schema(
155 validate.parse_json(),
156 validate.transform(self._schema_config),
157 ))
158
159 def _get_streams(self):
160 if self.matches["player"]:
161 data = self._query_player()
162 else:
163 data = self._query_api()
164
165 if not data:
166 return
167
168 hls, dash, progressive, text_tracks, metadata = data
169 if metadata:
170 self.id, self.author, self.title = metadata
171
172 streams = []
173
174 for url in hls.values():
175 streams.extend(HLSStream.parse_variant_playlist(self.session, url).items())
176 break
177
178 for url in dash.values():
179 p = urlparse(url)
180 if p.path.endswith("dash.mpd"):
181 # LIVE
182 url = self._get_dash_url(url)
183 elif p.path.endswith("master.json"):
184 # VOD
185 url = url.replace("master.json", "master.mpd")
186 else:
187 log.error(f"Unsupported DASH path: {p.path}")
188 continue
189
190 streams.extend(DASHStream.parse_manifest(self.session, url).items())
191 break
192
193 streams.extend(
194 (quality, HTTPStream(self.session, url))
195 for quality, url in progressive or []
196 )
197
198 if text_tracks and self.session.get_option("mux-subtitles"):
199 substreams = {
200 lang: HTTPStream(self.session, urljoin("https://vimeo.com/", url))
201 for lang, url in text_tracks
202 }
203 for quality, stream in streams:
204 yield quality, MuxedStream(self.session, stream, subtitles=substreams)
205 else:
206 yield from streams
207
208
209 __plugin__ = Vimeo
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/vimeo.py b/src/streamlink/plugins/vimeo.py
--- a/src/streamlink/plugins/vimeo.py
+++ b/src/streamlink/plugins/vimeo.py
@@ -171,10 +171,12 @@
streams = []
+ hls = hls or {}
for url in hls.values():
streams.extend(HLSStream.parse_variant_playlist(self.session, url).items())
break
+ dash = dash or {}
for url in dash.values():
p = urlparse(url)
if p.path.endswith("dash.mpd"):
| {"golden_diff": "diff --git a/src/streamlink/plugins/vimeo.py b/src/streamlink/plugins/vimeo.py\n--- a/src/streamlink/plugins/vimeo.py\n+++ b/src/streamlink/plugins/vimeo.py\n@@ -171,10 +171,12 @@\n \n streams = []\n \n+ hls = hls or {}\n for url in hls.values():\n streams.extend(HLSStream.parse_variant_playlist(self.session, url).items())\n break\n \n+ dash = dash or {}\n for url in dash.values():\n p = urlparse(url)\n if p.path.endswith(\"dash.mpd\"):\n", "issue": "plugins.vimeo: live event URL triggers ValidationError / DASH errors\n### Checklist\r\n\r\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\n6.4.2 on windows 10\r\n\r\n### Description\r\n\r\nA live event on vimeo fails while being launched as event or video page\r\n\r\n### Debug log\r\n\r\n```text\r\n D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\bin> .\\streamlink.exe https://vimeo.com/event/3924129\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.6\r\n[cli][debug] OpenSSL: OpenSSL 3.0.11 19 Sep 2023\r\n[cli][debug] Streamlink: 6.4.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.11.17\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.19.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.8.0\r\n[cli][debug] urllib3: 2.0.6\r\n[cli][debug] websocket-client: 1.6.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://vimeo.com/event/3924129\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=D:\\Program Files\\VideoLAN\\VLC\\vlc.exe\r\n[cli][debug] --player-args=--qt-minimal-view\r\n[cli][debug] --default-stream=['720p', '720p_alt', '720p60', 'best']\r\n[cli][debug] --retry-max=2\r\n[cli][debug] --stream-segment-threads=3\r\n[cli][debug] --stream-timeout=20.0\r\n[cli][debug] --hls-playlist-reload-attempts=3\r\n[cli][debug] --ffmpeg-ffmpeg=\"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\ffmpeg\\ffmpeg.exe\"\r\n[cli][debug] --generic-playlist-max=3\r\n[cli][debug] --generic-ignore-same-url=True\r\n[cli][info] Found matching plugin vimeo for URL https://vimeo.com/event/3924129\r\n[cli][error] Unable to validate response text: ValidationError(dict):\r\n Key 'uri' not found in <{'type': 'video', 'version': '1.0', 'provider_name': 'V...>\r\n[cli][info] Waiting for streams, retrying every 1 second(s)\r\n[cli][error] Unable to validate response text: ValidationError(dict):\r\n Key 'uri' not found in <{'type': 'video', 'version': '1.0', 'provider_name': 'V...>\r\n[cli][error] Unable to validate response text: ValidationError(dict):\r\n Key 'uri' not found in <{'type': 'video', 'version': '1.0', 'provider_name': 'V...>\r\nerror: No playable streams found on this URL: https://vimeo.com/event/3924129\r\n\r\n\r\n\r\n=========================\r\n\r\n\r\nD:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\bin> .\\streamlink.exe 
https://vimeo.com/890064882\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.6\r\n[cli][debug] OpenSSL: OpenSSL 3.0.11 19 Sep 2023\r\n[cli][debug] Streamlink: 6.4.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.11.17\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.19.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.8.0\r\n[cli][debug] urllib3: 2.0.6\r\n[cli][debug] websocket-client: 1.6.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://vimeo.com/890064882\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=D:\\Program Files\\VideoLAN\\VLC\\vlc.exe\r\n[cli][debug] --player-args=--qt-minimal-view\r\n[cli][debug] --default-stream=['720p', '720p_alt', '720p60', 'best']\r\n[cli][debug] --retry-max=2\r\n[cli][debug] --stream-segment-threads=3\r\n[cli][debug] --stream-timeout=20.0\r\n[cli][debug] --hls-playlist-reload-attempts=3\r\n[cli][debug] --ffmpeg-ffmpeg=\"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\ffmpeg\\ffmpeg.exe\"\r\n[cli][debug] --generic-playlist-max=3\r\n[cli][debug] --generic-ignore-same-url=True\r\n[cli][info] Found matching plugin vimeo for URL https://vimeo.com/890064882\r\n[utils.l10n][debug] Language code: en_US\r\nTraceback (most recent call last):\r\n File \"<frozen runpy>\", line 198, in _run_module_as_main\r\n File \"<frozen runpy>\", line 88, in _run_code\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\bin\\streamlink.exe\\__main__.py\", line 18, in <module>\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\pkgs\\streamlink_cli\\main.py\", line 929, in main\r\n handle_url()\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\pkgs\\streamlink_cli\\main.py\", line 549, in handle_url\r\n streams = fetch_streams_with_retry(plugin, retry_streams, retry_max)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\pkgs\\streamlink_cli\\main.py\", line 458, in fetch_streams_with_retry\r\n streams = fetch_streams(plugin)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\pkgs\\streamlink_cli\\main.py\", line 449, in fetch_streams\r\n return plugin.streams(stream_types=args.stream_types,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", line 375, in streams\r\n ostreams = list(ostreams)\r\n ^^^^^^^^^^^^^^\r\n File \"D:\\_Downloads\\TV+Streaming\\youtubedl-streamlink\\pkgs\\streamlink\\plugins\\vimeo.py\", line 178, in _get_streams\r\n for url in dash.values():\r\n ^^^^^^^^^^^\r\nAttributeError: 'NoneType' object has no attribute 'values'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Global live-streaming and video hosting social platform.\n$url vimeo.com\n$type live, vod\n$metadata id\n$metadata author\n$metadata title\n$notes Password protected streams are not supported\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urljoin, urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.ffmpegmux import MuxedStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\nfrom streamlink.utils.url import 
update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(\n name=\"default\",\n pattern=re.compile(r\"https?://(?:www\\.)?vimeo\\.com/.+\"),\n)\n@pluginmatcher(\n name=\"player\",\n pattern=re.compile(r\"https?://player\\.vimeo\\.com/video/\\d+\"),\n)\nclass Vimeo(Plugin):\n VIEWER_URL = \"https://vimeo.com/_next/viewer\"\n OEMBED_URL = \"https://vimeo.com/api/oembed.json\"\n\n @staticmethod\n def _schema_config(config):\n schema_cdns = validate.all(\n {\n \"cdns\": {\n str: validate.all(\n {\"url\": validate.url()},\n validate.get(\"url\"),\n ),\n },\n },\n validate.get(\"cdns\"),\n )\n schema_config = validate.Schema(\n {\n \"request\": {\n \"files\": {\n validate.optional(\"hls\"): schema_cdns,\n validate.optional(\"dash\"): schema_cdns,\n validate.optional(\"progressive\"): [\n validate.all(\n {\n \"url\": validate.url(),\n \"quality\": str,\n },\n validate.union_get(\"quality\", \"url\"),\n ),\n ],\n },\n validate.optional(\"text_tracks\"): [\n validate.all(\n {\n \"url\": str,\n \"lang\": str,\n },\n validate.union_get(\"lang\", \"url\"),\n ),\n ],\n },\n validate.optional(\"video\"): validate.none_or_all(\n {\n \"id\": int,\n \"title\": str,\n \"owner\": {\n \"name\": str,\n },\n },\n validate.union_get(\n \"id\",\n (\"owner\", \"name\"),\n \"title\",\n ),\n ),\n },\n validate.union_get(\n (\"request\", \"files\", \"hls\"),\n (\"request\", \"files\", \"dash\"),\n (\"request\", \"files\", \"progressive\"),\n (\"request\", \"text_tracks\"),\n \"video\",\n ),\n )\n\n return schema_config.validate(config)\n\n def _get_dash_url(self, url):\n return self.session.http.get(url, schema=validate.Schema(\n validate.parse_json(),\n {\"url\": validate.url()},\n validate.get(\"url\"),\n ))\n\n def _query_player(self):\n return self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"playerConfig\\s*=\\s*({.+?})\\s*var\"),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n validate.transform(self._schema_config),\n ),\n ))\n\n def _query_api(self):\n jwt, api_url = self.session.http.get(\n self.VIEWER_URL,\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"jwt\": str,\n \"apiUrl\": str,\n },\n validate.union_get(\"jwt\", \"apiUrl\"),\n ),\n )\n uri = self.session.http.get(\n self.OEMBED_URL,\n params={\"url\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n {\"uri\": str},\n validate.get(\"uri\"),\n ),\n )\n player_config_url = urljoin(update_scheme(\"https://\", api_url), uri)\n config_url = self.session.http.get(\n player_config_url,\n params={\"fields\": \"config_url\"},\n headers={\"Authorization\": f\"jwt {jwt}\"},\n schema=validate.Schema(\n validate.parse_json(),\n {\"config_url\": validate.url()},\n validate.get(\"config_url\"),\n ),\n )\n\n return self.session.http.get(config_url, schema=validate.Schema(\n validate.parse_json(),\n validate.transform(self._schema_config),\n ))\n\n def _get_streams(self):\n if self.matches[\"player\"]:\n data = self._query_player()\n else:\n data = self._query_api()\n\n if not data:\n return\n\n hls, dash, progressive, text_tracks, metadata = data\n if metadata:\n self.id, self.author, self.title = metadata\n\n streams = []\n\n for url in hls.values():\n streams.extend(HLSStream.parse_variant_playlist(self.session, url).items())\n break\n\n for url in dash.values():\n p = urlparse(url)\n if p.path.endswith(\"dash.mpd\"):\n # LIVE\n url = self._get_dash_url(url)\n elif p.path.endswith(\"master.json\"):\n # VOD\n url = url.replace(\"master.json\", \"master.mpd\")\n else:\n 
log.error(f\"Unsupported DASH path: {p.path}\")\n continue\n\n streams.extend(DASHStream.parse_manifest(self.session, url).items())\n break\n\n streams.extend(\n (quality, HTTPStream(self.session, url))\n for quality, url in progressive or []\n )\n\n if text_tracks and self.session.get_option(\"mux-subtitles\"):\n substreams = {\n lang: HTTPStream(self.session, urljoin(\"https://vimeo.com/\", url))\n for lang, url in text_tracks\n }\n for quality, stream in streams:\n yield quality, MuxedStream(self.session, stream, subtitles=substreams)\n else:\n yield from streams\n\n\n__plugin__ = Vimeo\n", "path": "src/streamlink/plugins/vimeo.py"}], "after_files": [{"content": "\"\"\"\n$description Global live-streaming and video hosting social platform.\n$url vimeo.com\n$type live, vod\n$metadata id\n$metadata author\n$metadata title\n$notes Password protected streams are not supported\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urljoin, urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.ffmpegmux import MuxedStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(\n name=\"default\",\n pattern=re.compile(r\"https?://(?:www\\.)?vimeo\\.com/.+\"),\n)\n@pluginmatcher(\n name=\"player\",\n pattern=re.compile(r\"https?://player\\.vimeo\\.com/video/\\d+\"),\n)\nclass Vimeo(Plugin):\n VIEWER_URL = \"https://vimeo.com/_next/viewer\"\n OEMBED_URL = \"https://vimeo.com/api/oembed.json\"\n\n @staticmethod\n def _schema_config(config):\n schema_cdns = validate.all(\n {\n \"cdns\": {\n str: validate.all(\n {\"url\": validate.url()},\n validate.get(\"url\"),\n ),\n },\n },\n validate.get(\"cdns\"),\n )\n schema_config = validate.Schema(\n {\n \"request\": {\n \"files\": {\n validate.optional(\"hls\"): schema_cdns,\n validate.optional(\"dash\"): schema_cdns,\n validate.optional(\"progressive\"): [\n validate.all(\n {\n \"url\": validate.url(),\n \"quality\": str,\n },\n validate.union_get(\"quality\", \"url\"),\n ),\n ],\n },\n validate.optional(\"text_tracks\"): [\n validate.all(\n {\n \"url\": str,\n \"lang\": str,\n },\n validate.union_get(\"lang\", \"url\"),\n ),\n ],\n },\n validate.optional(\"video\"): validate.none_or_all(\n {\n \"id\": int,\n \"title\": str,\n \"owner\": {\n \"name\": str,\n },\n },\n validate.union_get(\n \"id\",\n (\"owner\", \"name\"),\n \"title\",\n ),\n ),\n },\n validate.union_get(\n (\"request\", \"files\", \"hls\"),\n (\"request\", \"files\", \"dash\"),\n (\"request\", \"files\", \"progressive\"),\n (\"request\", \"text_tracks\"),\n \"video\",\n ),\n )\n\n return schema_config.validate(config)\n\n def _get_dash_url(self, url):\n return self.session.http.get(url, schema=validate.Schema(\n validate.parse_json(),\n {\"url\": validate.url()},\n validate.get(\"url\"),\n ))\n\n def _query_player(self):\n return self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"playerConfig\\s*=\\s*({.+?})\\s*var\"),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n validate.transform(self._schema_config),\n ),\n ))\n\n def _query_api(self):\n jwt, api_url = self.session.http.get(\n self.VIEWER_URL,\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"jwt\": str,\n \"apiUrl\": str,\n },\n validate.union_get(\"jwt\", \"apiUrl\"),\n ),\n )\n uri = 
self.session.http.get(\n self.OEMBED_URL,\n params={\"url\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n {\"uri\": str},\n validate.get(\"uri\"),\n ),\n )\n player_config_url = urljoin(update_scheme(\"https://\", api_url), uri)\n config_url = self.session.http.get(\n player_config_url,\n params={\"fields\": \"config_url\"},\n headers={\"Authorization\": f\"jwt {jwt}\"},\n schema=validate.Schema(\n validate.parse_json(),\n {\"config_url\": validate.url()},\n validate.get(\"config_url\"),\n ),\n )\n\n return self.session.http.get(config_url, schema=validate.Schema(\n validate.parse_json(),\n validate.transform(self._schema_config),\n ))\n\n def _get_streams(self):\n if self.matches[\"player\"]:\n data = self._query_player()\n else:\n data = self._query_api()\n\n if not data:\n return\n\n hls, dash, progressive, text_tracks, metadata = data\n if metadata:\n self.id, self.author, self.title = metadata\n\n streams = []\n\n hls = hls or {}\n for url in hls.values():\n streams.extend(HLSStream.parse_variant_playlist(self.session, url).items())\n break\n\n dash = dash or {}\n for url in dash.values():\n p = urlparse(url)\n if p.path.endswith(\"dash.mpd\"):\n # LIVE\n url = self._get_dash_url(url)\n elif p.path.endswith(\"master.json\"):\n # VOD\n url = url.replace(\"master.json\", \"master.mpd\")\n else:\n log.error(f\"Unsupported DASH path: {p.path}\")\n continue\n\n streams.extend(DASHStream.parse_manifest(self.session, url).items())\n break\n\n streams.extend(\n (quality, HTTPStream(self.session, url))\n for quality, url in progressive or []\n )\n\n if text_tracks and self.session.get_option(\"mux-subtitles\"):\n substreams = {\n lang: HTTPStream(self.session, urljoin(\"https://vimeo.com/\", url))\n for lang, url in text_tracks\n }\n for quality, stream in streams:\n yield quality, MuxedStream(self.session, stream, subtitles=substreams)\n else:\n yield from streams\n\n\n__plugin__ = Vimeo\n", "path": "src/streamlink/plugins/vimeo.py"}]} | 4,053 | 128 |
gh_patches_debug_30463 | rasdani/github-patches | git_diff | psf__black-3217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Preview option support for blackd
It seems that **blackd** does not support [the preview option](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html?highlight=preview#preview-style) now.
I suggest implementing an option for that like it is done for other features https://black.readthedocs.io/en/stable/usage_and_configuration/black_as_a_server.html#protocol
Something like `X-PREVIEW` would be fine.
I faced this while using **[intellij-blackconnect](https://github.com/lensvol/intellij-blackconnect/)** and there is an issue about that too https://github.com/lensvol/intellij-blackconnect/issues/37
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/blackd/__init__.py`
Content:
```
1 import asyncio
2 import logging
3 from concurrent.futures import Executor, ProcessPoolExecutor
4 from datetime import datetime
5 from functools import partial
6 from multiprocessing import freeze_support
7 from typing import Set, Tuple
8
9 try:
10 from aiohttp import web
11
12 from .middlewares import cors
13 except ImportError as ie:
14 raise ImportError(
15 f"aiohttp dependency is not installed: {ie}. "
16 + "Please re-install black with the '[d]' extra install "
17 + "to obtain aiohttp_cors: `pip install black[d]`"
18 ) from None
19
20 import click
21
22 import black
23 from _black_version import version as __version__
24 from black.concurrency import maybe_install_uvloop
25
26 # This is used internally by tests to shut down the server prematurely
27 _stop_signal = asyncio.Event()
28
29 # Request headers
30 PROTOCOL_VERSION_HEADER = "X-Protocol-Version"
31 LINE_LENGTH_HEADER = "X-Line-Length"
32 PYTHON_VARIANT_HEADER = "X-Python-Variant"
33 SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
34 SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma"
35 FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
36 DIFF_HEADER = "X-Diff"
37
38 BLACK_HEADERS = [
39 PROTOCOL_VERSION_HEADER,
40 LINE_LENGTH_HEADER,
41 PYTHON_VARIANT_HEADER,
42 SKIP_STRING_NORMALIZATION_HEADER,
43 SKIP_MAGIC_TRAILING_COMMA,
44 FAST_OR_SAFE_HEADER,
45 DIFF_HEADER,
46 ]
47
48 # Response headers
49 BLACK_VERSION_HEADER = "X-Black-Version"
50
51
52 class InvalidVariantHeader(Exception):
53 pass
54
55
56 @click.command(context_settings={"help_option_names": ["-h", "--help"]})
57 @click.option(
58 "--bind-host", type=str, help="Address to bind the server to.", default="localhost"
59 )
60 @click.option("--bind-port", type=int, help="Port to listen on", default=45484)
61 @click.version_option(version=black.__version__)
62 def main(bind_host: str, bind_port: int) -> None:
63 logging.basicConfig(level=logging.INFO)
64 app = make_app()
65 ver = black.__version__
66 black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}")
67 web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)
68
69
70 def make_app() -> web.Application:
71 app = web.Application(
72 middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))]
73 )
74 executor = ProcessPoolExecutor()
75 app.add_routes([web.post("/", partial(handle, executor=executor))])
76 return app
77
78
79 async def handle(request: web.Request, executor: Executor) -> web.Response:
80 headers = {BLACK_VERSION_HEADER: __version__}
81 try:
82 if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1":
83 return web.Response(
84 status=501, text="This server only supports protocol version 1"
85 )
86 try:
87 line_length = int(
88 request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)
89 )
90 except ValueError:
91 return web.Response(status=400, text="Invalid line length header value")
92
93 if PYTHON_VARIANT_HEADER in request.headers:
94 value = request.headers[PYTHON_VARIANT_HEADER]
95 try:
96 pyi, versions = parse_python_variant_header(value)
97 except InvalidVariantHeader as e:
98 return web.Response(
99 status=400,
100 text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}",
101 )
102 else:
103 pyi = False
104 versions = set()
105
106 skip_string_normalization = bool(
107 request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)
108 )
109 skip_magic_trailing_comma = bool(
110 request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)
111 )
112 fast = False
113 if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
114 fast = True
115 mode = black.FileMode(
116 target_versions=versions,
117 is_pyi=pyi,
118 line_length=line_length,
119 string_normalization=not skip_string_normalization,
120 magic_trailing_comma=not skip_magic_trailing_comma,
121 )
122 req_bytes = await request.content.read()
123 charset = request.charset if request.charset is not None else "utf8"
124 req_str = req_bytes.decode(charset)
125 then = datetime.utcnow()
126
127 loop = asyncio.get_event_loop()
128 formatted_str = await loop.run_in_executor(
129 executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)
130 )
131
132 # Only output the diff in the HTTP response
133 only_diff = bool(request.headers.get(DIFF_HEADER, False))
134 if only_diff:
135 now = datetime.utcnow()
136 src_name = f"In\t{then} +0000"
137 dst_name = f"Out\t{now} +0000"
138 loop = asyncio.get_event_loop()
139 formatted_str = await loop.run_in_executor(
140 executor,
141 partial(black.diff, req_str, formatted_str, src_name, dst_name),
142 )
143
144 return web.Response(
145 content_type=request.content_type,
146 charset=charset,
147 headers=headers,
148 text=formatted_str,
149 )
150 except black.NothingChanged:
151 return web.Response(status=204, headers=headers)
152 except black.InvalidInput as e:
153 return web.Response(status=400, headers=headers, text=str(e))
154 except Exception as e:
155 logging.exception("Exception during handling a request")
156 return web.Response(status=500, headers=headers, text=str(e))
157
158
159 def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
160 if value == "pyi":
161 return True, set()
162 else:
163 versions = set()
164 for version in value.split(","):
165 if version.startswith("py"):
166 version = version[len("py") :]
167 if "." in version:
168 major_str, *rest = version.split(".")
169 else:
170 major_str = version[0]
171 rest = [version[1:]] if len(version) > 1 else []
172 try:
173 major = int(major_str)
174 if major not in (2, 3):
175 raise InvalidVariantHeader("major version must be 2 or 3")
176 if len(rest) > 0:
177 minor = int(rest[0])
178 if major == 2:
179 raise InvalidVariantHeader("Python 2 is not supported")
180 else:
181 # Default to lowest supported minor version.
182 minor = 7 if major == 2 else 3
183 version_str = f"PY{major}{minor}"
184 if major == 3 and not hasattr(black.TargetVersion, version_str):
185 raise InvalidVariantHeader(f"3.{minor} is not supported")
186 versions.add(black.TargetVersion[version_str])
187 except (KeyError, ValueError):
188 raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") from None
189 return False, versions
190
191
192 def patched_main() -> None:
193 maybe_install_uvloop()
194 freeze_support()
195 black.patch_click()
196 main()
197
198
199 if __name__ == "__main__":
200 patched_main()
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py
--- a/src/blackd/__init__.py
+++ b/src/blackd/__init__.py
@@ -32,6 +32,7 @@
PYTHON_VARIANT_HEADER = "X-Python-Variant"
SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma"
+PREVIEW = "X-Preview"
FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
DIFF_HEADER = "X-Diff"
@@ -41,6 +42,7 @@
PYTHON_VARIANT_HEADER,
SKIP_STRING_NORMALIZATION_HEADER,
SKIP_MAGIC_TRAILING_COMMA,
+ PREVIEW,
FAST_OR_SAFE_HEADER,
DIFF_HEADER,
]
@@ -109,6 +111,7 @@
skip_magic_trailing_comma = bool(
request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)
)
+ preview = bool(request.headers.get(PREVIEW, False))
fast = False
if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
fast = True
@@ -118,6 +121,7 @@
line_length=line_length,
string_normalization=not skip_string_normalization,
magic_trailing_comma=not skip_magic_trailing_comma,
+ preview=preview,
)
req_bytes = await request.content.read()
charset = request.charset if request.charset is not None else "utf8"
| {"golden_diff": "diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py\n--- a/src/blackd/__init__.py\n+++ b/src/blackd/__init__.py\n@@ -32,6 +32,7 @@\n PYTHON_VARIANT_HEADER = \"X-Python-Variant\"\n SKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\n SKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\n+PREVIEW = \"X-Preview\"\n FAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\n DIFF_HEADER = \"X-Diff\"\n \n@@ -41,6 +42,7 @@\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n+ PREVIEW,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n ]\n@@ -109,6 +111,7 @@\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n+ preview = bool(request.headers.get(PREVIEW, False))\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n@@ -118,6 +121,7 @@\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n+ preview=preview,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n", "issue": "Preview option support for blackd\nIt seems that **blackd** does not support [the preview option](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html?highlight=preview#preview-style) now.\r\n\r\nI suggest implementing an option for that like it is done for other features https://black.readthedocs.io/en/stable/usage_and_configuration/black_as_a_server.html#protocol\r\n\r\nSomething like `X-PREVIEW` would be fine.\r\n\r\nI faced this while using **[intellij-blackconnect](https://github.com/lensvol/intellij-blackconnect/)** and there is an issue about that too https://github.com/lensvol/intellij-blackconnect/issues/37\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\ntry:\n from aiohttp import web\n\n from .middlewares import cors\nexcept ImportError as ie:\n raise ImportError(\n f\"aiohttp dependency is not installed: {ie}. 
\"\n + \"Please re-install black with the '[d]' extra install \"\n + \"to obtain aiohttp_cors: `pip install black[d]`\"\n ) from None\n\nimport click\n\nimport black\nfrom _black_version import version as __version__\nfrom black.concurrency import maybe_install_uvloop\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nSKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application(\n middlewares=[cors(allow_headers=(*BLACK_HEADERS, \"Content-Type\"))]\n )\n executor = ProcessPoolExecutor()\n app.add_routes([web.post(\"/\", partial(handle, executor=executor))])\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await 
loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2:\n raise InvalidVariantHeader(\"Python 2 is not supported\")\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major == 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\") from None\n return False, versions\n\n\ndef patched_main() -> None:\n maybe_install_uvloop()\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}], "after_files": [{"content": "import asyncio\nimport logging\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\ntry:\n from aiohttp import web\n\n from .middlewares import cors\nexcept ImportError as ie:\n raise ImportError(\n f\"aiohttp dependency is not installed: {ie}. 
\"\n + \"Please re-install black with the '[d]' extra install \"\n + \"to obtain aiohttp_cors: `pip install black[d]`\"\n ) from None\n\nimport click\n\nimport black\nfrom _black_version import version as __version__\nfrom black.concurrency import maybe_install_uvloop\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nSKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\nPREVIEW = \"X-Preview\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n PREVIEW,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application(\n middlewares=[cors(allow_headers=(*BLACK_HEADERS, \"Content-Type\"))]\n )\n executor = ProcessPoolExecutor()\n app.add_routes([web.post(\"/\", partial(handle, executor=executor))])\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n preview = bool(request.headers.get(PREVIEW, False))\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n preview=preview,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = 
req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2:\n raise InvalidVariantHeader(\"Python 2 is not supported\")\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major == 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\") from None\n return False, versions\n\n\ndef patched_main() -> None:\n maybe_install_uvloop()\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}]} | 2,464 | 341 |
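For context on the record above: a minimal client-side sketch of how the `X-Preview` header added by the golden diff could be exercised. It assumes a blackd instance already running on the default port 45484 shown in the source, and it uses the third-party `requests` library, which is an assumption of this sketch rather than part of the patch; on the server side any non-empty header value becomes truthy via `bool(...)`.

```python
# Hedged sketch: send unformatted source to blackd with the X-Preview header
# introduced by the patch above. The URL and port assume a local default setup.
import requests  # assumption: third-party HTTP client, not part of the patch

source = "x = {'a':1,'b':2}\n"

resp = requests.post(
    "http://localhost:45484/",
    data=source.encode("utf-8"),
    headers={"X-Preview": "true"},  # any non-empty value enables preview mode
)

if resp.status_code == 200:
    print(resp.text)                      # reformatted source, preview style on
elif resp.status_code == 204:
    print("input was already formatted")  # mirrors the NothingChanged branch
else:
    print(resp.status_code, resp.text)    # 400/500 error paths from the handler
```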
gh_patches_debug_56767 | rasdani/github-patches | git_diff | DataDog__dd-agent-2387 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[iis] Non-digit value in WMI metric name
Getting this warning in v5.7.1 and `iis.site_up` is permanently triggered.
```
2016-03-16 14:57:40 GMT Standard Time | WARNING | checks.iis(iis.py:127) | When extracting metrics with WMI, found a non digit value for property 'name'.
```
This was working fine in v5.6.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks.d/iis.py`
Content:
```
1 '''
2 Check the performance counters from IIS
3 '''
4 # 3p
5 import pythoncom
6
7 # project
8 from checks import AgentCheck
9 from checks.wmi_check import WinWMICheck, WMIMetric
10 from config import _is_affirmative
11 from utils.containers import hash_mutable
12 from utils.timeout import TimeoutException
13
14
15 class IIS(WinWMICheck):
16 METRICS = [
17 ('ServiceUptime', 'iis.uptime', 'gauge'),
18
19 # Network
20 ('TotalBytesSent','iis.net.bytes_sent', 'rate'),
21 ('TotalBytesReceived', 'iis.net.bytes_rcvd', 'rate'),
22 ('TotalBytesTransferred', 'iis.net.bytes_total', 'rate'),
23 ('CurrentConnections', 'iis.net.num_connections', 'gauge'),
24 ('TotalFilesSent', 'iis.net.files_sent', 'rate'),
25 ('TotalFilesReceived', 'iis.net.files_rcvd', 'rate'),
26 ('TotalConnectionAttemptsAllInstances', 'iis.net.connection_attempts', 'rate'),
27
28 # HTTP Methods
29 ('TotalGetRequests', 'iis.httpd_request_method.get', 'rate'),
30 ('TotalPostRequests', 'iis.httpd_request_method.post', 'rate'),
31 ('TotalHeadRequests', 'iis.httpd_request_method.head', 'rate'),
32 ('TotalPutRequests', 'iis.httpd_request_method.put', 'rate'),
33 ('TotalDeleteRequests', 'iis.httpd_request_method.delete', 'rate'),
34 ('TotalOptionsRequests', 'iis.httpd_request_method.options', 'rate'),
35 ('TotalTraceRequests', 'iis.httpd_request_method.trace', 'rate'),
36
37 # Errors
38 ('TotalNotFoundErrors', 'iis.errors.not_found', 'rate'),
39 ('TotalLockedErrors', 'iis.errors.locked', 'rate'),
40
41 # Users
42 ('TotalAnonymousUsers', 'iis.users.anon', 'rate'),
43 ('TotalNonAnonymousUsers', 'iis.users.nonanon', 'rate'),
44
45 # Requests
46 ('TotalCGIRequests', 'iis.requests.cgi', 'rate'),
47 ('TotalISAPIExtensionRequests', 'iis.requests.isapi', 'rate'),
48 ]
49 SERVICE_CHECK = "iis.site_up"
50
51 NAMESPACE = "root\\CIMV2"
52 CLASS = "Win32_PerfFormattedData_W3SVC_WebService"
53
54 def __init__(self, name, init_config, agentConfig, instances):
55 WinWMICheck.__init__(self, name, init_config, agentConfig, instances)
56
57 def check(self, instance):
58 # Connect to the WMI provider
59 host = instance.get('host', "localhost")
60 user = instance.get('username', "")
61 password = instance.get('password', "")
62 instance_tags = instance.get('tags', [])
63 sites = instance.get('sites', ['_Total'])
64 is_2008 = _is_affirmative(instance.get('is_2008', False))
65
66
67 instance_hash = hash_mutable(instance)
68 instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)
69 filters = map(lambda x: {"Name": tuple(('=', x))}, sites)
70
71 metrics_by_property, properties = self._get_wmi_properties(instance_key, self.METRICS, [])
72 if is_2008:
73 for idx, prop in enumerate(properties):
74 if prop == "TotalBytesTransferred".lower():
75 properties[idx] = "TotalBytesTransfered"
76
77 wmi_sampler = self._get_wmi_sampler(
78 instance_key,
79 self.CLASS, properties,
80 filters=filters,
81 host=host, namespace=self.NAMESPACE,
82 username=user, password=password
83 )
84
85 # Sample, extract & submit metrics
86 try:
87 wmi_sampler.sample()
88
89 metrics = self._extract_metrics(wmi_sampler, sites, instance_tags)
90 except TimeoutException:
91 self.log.warning(
92 u"[IIS] WMI query timed out."
93 u" class={wmi_class} - properties={wmi_properties} -"
94 u" filters={filters} - tags={instance_tags}".format(
95 wmi_class=self.CLASS, wmi_properties=properties,
96 filters=filters, instance_tags=instance_tags
97 )
98 )
99 except pythoncom.com_error as e:
100 if '0x80041017' in str(e):
101 self.warning("You may be running IIS6/7 which reports metrics a \
102 little differently. Try enabling the is_2008 flag for this instance.")
103 raise e
104 else:
105 self._submit_events(wmi_sampler, sites)
106 self._submit_metrics(metrics, metrics_by_property)
107
108 def _extract_metrics(self, wmi_sampler, sites, tags):
109 """
110 Extract and tag metrics from the WMISampler.
111
112 Returns: List of WMIMetric
113 ```
114 [
115 WMIMetric("freemegabytes", 19742, ["name:_total"]),
116 WMIMetric("avgdiskbytesperwrite", 1536, ["name:c:"]),
117 ]
118 ```
119 """
120 metrics = []
121
122 for wmi_obj in wmi_sampler:
123 tags = list(tags) if tags else []
124
125 # get site name
126 sitename = wmi_obj['Name']
127
128 # Skip any sites we don't specifically want.
129 if sitename not in sites:
130 continue
131 elif sitename != "_Total":
132 tags.append("site:{0}".format(self.normalize(sitename)))
133
134 # Tag with `tag_queries` parameter
135 for wmi_property, wmi_value in wmi_obj.iteritems():
136 # Tag with `tag_by` parameter
137 try:
138 metrics.append(WMIMetric(wmi_property, float(wmi_value), tags))
139 except ValueError:
140 self.log.warning(u"When extracting metrics with WMI, found a non digit value"
141 " for property '{0}'.".format(wmi_property))
142 continue
143 except TypeError:
144 self.log.warning(u"When extracting metrics with WMI, found a missing property"
145 " '{0}'".format(wmi_property))
146 continue
147 return metrics
148
149 def _submit_events(self, wmi_sampler, sites):
150 expected_sites = set(sites)
151
152 for wmi_obj in wmi_sampler:
153 sitename = wmi_obj['Name']
154 if sitename == "_Total":
155 continue
156
157 uptime = wmi_obj["ServiceUptime"]
158 status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK
159
160 self.service_check(self.SERVICE_CHECK, status, tags=['site:{0}'.format(self.normalize(sitename))])
161 expected_sites.remove(sitename)
162
163 for site in expected_sites:
164 self.service_check(self.SERVICE_CHECK, AgentCheck.CRITICAL,
165 tags=['site:{0}'.format(self.normalize(site))])
166
167
168 def _submit_metrics(self, wmi_metrics, metrics_by_property):
169 for m in wmi_metrics:
170 metric_name = m.name
171 # Windows 2008 sp2 reports it as TotalbytesTransfered
172 # instead of TotalBytesTransferred (single r)
173 if metric_name.lower() == "totalbytestransfered":
174 metric_name = "totalbytestransferred"
175 elif m.name not in metrics_by_property:
176 continue
177
178 metric, mtype = metrics_by_property[metric_name]
179 submittor = getattr(self, mtype)
180 submittor(metric, m.value, m.tags)
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checks.d/iis.py b/checks.d/iis.py
--- a/checks.d/iis.py
+++ b/checks.d/iis.py
@@ -151,9 +151,6 @@
for wmi_obj in wmi_sampler:
sitename = wmi_obj['Name']
- if sitename == "_Total":
- continue
-
uptime = wmi_obj["ServiceUptime"]
status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK
| {"golden_diff": "diff --git a/checks.d/iis.py b/checks.d/iis.py\n--- a/checks.d/iis.py\n+++ b/checks.d/iis.py\n@@ -151,9 +151,6 @@\n \n for wmi_obj in wmi_sampler:\n sitename = wmi_obj['Name']\n- if sitename == \"_Total\":\n- continue\n-\n uptime = wmi_obj[\"ServiceUptime\"]\n status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK\n", "issue": "[iis] Non-digit value in WMI metric name\nGetting this warning in v5.7.1 and `iis.site_up` is permanently triggered.\n\n```\n2016-03-16 14:57:40 GMT Standard Time | WARNING | checks.iis(iis.py:127) | When extracting metrics with WMI, found a non digit value for property 'name'.\n```\n\nThis was working fine in v5.6.1\n\n", "before_files": [{"content": "'''\nCheck the performance counters from IIS\n'''\n# 3p\nimport pythoncom\n\n# project\nfrom checks import AgentCheck\nfrom checks.wmi_check import WinWMICheck, WMIMetric\nfrom config import _is_affirmative\nfrom utils.containers import hash_mutable\nfrom utils.timeout import TimeoutException\n\n\nclass IIS(WinWMICheck):\n METRICS = [\n ('ServiceUptime', 'iis.uptime', 'gauge'),\n\n # Network\n ('TotalBytesSent','iis.net.bytes_sent', 'rate'),\n ('TotalBytesReceived', 'iis.net.bytes_rcvd', 'rate'),\n ('TotalBytesTransferred', 'iis.net.bytes_total', 'rate'),\n ('CurrentConnections', 'iis.net.num_connections', 'gauge'),\n ('TotalFilesSent', 'iis.net.files_sent', 'rate'),\n ('TotalFilesReceived', 'iis.net.files_rcvd', 'rate'),\n ('TotalConnectionAttemptsAllInstances', 'iis.net.connection_attempts', 'rate'),\n\n # HTTP Methods\n ('TotalGetRequests', 'iis.httpd_request_method.get', 'rate'),\n ('TotalPostRequests', 'iis.httpd_request_method.post', 'rate'),\n ('TotalHeadRequests', 'iis.httpd_request_method.head', 'rate'),\n ('TotalPutRequests', 'iis.httpd_request_method.put', 'rate'),\n ('TotalDeleteRequests', 'iis.httpd_request_method.delete', 'rate'),\n ('TotalOptionsRequests', 'iis.httpd_request_method.options', 'rate'),\n ('TotalTraceRequests', 'iis.httpd_request_method.trace', 'rate'),\n\n # Errors\n ('TotalNotFoundErrors', 'iis.errors.not_found', 'rate'),\n ('TotalLockedErrors', 'iis.errors.locked', 'rate'),\n\n # Users\n ('TotalAnonymousUsers', 'iis.users.anon', 'rate'),\n ('TotalNonAnonymousUsers', 'iis.users.nonanon', 'rate'),\n\n # Requests\n ('TotalCGIRequests', 'iis.requests.cgi', 'rate'),\n ('TotalISAPIExtensionRequests', 'iis.requests.isapi', 'rate'),\n ]\n SERVICE_CHECK = \"iis.site_up\"\n\n NAMESPACE = \"root\\\\CIMV2\"\n CLASS = \"Win32_PerfFormattedData_W3SVC_WebService\"\n\n def __init__(self, name, init_config, agentConfig, instances):\n WinWMICheck.__init__(self, name, init_config, agentConfig, instances)\n\n def check(self, instance):\n # Connect to the WMI provider\n host = instance.get('host', \"localhost\")\n user = instance.get('username', \"\")\n password = instance.get('password', \"\")\n instance_tags = instance.get('tags', [])\n sites = instance.get('sites', ['_Total'])\n is_2008 = _is_affirmative(instance.get('is_2008', False))\n\n\n instance_hash = hash_mutable(instance)\n instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)\n filters = map(lambda x: {\"Name\": tuple(('=', x))}, sites)\n\n metrics_by_property, properties = self._get_wmi_properties(instance_key, self.METRICS, [])\n if is_2008:\n for idx, prop in enumerate(properties):\n if prop == \"TotalBytesTransferred\".lower():\n properties[idx] = \"TotalBytesTransfered\"\n\n wmi_sampler = self._get_wmi_sampler(\n instance_key,\n self.CLASS, properties,\n filters=filters,\n 
host=host, namespace=self.NAMESPACE,\n username=user, password=password\n )\n\n # Sample, extract & submit metrics\n try:\n wmi_sampler.sample()\n\n metrics = self._extract_metrics(wmi_sampler, sites, instance_tags)\n except TimeoutException:\n self.log.warning(\n u\"[IIS] WMI query timed out.\"\n u\" class={wmi_class} - properties={wmi_properties} -\"\n u\" filters={filters} - tags={instance_tags}\".format(\n wmi_class=self.CLASS, wmi_properties=properties,\n filters=filters, instance_tags=instance_tags\n )\n )\n except pythoncom.com_error as e:\n if '0x80041017' in str(e):\n self.warning(\"You may be running IIS6/7 which reports metrics a \\\n little differently. Try enabling the is_2008 flag for this instance.\")\n raise e\n else:\n self._submit_events(wmi_sampler, sites)\n self._submit_metrics(metrics, metrics_by_property)\n\n def _extract_metrics(self, wmi_sampler, sites, tags):\n \"\"\"\n Extract and tag metrics from the WMISampler.\n\n Returns: List of WMIMetric\n ```\n [\n WMIMetric(\"freemegabytes\", 19742, [\"name:_total\"]),\n WMIMetric(\"avgdiskbytesperwrite\", 1536, [\"name:c:\"]),\n ]\n ```\n \"\"\"\n metrics = []\n\n for wmi_obj in wmi_sampler:\n tags = list(tags) if tags else []\n\n # get site name\n sitename = wmi_obj['Name']\n\n # Skip any sites we don't specifically want.\n if sitename not in sites:\n continue\n elif sitename != \"_Total\":\n tags.append(\"site:{0}\".format(self.normalize(sitename)))\n\n # Tag with `tag_queries` parameter\n for wmi_property, wmi_value in wmi_obj.iteritems():\n # Tag with `tag_by` parameter\n try:\n metrics.append(WMIMetric(wmi_property, float(wmi_value), tags))\n except ValueError:\n self.log.warning(u\"When extracting metrics with WMI, found a non digit value\"\n \" for property '{0}'.\".format(wmi_property))\n continue\n except TypeError:\n self.log.warning(u\"When extracting metrics with WMI, found a missing property\"\n \" '{0}'\".format(wmi_property))\n continue\n return metrics\n\n def _submit_events(self, wmi_sampler, sites):\n expected_sites = set(sites)\n\n for wmi_obj in wmi_sampler:\n sitename = wmi_obj['Name']\n if sitename == \"_Total\":\n continue\n\n uptime = wmi_obj[\"ServiceUptime\"]\n status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK\n\n self.service_check(self.SERVICE_CHECK, status, tags=['site:{0}'.format(self.normalize(sitename))])\n expected_sites.remove(sitename)\n\n for site in expected_sites:\n self.service_check(self.SERVICE_CHECK, AgentCheck.CRITICAL,\n tags=['site:{0}'.format(self.normalize(site))])\n\n\n def _submit_metrics(self, wmi_metrics, metrics_by_property):\n for m in wmi_metrics:\n metric_name = m.name\n # Windows 2008 sp2 reports it as TotalbytesTransfered\n # instead of TotalBytesTransferred (single r)\n if metric_name.lower() == \"totalbytestransfered\":\n metric_name = \"totalbytestransferred\"\n elif m.name not in metrics_by_property:\n continue\n\n metric, mtype = metrics_by_property[metric_name]\n submittor = getattr(self, mtype)\n submittor(metric, m.value, m.tags)\n", "path": "checks.d/iis.py"}], "after_files": [{"content": "'''\nCheck the performance counters from IIS\n'''\n# 3p\nimport pythoncom\n\n# project\nfrom checks import AgentCheck\nfrom checks.wmi_check import WinWMICheck, WMIMetric\nfrom config import _is_affirmative\nfrom utils.containers import hash_mutable\nfrom utils.timeout import TimeoutException\n\n\nclass IIS(WinWMICheck):\n METRICS = [\n ('ServiceUptime', 'iis.uptime', 'gauge'),\n\n # Network\n ('TotalBytesSent','iis.net.bytes_sent', 'rate'),\n 
('TotalBytesReceived', 'iis.net.bytes_rcvd', 'rate'),\n ('TotalBytesTransferred', 'iis.net.bytes_total', 'rate'),\n ('CurrentConnections', 'iis.net.num_connections', 'gauge'),\n ('TotalFilesSent', 'iis.net.files_sent', 'rate'),\n ('TotalFilesReceived', 'iis.net.files_rcvd', 'rate'),\n ('TotalConnectionAttemptsAllInstances', 'iis.net.connection_attempts', 'rate'),\n\n # HTTP Methods\n ('TotalGetRequests', 'iis.httpd_request_method.get', 'rate'),\n ('TotalPostRequests', 'iis.httpd_request_method.post', 'rate'),\n ('TotalHeadRequests', 'iis.httpd_request_method.head', 'rate'),\n ('TotalPutRequests', 'iis.httpd_request_method.put', 'rate'),\n ('TotalDeleteRequests', 'iis.httpd_request_method.delete', 'rate'),\n ('TotalOptionsRequests', 'iis.httpd_request_method.options', 'rate'),\n ('TotalTraceRequests', 'iis.httpd_request_method.trace', 'rate'),\n\n # Errors\n ('TotalNotFoundErrors', 'iis.errors.not_found', 'rate'),\n ('TotalLockedErrors', 'iis.errors.locked', 'rate'),\n\n # Users\n ('TotalAnonymousUsers', 'iis.users.anon', 'rate'),\n ('TotalNonAnonymousUsers', 'iis.users.nonanon', 'rate'),\n\n # Requests\n ('TotalCGIRequests', 'iis.requests.cgi', 'rate'),\n ('TotalISAPIExtensionRequests', 'iis.requests.isapi', 'rate'),\n ]\n SERVICE_CHECK = \"iis.site_up\"\n\n NAMESPACE = \"root\\\\CIMV2\"\n CLASS = \"Win32_PerfFormattedData_W3SVC_WebService\"\n\n def __init__(self, name, init_config, agentConfig, instances):\n WinWMICheck.__init__(self, name, init_config, agentConfig, instances)\n\n def check(self, instance):\n # Connect to the WMI provider\n host = instance.get('host', \"localhost\")\n user = instance.get('username', \"\")\n password = instance.get('password', \"\")\n instance_tags = instance.get('tags', [])\n sites = instance.get('sites', ['_Total'])\n is_2008 = _is_affirmative(instance.get('is_2008', False))\n\n\n instance_hash = hash_mutable(instance)\n instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)\n filters = map(lambda x: {\"Name\": tuple(('=', x))}, sites)\n\n metrics_by_property, properties = self._get_wmi_properties(instance_key, self.METRICS, [])\n if is_2008:\n for idx, prop in enumerate(properties):\n if prop == \"TotalBytesTransferred\".lower():\n properties[idx] = \"TotalBytesTransfered\"\n\n wmi_sampler = self._get_wmi_sampler(\n instance_key,\n self.CLASS, properties,\n filters=filters,\n host=host, namespace=self.NAMESPACE,\n username=user, password=password\n )\n\n # Sample, extract & submit metrics\n try:\n wmi_sampler.sample()\n\n metrics = self._extract_metrics(wmi_sampler, sites, instance_tags)\n except TimeoutException:\n self.log.warning(\n u\"[IIS] WMI query timed out.\"\n u\" class={wmi_class} - properties={wmi_properties} -\"\n u\" filters={filters} - tags={instance_tags}\".format(\n wmi_class=self.CLASS, wmi_properties=properties,\n filters=filters, instance_tags=instance_tags\n )\n )\n except pythoncom.com_error as e:\n if '0x80041017' in str(e):\n self.warning(\"You may be running IIS6/7 which reports metrics a \\\n little differently. 
Try enabling the is_2008 flag for this instance.\")\n raise e\n else:\n self._submit_events(wmi_sampler, sites)\n self._submit_metrics(metrics, metrics_by_property)\n\n def _extract_metrics(self, wmi_sampler, sites, tags):\n \"\"\"\n Extract and tag metrics from the WMISampler.\n\n Returns: List of WMIMetric\n ```\n [\n WMIMetric(\"freemegabytes\", 19742, [\"name:_total\"]),\n WMIMetric(\"avgdiskbytesperwrite\", 1536, [\"name:c:\"]),\n ]\n ```\n \"\"\"\n metrics = []\n\n for wmi_obj in wmi_sampler:\n tags = list(tags) if tags else []\n\n # get site name\n sitename = wmi_obj['Name']\n\n # Skip any sites we don't specifically want.\n if sitename not in sites:\n continue\n elif sitename != \"_Total\":\n tags.append(\"site:{0}\".format(self.normalize(sitename)))\n\n # Tag with `tag_queries` parameter\n for wmi_property, wmi_value in wmi_obj.iteritems():\n # Tag with `tag_by` parameter\n try:\n metrics.append(WMIMetric(wmi_property, float(wmi_value), tags))\n except ValueError:\n self.log.warning(u\"When extracting metrics with WMI, found a non digit value\"\n \" for property '{0}'.\".format(wmi_property))\n continue\n except TypeError:\n self.log.warning(u\"When extracting metrics with WMI, found a missing property\"\n \" '{0}'\".format(wmi_property))\n continue\n return metrics\n\n def _submit_events(self, wmi_sampler, sites):\n expected_sites = set(sites)\n\n for wmi_obj in wmi_sampler:\n sitename = wmi_obj['Name']\n uptime = wmi_obj[\"ServiceUptime\"]\n status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK\n\n self.service_check(self.SERVICE_CHECK, status, tags=['site:{0}'.format(self.normalize(sitename))])\n expected_sites.remove(sitename)\n\n for site in expected_sites:\n self.service_check(self.SERVICE_CHECK, AgentCheck.CRITICAL,\n tags=['site:{0}'.format(self.normalize(site))])\n\n\n def _submit_metrics(self, wmi_metrics, metrics_by_property):\n for m in wmi_metrics:\n metric_name = m.name\n # Windows 2008 sp2 reports it as TotalbytesTransfered\n # instead of TotalBytesTransferred (single r)\n if metric_name.lower() == \"totalbytestransfered\":\n metric_name = \"totalbytestransferred\"\n elif m.name not in metrics_by_property:\n continue\n\n metric, mtype = metrics_by_property[metric_name]\n submittor = getattr(self, mtype)\n submittor(metric, m.value, m.tags)\n", "path": "checks.d/iis.py"}]} | 2,416 | 113 |
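To make the record above concrete: a reduced, self-contained sketch of the `_submit_events` control flow, using plain dictionaries as stand-ins for WMI sampler rows (the row values are invented). It shows why the default `sites: ['_Total']` configuration left `iis.site_up` permanently critical until the `continue` on `_Total` was removed by the golden diff.

```python
# Hedged sketch: simplified re-creation of the service-check logic above.
# skip_total=True models the pre-patch code path, skip_total=False the patched one.
def submit_events(sampler_rows, sites, skip_total):
    statuses = {}
    expected = set(sites)
    for row in sampler_rows:
        name = row["Name"]
        if skip_total and name == "_Total":
            continue                      # pre-patch: _Total never gets a status here
        statuses[name] = "OK" if row["ServiceUptime"] else "CRITICAL"
        expected.discard(name)
    for site in expected:                 # any site never reported is marked CRITICAL
        statuses[site] = "CRITICAL"
    return statuses

rows = [{"Name": "_Total", "ServiceUptime": 12345}]       # invented sampler output
print(submit_events(rows, ["_Total"], skip_total=True))   # {'_Total': 'CRITICAL'}
print(submit_events(rows, ["_Total"], skip_total=False))  # {'_Total': 'OK'}
```

Note that the separate warning about a non-digit value for property 'name' appears to come from `_extract_metrics` attempting `float()` on the string-valued `Name` property; the golden diff only addresses the permanently triggered service check.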
gh_patches_debug_30513 | rasdani/github-patches | git_diff | great-expectations__great_expectations-1292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
QueryBatchKwargsGenerator._get_raw_query produces incorrect filepath
I am using the `QueryBatchKwargsGenerator` to generate `BATCH_KWARGS` using a SQL file I have stored on disk. My `great_expectations.yml` looks like:
```yaml
datasources:
domi:
class_name: SqlAlchemyDatasource
module_name:
data_asset_type:
class_name: SqlAlchemyDataset
credentials:
drivername: postgres
host: ${HOST}
port: ${PORT}
username: ${USER}
password: ${PASSWORD}
database: ${DBNAME}
generators:
apartments-generator:
class_name: QueryBatchKwargsGenerator
name: apartments
```
I am running the following:
```python
>>> import great_expectations as ge
>>> context = ge.data_context.DataContext()
>>> context.get_available_data_asset_names()
{'domi': {'apartments-generator': {'names': [('listings', 'query')]}}}
>>> context.build_batch_kwargs(
datasource='domi', generator='apartments-generator', name='listings'
)
```
Which raises this error:

This happens because `_get_raw_query` calls `tuple('listings')`, which returns `('l', 'i', 's', 't', 'i', 'n', 'g', 's')`.
```python
def _get_raw_query(self, generator_asset):
return self._store_backend.get(tuple(generator_asset))
```
I believe this should just be replaced with:
```python
def _get_raw_query(self, generator_asset):
return self._store_backend.get((generator_asset,))
```
Currently I can get around this by providing a tuple instead of a string, i.e.
```python
context.build_batch_kwargs(
datasource='domi',
generator='apartments-generator',
name=('listings', )
)
```
But that goes against what the docs are saying [here](https://docs.greatexpectations.io/en/latest/module_docs/data_context_module.html#great_expectations.data_context.BaseDataContext.build_batch_kwargs):

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/datasource/generator/query_generator.py`
Content:
```
1 import os
2 import logging
3
4 from .batch_kwargs_generator import BatchKwargsGenerator
5 from great_expectations.datasource.types import SqlAlchemyDatasourceQueryBatchKwargs
6 from great_expectations.exceptions import (
7 BatchKwargsError,
8 ClassInstantiationError,
9 )
10 from ...data_context.util import instantiate_class_from_config
11
12 logger = logging.getLogger(__name__)
13
14 try:
15 import sqlalchemy
16 from sqlalchemy import create_engine
17 from sqlalchemy.engine import reflection
18 except ImportError:
19 sqlalchemy = None
20 create_engine = None
21 reflection = None
22 logger.debug("Unable to import sqlalchemy.")
23
24
25 class QueryBatchKwargsGenerator(BatchKwargsGenerator):
26 """Produce query-style batch_kwargs from sql files stored on disk
27 """
28 recognized_batch_parameters = {'query_parameters', 'partition_id'}
29
30 def __init__(self, name="default", datasource=None, query_store_backend=None, queries=None):
31 super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)
32 root_directory = None
33 if query_store_backend is None:
34 # We will choose a Tuple store if there is a configured DataContext with a root_directory,
35 # and an InMemoryStore otherwise
36 if datasource and datasource.data_context and datasource.data_context.root_directory:
37 query_store_backend = {
38 "class_name": "TupleFilesystemStoreBackend",
39 "base_directory": os.path.join(datasource.data_context.root_directory, "datasources",
40 datasource.name, "generators", name),
41 "filepath_suffix": ".sql"
42 }
43 root_directory = datasource.data_context.root_directory
44 else:
45 query_store_backend = {
46 "class_name": "InMemoryStoreBackend"
47 }
48 module_name = 'great_expectations.data_context.store'
49 self._store_backend = instantiate_class_from_config(
50 config=query_store_backend,
51 runtime_environment={
52 "root_directory": root_directory
53 },
54 config_defaults={
55 "module_name": module_name
56 }
57 )
58 if not self._store_backend:
59 raise ClassInstantiationError(
60 module_name=module_name,
61 package_name=None,
62 class_name=query_store_backend['class_name']
63 )
64 if queries is not None:
65 for query_name, query in queries.items():
66 self.add_query(query_name, query)
67
68 def _get_raw_query(self, generator_asset):
69 return self._store_backend.get(tuple(generator_asset))
70
71 def _get_iterator(self, generator_asset, query_parameters=None):
72 raw_query = self._get_raw_query(generator_asset)
73 if raw_query is None:
74 logger.warning("No query defined for generator asset: %s" % generator_asset)
75 # There is no valid query path or temp query storage defined with the generator_asset
76 return None
77
78 if query_parameters is None:
79 iter_ = iter([
80 SqlAlchemyDatasourceQueryBatchKwargs(
81 query=raw_query
82 )])
83 else:
84 iter_= iter([
85 SqlAlchemyDatasourceQueryBatchKwargs(
86 query=raw_query,
87 query_parameters=query_parameters
88 )])
89
90 return iter_
91
92 def add_query(self, generator_asset, query):
93 # Backends must have a tuple key; we use only a single-element tuple
94 self._store_backend.set(tuple(generator_asset), query)
95
96 def get_available_data_asset_names(self):
97 defined_queries = self._store_backend.list_keys()
98 # Backends must have a tuple key; we use only a single-element tuple
99 return {"names": [(query_key_tuple[0], "query") for query_key_tuple in defined_queries]}
100
101 def _build_batch_kwargs(self, batch_parameters):
102 """Build batch kwargs from a partition id."""
103 generator_asset = batch_parameters.pop("name")
104 raw_query = self._get_raw_query(generator_asset)
105 partition_id = batch_parameters.pop("partition_id", None)
106 batch_kwargs = self._datasource.process_batch_parameters(**batch_parameters)
107 batch_kwargs["query"] = raw_query
108
109 if partition_id:
110 if not batch_kwargs["query_parameters"]:
111 batch_kwargs["query_parameters"] = {}
112 batch_kwargs["query_parameters"]["partition_id"] = partition_id
113
114 return SqlAlchemyDatasourceQueryBatchKwargs(batch_kwargs)
115
116 def get_available_partition_ids(self, generator_asset):
117 raise BatchKwargsError("QueryBatchKwargsGenerator cannot identify partitions.", {})
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/datasource/generator/query_generator.py b/great_expectations/datasource/generator/query_generator.py
--- a/great_expectations/datasource/generator/query_generator.py
+++ b/great_expectations/datasource/generator/query_generator.py
@@ -25,7 +25,7 @@
class QueryBatchKwargsGenerator(BatchKwargsGenerator):
"""Produce query-style batch_kwargs from sql files stored on disk
"""
- recognized_batch_parameters = {'query_parameters', 'partition_id'}
+ recognized_batch_parameters = {'query_parameters', 'partition_id', 'name'}
def __init__(self, name="default", datasource=None, query_store_backend=None, queries=None):
super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)
@@ -66,7 +66,7 @@
self.add_query(query_name, query)
def _get_raw_query(self, generator_asset):
- return self._store_backend.get(tuple(generator_asset))
+ return self._store_backend.get((generator_asset,))
def _get_iterator(self, generator_asset, query_parameters=None):
raw_query = self._get_raw_query(generator_asset)
@@ -91,7 +91,7 @@
def add_query(self, generator_asset, query):
# Backends must have a tuple key; we use only a single-element tuple
- self._store_backend.set(tuple(generator_asset), query)
+ self._store_backend.set((generator_asset,), query)
def get_available_data_asset_names(self):
defined_queries = self._store_backend.list_keys()
| {"golden_diff": "diff --git a/great_expectations/datasource/generator/query_generator.py b/great_expectations/datasource/generator/query_generator.py\n--- a/great_expectations/datasource/generator/query_generator.py\n+++ b/great_expectations/datasource/generator/query_generator.py\n@@ -25,7 +25,7 @@\n class QueryBatchKwargsGenerator(BatchKwargsGenerator):\n \"\"\"Produce query-style batch_kwargs from sql files stored on disk\n \"\"\"\n- recognized_batch_parameters = {'query_parameters', 'partition_id'}\n+ recognized_batch_parameters = {'query_parameters', 'partition_id', 'name'}\n \n def __init__(self, name=\"default\", datasource=None, query_store_backend=None, queries=None):\n super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)\n@@ -66,7 +66,7 @@\n self.add_query(query_name, query)\n \n def _get_raw_query(self, generator_asset):\n- return self._store_backend.get(tuple(generator_asset))\n+ return self._store_backend.get((generator_asset,))\n \n def _get_iterator(self, generator_asset, query_parameters=None):\n raw_query = self._get_raw_query(generator_asset)\n@@ -91,7 +91,7 @@\n \n def add_query(self, generator_asset, query):\n # Backends must have a tuple key; we use only a single-element tuple\n- self._store_backend.set(tuple(generator_asset), query)\n+ self._store_backend.set((generator_asset,), query)\n \n def get_available_data_asset_names(self):\n defined_queries = self._store_backend.list_keys()\n", "issue": "QueryBatchKwargsGenerator._get_raw_query produces incorrect filepath\nI am using the `QueryBatchKwargsGenerator` to generate `BATCH_KWARGS` using a SQL file I have stored on disk. My `great_expectations.yml` looks like:\r\n\r\n```yaml\r\ndatasources:\r\n domi:\r\n class_name: SqlAlchemyDatasource\r\n module_name:\r\n data_asset_type:\r\n class_name: SqlAlchemyDataset\r\n credentials:\r\n drivername: postgres\r\n host: ${HOST}\r\n port: ${PORT}\r\n username: ${USER}\r\n password: ${PASSWORD}\r\n database: ${DBNAME}\r\n generators:\r\n apartments-generator:\r\n class_name: QueryBatchKwargsGenerator\r\n name: apartments\r\n```\r\n\r\nI am running the following:\r\n\r\n```python\r\n>>> import great_expectations as get\r\n\r\n>>> context = ge.data_context.DataContext()\r\n>>> context.get_available_data_asset_names()\r\n{'domi': {'apartments-generator': {'names': [('listings', 'query')]}}}\r\n\r\n>>> context.build_batch_kwargs(\r\n datasource='domi', generator='apartments-generator', name='listings'\r\n)\r\n```\r\n\r\nWhich raises this error:\r\n\r\n\r\n\r\nThis happens because `_get_raw_query` is doing `tuple('listings')` which returns: `('l', 'i', 's', 't', 'i', 'n', 'g', 's')`)\r\n\r\n```python\r\n def _get_raw_query(self, generator_asset):\r\n return self._store_backend.get(tuple(generator_asset))\r\n```\r\n\r\nI believe this should just be replaced with:\r\n\r\n```python\r\n def _get_raw_query(self, generator_asset):\r\n return self._store_backend.get((generator_asset,))\r\n```\r\n\r\nCurrently I can get around this by providing a tuple instead of a string, i.e.\r\n\r\n```python\r\ncontext.build_batch_kwargs(\r\n datasource='domi',\r\n generator='apartments-generator',\r\n name=('listings', )\r\n)\r\n```\r\n\r\nBut that goes against what the docs are saying [here](https://docs.greatexpectations.io/en/latest/module_docs/data_context_module.html#great_expectations.data_context.BaseDataContext.build_batch_kwargs):\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport logging\n\nfrom .batch_kwargs_generator import BatchKwargsGenerator\nfrom 
great_expectations.datasource.types import SqlAlchemyDatasourceQueryBatchKwargs\nfrom great_expectations.exceptions import (\n BatchKwargsError,\n ClassInstantiationError,\n)\nfrom ...data_context.util import instantiate_class_from_config\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import sqlalchemy\n from sqlalchemy import create_engine\n from sqlalchemy.engine import reflection\nexcept ImportError:\n sqlalchemy = None\n create_engine = None\n reflection = None\n logger.debug(\"Unable to import sqlalchemy.\")\n\n\nclass QueryBatchKwargsGenerator(BatchKwargsGenerator):\n \"\"\"Produce query-style batch_kwargs from sql files stored on disk\n \"\"\"\n recognized_batch_parameters = {'query_parameters', 'partition_id'}\n\n def __init__(self, name=\"default\", datasource=None, query_store_backend=None, queries=None):\n super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)\n root_directory = None\n if query_store_backend is None:\n # We will choose a Tuple store if there is a configured DataContext with a root_directory,\n # and an InMemoryStore otherwise\n if datasource and datasource.data_context and datasource.data_context.root_directory:\n query_store_backend = {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"base_directory\": os.path.join(datasource.data_context.root_directory, \"datasources\",\n datasource.name, \"generators\", name),\n \"filepath_suffix\": \".sql\"\n }\n root_directory = datasource.data_context.root_directory\n else:\n query_store_backend = {\n \"class_name\": \"InMemoryStoreBackend\"\n }\n module_name = 'great_expectations.data_context.store'\n self._store_backend = instantiate_class_from_config(\n config=query_store_backend,\n runtime_environment={\n \"root_directory\": root_directory\n },\n config_defaults={\n \"module_name\": module_name\n }\n )\n if not self._store_backend:\n raise ClassInstantiationError(\n module_name=module_name,\n package_name=None,\n class_name=query_store_backend['class_name']\n )\n if queries is not None:\n for query_name, query in queries.items():\n self.add_query(query_name, query)\n\n def _get_raw_query(self, generator_asset):\n return self._store_backend.get(tuple(generator_asset))\n\n def _get_iterator(self, generator_asset, query_parameters=None):\n raw_query = self._get_raw_query(generator_asset)\n if raw_query is None:\n logger.warning(\"No query defined for generator asset: %s\" % generator_asset)\n # There is no valid query path or temp query storage defined with the generator_asset\n return None\n\n if query_parameters is None:\n iter_ = iter([\n SqlAlchemyDatasourceQueryBatchKwargs(\n query=raw_query\n )])\n else:\n iter_= iter([\n SqlAlchemyDatasourceQueryBatchKwargs(\n query=raw_query,\n query_parameters=query_parameters\n )])\n\n return iter_\n\n def add_query(self, generator_asset, query):\n # Backends must have a tuple key; we use only a single-element tuple\n self._store_backend.set(tuple(generator_asset), query)\n\n def get_available_data_asset_names(self):\n defined_queries = self._store_backend.list_keys()\n # Backends must have a tuple key; we use only a single-element tuple\n return {\"names\": [(query_key_tuple[0], \"query\") for query_key_tuple in defined_queries]}\n\n def _build_batch_kwargs(self, batch_parameters):\n \"\"\"Build batch kwargs from a partition id.\"\"\"\n generator_asset = batch_parameters.pop(\"name\")\n raw_query = self._get_raw_query(generator_asset)\n partition_id = batch_parameters.pop(\"partition_id\", None)\n batch_kwargs = 
self._datasource.process_batch_parameters(**batch_parameters)\n batch_kwargs[\"query\"] = raw_query\n\n if partition_id:\n if not batch_kwargs[\"query_parameters\"]:\n batch_kwargs[\"query_parameters\"] = {}\n batch_kwargs[\"query_parameters\"][\"partition_id\"] = partition_id\n\n return SqlAlchemyDatasourceQueryBatchKwargs(batch_kwargs)\n\n def get_available_partition_ids(self, generator_asset):\n raise BatchKwargsError(\"QueryBatchKwargsGenerator cannot identify partitions.\", {})\n", "path": "great_expectations/datasource/generator/query_generator.py"}], "after_files": [{"content": "import os\nimport logging\n\nfrom .batch_kwargs_generator import BatchKwargsGenerator\nfrom great_expectations.datasource.types import SqlAlchemyDatasourceQueryBatchKwargs\nfrom great_expectations.exceptions import (\n BatchKwargsError,\n ClassInstantiationError,\n)\nfrom ...data_context.util import instantiate_class_from_config\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import sqlalchemy\n from sqlalchemy import create_engine\n from sqlalchemy.engine import reflection\nexcept ImportError:\n sqlalchemy = None\n create_engine = None\n reflection = None\n logger.debug(\"Unable to import sqlalchemy.\")\n\n\nclass QueryBatchKwargsGenerator(BatchKwargsGenerator):\n \"\"\"Produce query-style batch_kwargs from sql files stored on disk\n \"\"\"\n recognized_batch_parameters = {'query_parameters', 'partition_id', 'name'}\n\n def __init__(self, name=\"default\", datasource=None, query_store_backend=None, queries=None):\n super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)\n root_directory = None\n if query_store_backend is None:\n # We will choose a Tuple store if there is a configured DataContext with a root_directory,\n # and an InMemoryStore otherwise\n if datasource and datasource.data_context and datasource.data_context.root_directory:\n query_store_backend = {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"base_directory\": os.path.join(datasource.data_context.root_directory, \"datasources\",\n datasource.name, \"generators\", name),\n \"filepath_suffix\": \".sql\"\n }\n root_directory = datasource.data_context.root_directory\n else:\n query_store_backend = {\n \"class_name\": \"InMemoryStoreBackend\"\n }\n module_name = 'great_expectations.data_context.store'\n self._store_backend = instantiate_class_from_config(\n config=query_store_backend,\n runtime_environment={\n \"root_directory\": root_directory\n },\n config_defaults={\n \"module_name\": module_name\n }\n )\n if not self._store_backend:\n raise ClassInstantiationError(\n module_name=module_name,\n package_name=None,\n class_name=query_store_backend['class_name']\n )\n if queries is not None:\n for query_name, query in queries.items():\n self.add_query(query_name, query)\n\n def _get_raw_query(self, generator_asset):\n return self._store_backend.get((generator_asset,))\n\n def _get_iterator(self, generator_asset, query_parameters=None):\n raw_query = self._get_raw_query(generator_asset)\n if raw_query is None:\n logger.warning(\"No query defined for generator asset: %s\" % generator_asset)\n # There is no valid query path or temp query storage defined with the generator_asset\n return None\n\n if query_parameters is None:\n iter_ = iter([\n SqlAlchemyDatasourceQueryBatchKwargs(\n query=raw_query\n )])\n else:\n iter_= iter([\n SqlAlchemyDatasourceQueryBatchKwargs(\n query=raw_query,\n query_parameters=query_parameters\n )])\n\n return iter_\n\n def add_query(self, generator_asset, query):\n # Backends must 
have a tuple key; we use only a single-element tuple\n self._store_backend.set((generator_asset,), query)\n\n def get_available_data_asset_names(self):\n defined_queries = self._store_backend.list_keys()\n # Backends must have a tuple key; we use only a single-element tuple\n return {\"names\": [(query_key_tuple[0], \"query\") for query_key_tuple in defined_queries]}\n\n def _build_batch_kwargs(self, batch_parameters):\n \"\"\"Build batch kwargs from a partition id.\"\"\"\n generator_asset = batch_parameters.pop(\"name\")\n raw_query = self._get_raw_query(generator_asset)\n partition_id = batch_parameters.pop(\"partition_id\", None)\n batch_kwargs = self._datasource.process_batch_parameters(**batch_parameters)\n batch_kwargs[\"query\"] = raw_query\n\n if partition_id:\n if not batch_kwargs[\"query_parameters\"]:\n batch_kwargs[\"query_parameters\"] = {}\n batch_kwargs[\"query_parameters\"][\"partition_id\"] = partition_id\n\n return SqlAlchemyDatasourceQueryBatchKwargs(batch_kwargs)\n\n def get_available_partition_ids(self, generator_asset):\n raise BatchKwargsError(\"QueryBatchKwargsGenerator cannot identify partitions.\", {})\n", "path": "great_expectations/datasource/generator/query_generator.py"}]} | 1,992 | 348 |
gh_patches_debug_33373 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-378 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix session injection message from msrest
Support session injection in MicrosoftAppCredentials, fixing the msrest warning: "Your credentials class does not support session injection. Performance will not be at the maximum."
--- END ISSUE ---
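For orientation before the code segments below: msrest emits that warning when a credentials object's `signed_session()` cannot accept an existing `requests.Session` and sign it in place. The snippet here is only an illustration of that calling convention under assumed names (`ExampleCredentials`, a stubbed `get_access_token()`), not the repository's actual patch, which appears later in this row.
```python
# Minimal sketch of "session injection": signed_session() signs a caller-supplied
# requests.Session when given one, and only creates a new one as a fallback.
# ExampleCredentials and the hard-coded token are illustrative stand-ins.
import requests


class ExampleCredentials:
    def get_access_token(self) -> str:
        # Stand-in for a cached or freshly refreshed OAuth token lookup.
        return "example-access-token"

    def signed_session(self, session: requests.Session = None) -> requests.Session:
        session = session or requests.Session()
        session.headers["Authorization"] = "Bearer {}".format(self.get_access_token())
        return session
```
With that signature, msrest can pass in its own pooled session instead of having it discarded, which is what removes the performance warning.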
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from datetime import datetime, timedelta
5 from urllib.parse import urlparse
6 from msrest.authentication import BasicTokenAuthentication, Authentication
7 import requests
8 from .constants import Constants
9
10 # TODO: Decide to move this to Constants or viceversa (when porting OAuth)
11 AUTH_SETTINGS = {
12 "refreshEndpoint": "https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token",
13 "refreshScope": "https://api.botframework.com/.default",
14 "botConnectorOpenIdMetadata": "https://login.botframework.com/v1/.well-known/openidconfiguration",
15 "botConnectorIssuer": "https://api.botframework.com",
16 "emulatorOpenIdMetadata": "https://login.microsoftonline.com/botframework.com/v2.0/"
17 ".well-known/openid-configuration",
18 "emulatorAuthV31IssuerV1": "https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/",
19 "emulatorAuthV31IssuerV2": "https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0",
20 "emulatorAuthV32IssuerV1": "https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/",
21 "emulatorAuthV32IssuerV2": "https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0",
22 }
23
24
25 class _OAuthResponse:
26 def __init__(self):
27 self.token_type = None
28 self.expires_in = None
29 self.access_token = None
30 self.expiration_time = None
31
32 @staticmethod
33 def from_json(json_values):
34 result = _OAuthResponse()
35 try:
36 result.token_type = json_values["token_type"]
37 result.access_token = json_values["access_token"]
38 result.expires_in = json_values["expires_in"]
39 except KeyError:
40 pass
41 return result
42
43
44 class MicrosoftAppCredentials(Authentication):
45 """
46 MicrosoftAppCredentials auth implementation and cache.
47 """
48
49 schema = "Bearer"
50
51 trustedHostNames = {
52 "state.botframework.com": datetime.max,
53 "api.botframework.com": datetime.max,
54 "token.botframework.com": datetime.max,
55 "state.botframework.azure.us": datetime.max,
56 "api.botframework.azure.us": datetime.max,
57 "token.botframework.azure.us": datetime.max,
58 }
59 cache = {}
60
61 def __init__(self, app_id: str, password: str, channel_auth_tenant: str = None):
62 """
63 Initializes a new instance of MicrosoftAppCredentials class
64 :param app_id: The Microsoft app ID.
65 :param app_password: The Microsoft app password.
66 :param channel_auth_tenant: Optional. The oauth token tenant.
67 """
68 # The configuration property for the Microsoft app ID.
69 self.microsoft_app_id = app_id
70 # The configuration property for the Microsoft app Password.
71 self.microsoft_app_password = password
72 tenant = (
73 channel_auth_tenant
74 if channel_auth_tenant
75 else Constants.DEFAULT_CHANNEL_AUTH_TENANT
76 )
77 self.oauth_endpoint = (
78 Constants.TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX
79 + tenant
80 + Constants.TO_CHANNEL_FROM_BOT_TOKEN_ENDPOINT_PATH
81 )
82 self.oauth_scope = AUTH_SETTINGS["refreshScope"]
83 self.token_cache_key = app_id + "-cache"
84
85 def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ
86 """
87 Gets the signed session.
88 :returns: Signed requests.Session object
89 """
90 auth_token = self.get_access_token()
91
92 basic_authentication = BasicTokenAuthentication({"access_token": auth_token})
93 session = basic_authentication.signed_session()
94
95 # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't
96 # be an "Authorization" header on the outgoing activity.
97 if not self.microsoft_app_id and not self.microsoft_app_password:
98 del session.headers["Authorization"]
99 return session
100
101 def get_access_token(self, force_refresh: bool = False) -> str:
102 """
103 Gets an OAuth access token.
104 :param force_refresh: True to force a refresh of the token; or false to get
105 a cached token if it exists.
106 :returns: Access token string
107 """
108 if self.microsoft_app_id and self.microsoft_app_password:
109 if not force_refresh:
110 # check the global cache for the token. If we have it, and it's valid, we're done.
111 oauth_token = MicrosoftAppCredentials.cache.get(
112 self.token_cache_key, None
113 )
114 if oauth_token is not None:
115 # we have the token. Is it valid?
116 if oauth_token.expiration_time > datetime.now():
117 return oauth_token.access_token
118 # We need to refresh the token, because:
119 # 1. The user requested it via the force_refresh parameter
120 # 2. We have it, but it's expired
121 # 3. We don't have it in the cache.
122 oauth_token = self.refresh_token()
123 MicrosoftAppCredentials.cache.setdefault(self.token_cache_key, oauth_token)
124 return oauth_token.access_token
125 return ""
126
127 def refresh_token(self) -> _OAuthResponse:
128 """
129 returns: _OAuthResponse
130 """
131 options = {
132 "grant_type": "client_credentials",
133 "client_id": self.microsoft_app_id,
134 "client_secret": self.microsoft_app_password,
135 "scope": self.oauth_scope,
136 }
137
138 response = requests.post(self.oauth_endpoint, data=options)
139 response.raise_for_status()
140
141 oauth_response = _OAuthResponse.from_json(response.json())
142 oauth_response.expiration_time = datetime.now() + timedelta(
143 seconds=(oauth_response.expires_in - 300)
144 )
145
146 return oauth_response
147
148 @staticmethod
149 def trust_service_url(service_url: str, expiration=None):
150 """
151 Checks if the service url is for a trusted host or not.
152 :param service_url: The service url.
153 :param expiration: The expiration time after which this service url is not trusted anymore.
154 :returns: True if the host of the service url is trusted; False otherwise.
155 """
156 if expiration is None:
157 expiration = datetime.now() + timedelta(days=1)
158 host = urlparse(service_url).hostname
159 if host is not None:
160 MicrosoftAppCredentials.trustedHostNames[host] = expiration
161
162 @staticmethod
163 def is_trusted_service(service_url: str) -> bool:
164 """
165 Checks if the service url is for a trusted host or not.
166 :param service_url: The service url.
167 :returns: True if the host of the service url is trusted; False otherwise.
168 """
169 host = urlparse(service_url).hostname
170 if host is not None:
171 return MicrosoftAppCredentials._is_trusted_url(host)
172 return False
173
174 @staticmethod
175 def _is_trusted_url(host: str) -> bool:
176 expiration = MicrosoftAppCredentials.trustedHostNames.get(host, datetime.min)
177 return expiration > (datetime.now() - timedelta(minutes=5))
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
@@ -3,8 +3,9 @@
from datetime import datetime, timedelta
from urllib.parse import urlparse
-from msrest.authentication import BasicTokenAuthentication, Authentication
import requests
+
+from msrest.authentication import Authentication
from .constants import Constants
# TODO: Decide to move this to Constants or viceversa (when porting OAuth)
@@ -82,20 +83,25 @@
self.oauth_scope = AUTH_SETTINGS["refreshScope"]
self.token_cache_key = app_id + "-cache"
- def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ
+ # pylint: disable=arguments-differ
+ def signed_session(self, session: requests.Session = None) -> requests.Session:
"""
Gets the signed session.
:returns: Signed requests.Session object
"""
- auth_token = self.get_access_token()
-
- basic_authentication = BasicTokenAuthentication({"access_token": auth_token})
- session = basic_authentication.signed_session()
+ if not session:
+ session = requests.Session()
# If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't
# be an "Authorization" header on the outgoing activity.
if not self.microsoft_app_id and not self.microsoft_app_password:
- del session.headers["Authorization"]
+ session.headers.pop("Authorization", None)
+
+ elif not session.headers.get("Authorization"):
+ auth_token = self.get_access_token()
+ header = "{} {}".format("Bearer", auth_token)
+ session.headers["Authorization"] = header
+
return session
def get_access_token(self, force_refresh: bool = False) -> str:
| {"golden_diff": "diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py\n--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py\n@@ -3,8 +3,9 @@\n \n from datetime import datetime, timedelta\n from urllib.parse import urlparse\n-from msrest.authentication import BasicTokenAuthentication, Authentication\n import requests\n+\n+from msrest.authentication import Authentication\n from .constants import Constants\n \n # TODO: Decide to move this to Constants or viceversa (when porting OAuth)\n@@ -82,20 +83,25 @@\n self.oauth_scope = AUTH_SETTINGS[\"refreshScope\"]\n self.token_cache_key = app_id + \"-cache\"\n \n- def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ\n+ # pylint: disable=arguments-differ\n+ def signed_session(self, session: requests.Session = None) -> requests.Session:\n \"\"\"\n Gets the signed session.\n :returns: Signed requests.Session object\n \"\"\"\n- auth_token = self.get_access_token()\n-\n- basic_authentication = BasicTokenAuthentication({\"access_token\": auth_token})\n- session = basic_authentication.signed_session()\n+ if not session:\n+ session = requests.Session()\n \n # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't\n # be an \"Authorization\" header on the outgoing activity.\n if not self.microsoft_app_id and not self.microsoft_app_password:\n- del session.headers[\"Authorization\"]\n+ session.headers.pop(\"Authorization\", None)\n+\n+ elif not session.headers.get(\"Authorization\"):\n+ auth_token = self.get_access_token()\n+ header = \"{} {}\".format(\"Bearer\", auth_token)\n+ session.headers[\"Authorization\"] = header\n+\n return session\n \n def get_access_token(self, force_refresh: bool = False) -> str:\n", "issue": "Fix session injection message from msrest\nSupport session injection in MicrosoftAppCredentials fixing the warning of: Your credentials class does not support session injection. Performance will not be at the maximum.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import datetime, timedelta\nfrom urllib.parse import urlparse\nfrom msrest.authentication import BasicTokenAuthentication, Authentication\nimport requests\nfrom .constants import Constants\n\n# TODO: Decide to move this to Constants or viceversa (when porting OAuth)\nAUTH_SETTINGS = {\n \"refreshEndpoint\": \"https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token\",\n \"refreshScope\": \"https://api.botframework.com/.default\",\n \"botConnectorOpenIdMetadata\": \"https://login.botframework.com/v1/.well-known/openidconfiguration\",\n \"botConnectorIssuer\": \"https://api.botframework.com\",\n \"emulatorOpenIdMetadata\": \"https://login.microsoftonline.com/botframework.com/v2.0/\"\n \".well-known/openid-configuration\",\n \"emulatorAuthV31IssuerV1\": \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\",\n \"emulatorAuthV31IssuerV2\": \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\",\n \"emulatorAuthV32IssuerV1\": \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\",\n \"emulatorAuthV32IssuerV2\": \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\",\n}\n\n\nclass _OAuthResponse:\n def __init__(self):\n self.token_type = None\n self.expires_in = None\n self.access_token = None\n self.expiration_time = None\n\n @staticmethod\n def from_json(json_values):\n result = _OAuthResponse()\n try:\n result.token_type = json_values[\"token_type\"]\n result.access_token = json_values[\"access_token\"]\n result.expires_in = json_values[\"expires_in\"]\n except KeyError:\n pass\n return result\n\n\nclass MicrosoftAppCredentials(Authentication):\n \"\"\"\n MicrosoftAppCredentials auth implementation and cache.\n \"\"\"\n\n schema = \"Bearer\"\n\n trustedHostNames = {\n \"state.botframework.com\": datetime.max,\n \"api.botframework.com\": datetime.max,\n \"token.botframework.com\": datetime.max,\n \"state.botframework.azure.us\": datetime.max,\n \"api.botframework.azure.us\": datetime.max,\n \"token.botframework.azure.us\": datetime.max,\n }\n cache = {}\n\n def __init__(self, app_id: str, password: str, channel_auth_tenant: str = None):\n \"\"\"\n Initializes a new instance of MicrosoftAppCredentials class\n :param app_id: The Microsoft app ID.\n :param app_password: The Microsoft app password.\n :param channel_auth_tenant: Optional. 
The oauth token tenant.\n \"\"\"\n # The configuration property for the Microsoft app ID.\n self.microsoft_app_id = app_id\n # The configuration property for the Microsoft app Password.\n self.microsoft_app_password = password\n tenant = (\n channel_auth_tenant\n if channel_auth_tenant\n else Constants.DEFAULT_CHANNEL_AUTH_TENANT\n )\n self.oauth_endpoint = (\n Constants.TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX\n + tenant\n + Constants.TO_CHANNEL_FROM_BOT_TOKEN_ENDPOINT_PATH\n )\n self.oauth_scope = AUTH_SETTINGS[\"refreshScope\"]\n self.token_cache_key = app_id + \"-cache\"\n\n def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ\n \"\"\"\n Gets the signed session.\n :returns: Signed requests.Session object\n \"\"\"\n auth_token = self.get_access_token()\n\n basic_authentication = BasicTokenAuthentication({\"access_token\": auth_token})\n session = basic_authentication.signed_session()\n\n # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't\n # be an \"Authorization\" header on the outgoing activity.\n if not self.microsoft_app_id and not self.microsoft_app_password:\n del session.headers[\"Authorization\"]\n return session\n\n def get_access_token(self, force_refresh: bool = False) -> str:\n \"\"\"\n Gets an OAuth access token.\n :param force_refresh: True to force a refresh of the token; or false to get\n a cached token if it exists.\n :returns: Access token string\n \"\"\"\n if self.microsoft_app_id and self.microsoft_app_password:\n if not force_refresh:\n # check the global cache for the token. If we have it, and it's valid, we're done.\n oauth_token = MicrosoftAppCredentials.cache.get(\n self.token_cache_key, None\n )\n if oauth_token is not None:\n # we have the token. Is it valid?\n if oauth_token.expiration_time > datetime.now():\n return oauth_token.access_token\n # We need to refresh the token, because:\n # 1. The user requested it via the force_refresh parameter\n # 2. We have it, but it's expired\n # 3. 
We don't have it in the cache.\n oauth_token = self.refresh_token()\n MicrosoftAppCredentials.cache.setdefault(self.token_cache_key, oauth_token)\n return oauth_token.access_token\n return \"\"\n\n def refresh_token(self) -> _OAuthResponse:\n \"\"\"\n returns: _OAuthResponse\n \"\"\"\n options = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self.microsoft_app_id,\n \"client_secret\": self.microsoft_app_password,\n \"scope\": self.oauth_scope,\n }\n\n response = requests.post(self.oauth_endpoint, data=options)\n response.raise_for_status()\n\n oauth_response = _OAuthResponse.from_json(response.json())\n oauth_response.expiration_time = datetime.now() + timedelta(\n seconds=(oauth_response.expires_in - 300)\n )\n\n return oauth_response\n\n @staticmethod\n def trust_service_url(service_url: str, expiration=None):\n \"\"\"\n Checks if the service url is for a trusted host or not.\n :param service_url: The service url.\n :param expiration: The expiration time after which this service url is not trusted anymore.\n :returns: True if the host of the service url is trusted; False otherwise.\n \"\"\"\n if expiration is None:\n expiration = datetime.now() + timedelta(days=1)\n host = urlparse(service_url).hostname\n if host is not None:\n MicrosoftAppCredentials.trustedHostNames[host] = expiration\n\n @staticmethod\n def is_trusted_service(service_url: str) -> bool:\n \"\"\"\n Checks if the service url is for a trusted host or not.\n :param service_url: The service url.\n :returns: True if the host of the service url is trusted; False otherwise.\n \"\"\"\n host = urlparse(service_url).hostname\n if host is not None:\n return MicrosoftAppCredentials._is_trusted_url(host)\n return False\n\n @staticmethod\n def _is_trusted_url(host: str) -> bool:\n expiration = MicrosoftAppCredentials.trustedHostNames.get(host, datetime.min)\n return expiration > (datetime.now() - timedelta(minutes=5))\n", "path": "libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import datetime, timedelta\nfrom urllib.parse import urlparse\nimport requests\n\nfrom msrest.authentication import Authentication\nfrom .constants import Constants\n\n# TODO: Decide to move this to Constants or viceversa (when porting OAuth)\nAUTH_SETTINGS = {\n \"refreshEndpoint\": \"https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token\",\n \"refreshScope\": \"https://api.botframework.com/.default\",\n \"botConnectorOpenIdMetadata\": \"https://login.botframework.com/v1/.well-known/openidconfiguration\",\n \"botConnectorIssuer\": \"https://api.botframework.com\",\n \"emulatorOpenIdMetadata\": \"https://login.microsoftonline.com/botframework.com/v2.0/\"\n \".well-known/openid-configuration\",\n \"emulatorAuthV31IssuerV1\": \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\",\n \"emulatorAuthV31IssuerV2\": \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\",\n \"emulatorAuthV32IssuerV1\": \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\",\n \"emulatorAuthV32IssuerV2\": \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\",\n}\n\n\nclass _OAuthResponse:\n def __init__(self):\n self.token_type = None\n self.expires_in = None\n self.access_token = None\n self.expiration_time = None\n\n @staticmethod\n def from_json(json_values):\n result = _OAuthResponse()\n try:\n result.token_type = json_values[\"token_type\"]\n result.access_token = json_values[\"access_token\"]\n result.expires_in = json_values[\"expires_in\"]\n except KeyError:\n pass\n return result\n\n\nclass MicrosoftAppCredentials(Authentication):\n \"\"\"\n MicrosoftAppCredentials auth implementation and cache.\n \"\"\"\n\n schema = \"Bearer\"\n\n trustedHostNames = {\n \"state.botframework.com\": datetime.max,\n \"api.botframework.com\": datetime.max,\n \"token.botframework.com\": datetime.max,\n \"state.botframework.azure.us\": datetime.max,\n \"api.botframework.azure.us\": datetime.max,\n \"token.botframework.azure.us\": datetime.max,\n }\n cache = {}\n\n def __init__(self, app_id: str, password: str, channel_auth_tenant: str = None):\n \"\"\"\n Initializes a new instance of MicrosoftAppCredentials class\n :param app_id: The Microsoft app ID.\n :param app_password: The Microsoft app password.\n :param channel_auth_tenant: Optional. 
The oauth token tenant.\n \"\"\"\n # The configuration property for the Microsoft app ID.\n self.microsoft_app_id = app_id\n # The configuration property for the Microsoft app Password.\n self.microsoft_app_password = password\n tenant = (\n channel_auth_tenant\n if channel_auth_tenant\n else Constants.DEFAULT_CHANNEL_AUTH_TENANT\n )\n self.oauth_endpoint = (\n Constants.TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX\n + tenant\n + Constants.TO_CHANNEL_FROM_BOT_TOKEN_ENDPOINT_PATH\n )\n self.oauth_scope = AUTH_SETTINGS[\"refreshScope\"]\n self.token_cache_key = app_id + \"-cache\"\n\n # pylint: disable=arguments-differ\n def signed_session(self, session: requests.Session = None) -> requests.Session:\n \"\"\"\n Gets the signed session.\n :returns: Signed requests.Session object\n \"\"\"\n if not session:\n session = requests.Session()\n\n # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't\n # be an \"Authorization\" header on the outgoing activity.\n if not self.microsoft_app_id and not self.microsoft_app_password:\n session.headers.pop(\"Authorization\", None)\n\n elif not session.headers.get(\"Authorization\"):\n auth_token = self.get_access_token()\n header = \"{} {}\".format(\"Bearer\", auth_token)\n session.headers[\"Authorization\"] = header\n\n return session\n\n def get_access_token(self, force_refresh: bool = False) -> str:\n \"\"\"\n Gets an OAuth access token.\n :param force_refresh: True to force a refresh of the token; or false to get\n a cached token if it exists.\n :returns: Access token string\n \"\"\"\n if self.microsoft_app_id and self.microsoft_app_password:\n if not force_refresh:\n # check the global cache for the token. If we have it, and it's valid, we're done.\n oauth_token = MicrosoftAppCredentials.cache.get(\n self.token_cache_key, None\n )\n if oauth_token is not None:\n # we have the token. Is it valid?\n if oauth_token.expiration_time > datetime.now():\n return oauth_token.access_token\n # We need to refresh the token, because:\n # 1. The user requested it via the force_refresh parameter\n # 2. We have it, but it's expired\n # 3. 
We don't have it in the cache.\n oauth_token = self.refresh_token()\n MicrosoftAppCredentials.cache.setdefault(self.token_cache_key, oauth_token)\n return oauth_token.access_token\n return \"\"\n\n def refresh_token(self) -> _OAuthResponse:\n \"\"\"\n returns: _OAuthResponse\n \"\"\"\n options = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self.microsoft_app_id,\n \"client_secret\": self.microsoft_app_password,\n \"scope\": self.oauth_scope,\n }\n\n response = requests.post(self.oauth_endpoint, data=options)\n response.raise_for_status()\n\n oauth_response = _OAuthResponse.from_json(response.json())\n oauth_response.expiration_time = datetime.now() + timedelta(\n seconds=(oauth_response.expires_in - 300)\n )\n\n return oauth_response\n\n @staticmethod\n def trust_service_url(service_url: str, expiration=None):\n \"\"\"\n Checks if the service url is for a trusted host or not.\n :param service_url: The service url.\n :param expiration: The expiration time after which this service url is not trusted anymore.\n :returns: True if the host of the service url is trusted; False otherwise.\n \"\"\"\n if expiration is None:\n expiration = datetime.now() + timedelta(days=1)\n host = urlparse(service_url).hostname\n if host is not None:\n MicrosoftAppCredentials.trustedHostNames[host] = expiration\n\n @staticmethod\n def is_trusted_service(service_url: str) -> bool:\n \"\"\"\n Checks if the service url is for a trusted host or not.\n :param service_url: The service url.\n :returns: True if the host of the service url is trusted; False otherwise.\n \"\"\"\n host = urlparse(service_url).hostname\n if host is not None:\n return MicrosoftAppCredentials._is_trusted_url(host)\n return False\n\n @staticmethod\n def _is_trusted_url(host: str) -> bool:\n expiration = MicrosoftAppCredentials.trustedHostNames.get(host, datetime.min)\n return expiration > (datetime.now() - timedelta(minutes=5))\n", "path": "libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py"}]} | 2,346 | 445 |
gh_patches_debug_10612 | rasdani/github-patches | git_diff | fedora-infra__bodhi-1450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
There are multiple alembic heads on the develop branch
The migrations can't be applied on the develop branch because there are multiple alembic heads:
```
[vagrant@bodhi-dev bodhi]$ alembic upgrade head
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib64/python2.7/site-packages/zope': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/zope': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/paste': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/moksha': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/repoze': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/home/vagrant/bodhi/bodhi/server/__init__.py:26: DeprecationWarning: unauthenticated_userid: As of Pyramid 1.5 the "pyramid.security.unauthenticated_userid" API is now deprecated. It will be removed in Pyramd 1.8. Use the "unauthenticated_userid" attribute of the Pyramid request instead.
from pyramid.security import unauthenticated_userid
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision,
'<branchname>@head' to narrow to a specific head, or 'heads' for all heads
```
--- END ISSUE ---
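As background for the log above (an illustration only, not the project's actual fix, which is shown later in this row): `alembic heads` lists the competing head revisions, and the history becomes linear again either by creating a merge revision with `alembic merge` or by re-pointing one migration's `down_revision` at the other head. The revision IDs below are placeholders.
```python
# Hypothetical migration file showing the re-chaining approach; 'aaaa1111' and
# 'bbbb2222' are placeholder revision IDs, not Bodhi's real ones.
revision = 'bbbb2222'
down_revision = 'aaaa1111'  # point at the other head instead of the shared parent


def upgrade():
    pass  # schema changes would go here


def downgrade():
    pass  # and their reversal here
```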
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py`
Content:
```
1 """Convert the builds table to be polymorphic.
2
3 Revision ID: 9241378c92ab
4 Revises: fc6b0169c596
5 Create Date: 2017-04-06 20:37:24.766366
6 """
7 from alembic import op
8 import sqlalchemy as sa
9
10
11 # revision identifiers, used by Alembic.
12 revision = '9241378c92ab'
13 down_revision = 'fc6b0169c596'
14
15
16 def upgrade():
17 """Add the type column to the builds table."""
18 # The default of ``1`` is the RPM Build type.
19 op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))
20 op.alter_column('builds', 'type', server_default=None)
21
22
23 def downgrade():
24 """Remove the type column from the builds table."""
25 op.drop_column('builds', 'type')
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
@@ -1,7 +1,7 @@
"""Convert the builds table to be polymorphic.
Revision ID: 9241378c92ab
-Revises: fc6b0169c596
+Revises: 12d3e8695f90
Create Date: 2017-04-06 20:37:24.766366
"""
from alembic import op
@@ -10,7 +10,7 @@
# revision identifiers, used by Alembic.
revision = '9241378c92ab'
-down_revision = 'fc6b0169c596'
+down_revision = '12d3e8695f90'
def upgrade():
| {"golden_diff": "diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n@@ -1,7 +1,7 @@\n \"\"\"Convert the builds table to be polymorphic.\n \n Revision ID: 9241378c92ab\n-Revises: fc6b0169c596\n+Revises: 12d3e8695f90\n Create Date: 2017-04-06 20:37:24.766366\n \"\"\"\n from alembic import op\n@@ -10,7 +10,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '9241378c92ab'\n-down_revision = 'fc6b0169c596'\n+down_revision = '12d3e8695f90'\n \n \n def upgrade():\n", "issue": "There are multiple alembic heads on the develop branch\nThe migrations can't be applied on the develop branch because there are multiple alembic heads:\r\n\r\n```\r\n[vagrant@bodhi-dev bodhi]$ alembic upgrade head\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib64/python2.7/site-packages/zope': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/zope': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/paste': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/moksha': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/repoze': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/home/vagrant/bodhi/bodhi/server/__init__.py:26: DeprecationWarning: unauthenticated_userid: As of Pyramid 1.5 the \"pyramid.security.unauthenticated_userid\" API is now deprecated. It will be removed in Pyramd 1.8. 
Use the \"unauthenticated_userid\" attribute of the Pyramid request instead.\r\n from pyramid.security import unauthenticated_userid\r\nINFO [alembic.runtime.migration] Context impl PostgresqlImpl.\r\nINFO [alembic.runtime.migration] Will assume transactional DDL.\r\nERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision,\r\n '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n```\n", "before_files": [{"content": "\"\"\"Convert the builds table to be polymorphic.\n\nRevision ID: 9241378c92ab\nRevises: fc6b0169c596\nCreate Date: 2017-04-06 20:37:24.766366\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9241378c92ab'\ndown_revision = 'fc6b0169c596'\n\n\ndef upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n # The default of ``1`` is the RPM Build type.\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\n op.alter_column('builds', 'type', server_default=None)\n\n\ndef downgrade():\n \"\"\"Remove the type column from the builds table.\"\"\"\n op.drop_column('builds', 'type')\n", "path": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py"}], "after_files": [{"content": "\"\"\"Convert the builds table to be polymorphic.\n\nRevision ID: 9241378c92ab\nRevises: 12d3e8695f90\nCreate Date: 2017-04-06 20:37:24.766366\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9241378c92ab'\ndown_revision = '12d3e8695f90'\n\n\ndef upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n # The default of ``1`` is the RPM Build type.\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\n op.alter_column('builds', 'type', server_default=None)\n\n\ndef downgrade():\n \"\"\"Remove the type column from the builds table.\"\"\"\n op.drop_column('builds', 'type')\n", "path": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py"}]} | 1,090 | 307 |
gh_patches_debug_3603 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1570 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Realm-Select box with broken "placeholder"
In the login screen there is a realm select box.
The placeholder for the select box does not work:
https://github.com/privacyidea/privacyidea/blob/master/privacyidea/static/components/login/views/login.html#L63
We could either fix the placeholder or preselect the default realm.
--- END ISSUE ---
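Both options end up in the server-side code that hands the template its realm list. The snippet below is only a hypothetical sketch of that idea (`build_realm_context` and its arguments are invented names, not privacyIDEA's API); the project's actual change is shown further down this row.
```python
# Hypothetical sketch: build the dropdown values without a leading empty entry
# (which defeats the placeholder) and pass an explicit default for preselection.
def build_realm_context(realm_names, default_realm=None):
    realms = ",".join(realm_names)
    return {
        "realms": realms,
        "default_realm": default_realm or (realm_names[0] if realm_names else ""),
    }
```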
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/webui/login.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # http://www.privacyidea.org
4 # (c) cornelius kölbel, privacyidea.org
5 #
6 # 2017-11-14 Cornelius Kölbel <[email protected]>
7 # Add custom baseline and menu
8 # 2016-01-07 Cornelius Kölbel <[email protected]>
9 # Add password reset
10 # 2015-11-04 Cornelius Kölbel <[email protected]>
11 # Add REMOTE_USER check
12 # 2014-12-22 Cornelius Kölbel, <[email protected]>
13 #
14 # This code is free software; you can redistribute it and/or
15 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
16 # License as published by the Free Software Foundation; either
17 # version 3 of the License, or any later version.
18 #
19 # This code is distributed in the hope that it will be useful,
20 # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
23 #
24 # You should have received a copy of the GNU Affero General Public
25 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
26 #
27 from privacyidea.lib.queue import has_job_queue
28
29 __doc__ = """This is the starting point for the single web application.
30 Other html code is dynamically loaded via angularJS and located in
31 /static/views/...
32 """
33 __author__ = "Cornelius Kölbel <[email protected]>"
34
35 from flask import (Blueprint, render_template, request,
36 current_app)
37 from privacyidea.api.lib.prepolicy import is_remote_user_allowed
38 from privacyidea.lib.passwordreset import is_password_reset
39 from privacyidea.lib.error import HSMException
40 from privacyidea.lib.realm import get_realms
41 from privacyidea.lib.policy import PolicyClass, ACTION, SCOPE
42 from privacyidea.lib.subscriptions import subscription_status
43 from privacyidea.lib.utils import get_client_ip
44 from privacyidea.lib.config import get_from_config, SYSCONF
45
46 DEFAULT_THEME = "/static/contrib/css/bootstrap-theme.css"
47
48 login_blueprint = Blueprint('login_blueprint', __name__)
49
50
51 @login_blueprint.route('/', methods=['GET'])
52 def single_page_application():
53 instance = request.script_root
54 if instance == "/":
55 instance = ""
56 # The backend URL should come from the configuration of the system.
57 backend_url = ""
58
59 if current_app.config.get("PI_UI_DEACTIVATED"):
60 # Do not provide the UI
61 return render_template("deactivated.html")
62
63 # The default theme. We can change this later
64 theme = current_app.config.get("PI_CSS", DEFAULT_THEME)
65 # Get further customizations
66 customization = current_app.config.get("PI_CUSTOMIZATION",
67 "/static/customize/")
68 customization = customization.strip('/')
69 # TODO: we should add the CSS into PI_CUSTOMZATION/css
70 # Enrollment-Wizard:
71 # PI_CUSTOMIZATION/views/includes/token.enroll.pre.top.html
72 # PI_CUSTOMIZATION/views/includes/token.enroll.pre.bottom.html
73 # PI_CUSTOMIZATION/views/includes/token.enroll.post.top.html
74 # PI_CUSTOMIZATION/views/includes/token.enroll.post.bottom.html
75 # Get the hidden external links
76 external_links = current_app.config.get("PI_EXTERNAL_LINKS", True)
77 # Get the logo file
78 logo = current_app.config.get("PI_LOGO", "privacyIDEA1.png")
79 browser_lang = request.accept_languages.best_match(["en", "de", "de-DE"], default="en").split("-")[0]
80 # check if login with REMOTE_USER is allowed.
81 remote_user = ""
82 password_reset = False
83 if not hasattr(request, "all_data"):
84 request.all_data = {}
85 # Depending on displaying the realm dropdown, we fill realms or not.
86 policy_object = PolicyClass()
87 realms = ""
88 client_ip = get_client_ip(request,
89 get_from_config(SYSCONF.OVERRIDECLIENT))
90 realm_dropdown = policy_object.get_policies(action=ACTION.REALMDROPDOWN,
91 scope=SCOPE.WEBUI,
92 client=client_ip,
93 active=True)
94 if realm_dropdown:
95 try:
96 realm_dropdown_values = policy_object.get_action_values(
97 action=ACTION.REALMDROPDOWN,
98 scope=SCOPE.WEBUI,
99 client=client_ip)
100 # Use the realms from the policy.
101 realms = ",".join(realm_dropdown_values)
102 except AttributeError as ex:
103 # The policy is still a boolean realm_dropdown action
104 # Thus we display ALL realms
105 realms = ",".join(get_realms())
106 if realms:
107 realms = "," + realms
108
109 try:
110 if is_remote_user_allowed(request):
111 remote_user = request.remote_user
112 password_reset = is_password_reset()
113 hsm_ready = True
114 except HSMException:
115 hsm_ready = False
116
117 # Use policies to determine the customization of menu
118 # and baseline. get_action_values returns an array!
119 sub_state = subscription_status()
120 customization_menu_file = policy_object.get_action_values(
121 allow_white_space_in_action=True,
122 action=ACTION.CUSTOM_MENU,
123 scope=SCOPE.WEBUI,
124 client=client_ip, unique=True)
125 if len(customization_menu_file) and list(customization_menu_file)[0] \
126 and sub_state not in [1, 2]:
127 customization_menu_file = list(customization_menu_file)[0]
128 else:
129 customization_menu_file = "templates/menu.html"
130 customization_baseline_file = policy_object.get_action_values(
131 allow_white_space_in_action=True,
132 action=ACTION.CUSTOM_BASELINE,
133 scope=SCOPE.WEBUI,
134 client=client_ip, unique=True)
135 if len(customization_baseline_file) and list(customization_baseline_file)[0] \
136 and sub_state not in [1, 2]:
137 customization_baseline_file = list(customization_baseline_file)[0]
138 else:
139 customization_baseline_file = "templates/baseline.html"
140
141 login_text = policy_object.get_action_values(
142 allow_white_space_in_action=True,
143 action=ACTION.LOGIN_TEXT,
144 scope=SCOPE.WEBUI,
145 client=client_ip, unique=True
146 )
147 if len(login_text) and list(login_text)[0] and sub_state not in [1, 2]:
148 login_text = list(login_text)[0]
149 else:
150 login_text = ""
151
152 return render_template("index.html", instance=instance,
153 backendUrl=backend_url,
154 browser_lang=browser_lang,
155 remote_user=remote_user,
156 theme=theme,
157 password_reset=password_reset,
158 hsm_ready=hsm_ready,
159 has_job_queue=str(has_job_queue()),
160 customization=customization,
161 customization_menu_file=customization_menu_file,
162 customization_baseline_file=customization_baseline_file,
163 realms=realms,
164 external_links=external_links,
165 login_text=login_text,
166 logo=logo)
167
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/privacyidea/webui/login.py b/privacyidea/webui/login.py
--- a/privacyidea/webui/login.py
+++ b/privacyidea/webui/login.py
@@ -103,8 +103,6 @@
# The policy is still a boolean realm_dropdown action
# Thus we display ALL realms
realms = ",".join(get_realms())
- if realms:
- realms = "," + realms
try:
if is_remote_user_allowed(request):
| {"golden_diff": "diff --git a/privacyidea/webui/login.py b/privacyidea/webui/login.py\n--- a/privacyidea/webui/login.py\n+++ b/privacyidea/webui/login.py\n@@ -103,8 +103,6 @@\n # The policy is still a boolean realm_dropdown action\n # Thus we display ALL realms\n realms = \",\".join(get_realms())\n- if realms:\n- realms = \",\" + realms\n \n try:\n if is_remote_user_allowed(request):\n", "issue": "Realm-Select box with broken \"placeholder\"\nIn the login screen there is a realm select box.\r\n\r\nThe placeholder for the select box does not work:\r\nhttps://github.com/privacyidea/privacyidea/blob/master/privacyidea/static/components/login/views/login.html#L63\r\n\r\nWe could either fix the placeholder or preselect the default realm.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) cornelius k\u00f6lbel, privacyidea.org\n#\n# 2017-11-14 Cornelius K\u00f6lbel <[email protected]>\n# Add custom baseline and menu\n# 2016-01-07 Cornelius K\u00f6lbel <[email protected]>\n# Add password reset\n# 2015-11-04 Cornelius K\u00f6lbel <[email protected]>\n# Add REMOTE_USER check\n# 2014-12-22 Cornelius K\u00f6lbel, <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.lib.queue import has_job_queue\n\n__doc__ = \"\"\"This is the starting point for the single web application.\nOther html code is dynamically loaded via angularJS and located in\n/static/views/...\n\"\"\"\n__author__ = \"Cornelius K\u00f6lbel <[email protected]>\"\n\nfrom flask import (Blueprint, render_template, request,\n current_app)\nfrom privacyidea.api.lib.prepolicy import is_remote_user_allowed\nfrom privacyidea.lib.passwordreset import is_password_reset\nfrom privacyidea.lib.error import HSMException\nfrom privacyidea.lib.realm import get_realms\nfrom privacyidea.lib.policy import PolicyClass, ACTION, SCOPE\nfrom privacyidea.lib.subscriptions import subscription_status\nfrom privacyidea.lib.utils import get_client_ip\nfrom privacyidea.lib.config import get_from_config, SYSCONF\n\nDEFAULT_THEME = \"/static/contrib/css/bootstrap-theme.css\"\n\nlogin_blueprint = Blueprint('login_blueprint', __name__)\n\n\n@login_blueprint.route('/', methods=['GET'])\ndef single_page_application():\n instance = request.script_root\n if instance == \"/\":\n instance = \"\"\n # The backend URL should come from the configuration of the system.\n backend_url = \"\"\n\n if current_app.config.get(\"PI_UI_DEACTIVATED\"):\n # Do not provide the UI\n return render_template(\"deactivated.html\")\n\n # The default theme. 
We can change this later\n theme = current_app.config.get(\"PI_CSS\", DEFAULT_THEME)\n # Get further customizations\n customization = current_app.config.get(\"PI_CUSTOMIZATION\",\n \"/static/customize/\")\n customization = customization.strip('/')\n # TODO: we should add the CSS into PI_CUSTOMZATION/css\n # Enrollment-Wizard:\n # PI_CUSTOMIZATION/views/includes/token.enroll.pre.top.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.pre.bottom.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.post.top.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.post.bottom.html\n # Get the hidden external links\n external_links = current_app.config.get(\"PI_EXTERNAL_LINKS\", True)\n # Get the logo file\n logo = current_app.config.get(\"PI_LOGO\", \"privacyIDEA1.png\")\n browser_lang = request.accept_languages.best_match([\"en\", \"de\", \"de-DE\"], default=\"en\").split(\"-\")[0]\n # check if login with REMOTE_USER is allowed.\n remote_user = \"\"\n password_reset = False\n if not hasattr(request, \"all_data\"):\n request.all_data = {}\n # Depending on displaying the realm dropdown, we fill realms or not.\n policy_object = PolicyClass()\n realms = \"\"\n client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n realm_dropdown = policy_object.get_policies(action=ACTION.REALMDROPDOWN,\n scope=SCOPE.WEBUI,\n client=client_ip,\n active=True)\n if realm_dropdown:\n try:\n realm_dropdown_values = policy_object.get_action_values(\n action=ACTION.REALMDROPDOWN,\n scope=SCOPE.WEBUI,\n client=client_ip)\n # Use the realms from the policy.\n realms = \",\".join(realm_dropdown_values)\n except AttributeError as ex:\n # The policy is still a boolean realm_dropdown action\n # Thus we display ALL realms\n realms = \",\".join(get_realms())\n if realms:\n realms = \",\" + realms\n\n try:\n if is_remote_user_allowed(request):\n remote_user = request.remote_user\n password_reset = is_password_reset()\n hsm_ready = True\n except HSMException:\n hsm_ready = False\n\n # Use policies to determine the customization of menu\n # and baseline. 
get_action_values returns an array!\n sub_state = subscription_status()\n customization_menu_file = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.CUSTOM_MENU,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True)\n if len(customization_menu_file) and list(customization_menu_file)[0] \\\n and sub_state not in [1, 2]:\n customization_menu_file = list(customization_menu_file)[0]\n else:\n customization_menu_file = \"templates/menu.html\"\n customization_baseline_file = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.CUSTOM_BASELINE,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True)\n if len(customization_baseline_file) and list(customization_baseline_file)[0] \\\n and sub_state not in [1, 2]:\n customization_baseline_file = list(customization_baseline_file)[0]\n else:\n customization_baseline_file = \"templates/baseline.html\"\n\n login_text = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.LOGIN_TEXT,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True\n )\n if len(login_text) and list(login_text)[0] and sub_state not in [1, 2]:\n login_text = list(login_text)[0]\n else:\n login_text = \"\"\n\n return render_template(\"index.html\", instance=instance,\n backendUrl=backend_url,\n browser_lang=browser_lang,\n remote_user=remote_user,\n theme=theme,\n password_reset=password_reset,\n hsm_ready=hsm_ready,\n has_job_queue=str(has_job_queue()),\n customization=customization,\n customization_menu_file=customization_menu_file,\n customization_baseline_file=customization_baseline_file,\n realms=realms,\n external_links=external_links,\n login_text=login_text,\n logo=logo)\n\n", "path": "privacyidea/webui/login.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) cornelius k\u00f6lbel, privacyidea.org\n#\n# 2017-11-14 Cornelius K\u00f6lbel <[email protected]>\n# Add custom baseline and menu\n# 2016-01-07 Cornelius K\u00f6lbel <[email protected]>\n# Add password reset\n# 2015-11-04 Cornelius K\u00f6lbel <[email protected]>\n# Add REMOTE_USER check\n# 2014-12-22 Cornelius K\u00f6lbel, <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.lib.queue import has_job_queue\n\n__doc__ = \"\"\"This is the starting point for the single web application.\nOther html code is dynamically loaded via angularJS and located in\n/static/views/...\n\"\"\"\n__author__ = \"Cornelius K\u00f6lbel <[email protected]>\"\n\nfrom flask import (Blueprint, render_template, request,\n current_app)\nfrom privacyidea.api.lib.prepolicy import is_remote_user_allowed\nfrom privacyidea.lib.passwordreset import is_password_reset\nfrom privacyidea.lib.error import HSMException\nfrom privacyidea.lib.realm import get_realms\nfrom privacyidea.lib.policy import PolicyClass, ACTION, SCOPE\nfrom privacyidea.lib.subscriptions import subscription_status\nfrom privacyidea.lib.utils import get_client_ip\nfrom privacyidea.lib.config import get_from_config, SYSCONF\n\nDEFAULT_THEME = \"/static/contrib/css/bootstrap-theme.css\"\n\nlogin_blueprint = Blueprint('login_blueprint', __name__)\n\n\n@login_blueprint.route('/', methods=['GET'])\ndef single_page_application():\n instance = request.script_root\n if instance == \"/\":\n instance = \"\"\n # The backend URL should come from the configuration of the system.\n backend_url = \"\"\n\n if current_app.config.get(\"PI_UI_DEACTIVATED\"):\n # Do not provide the UI\n return render_template(\"deactivated.html\")\n\n # The default theme. We can change this later\n theme = current_app.config.get(\"PI_CSS\", DEFAULT_THEME)\n # Get further customizations\n customization = current_app.config.get(\"PI_CUSTOMIZATION\",\n \"/static/customize/\")\n customization = customization.strip('/')\n # TODO: we should add the CSS into PI_CUSTOMZATION/css\n # Enrollment-Wizard:\n # PI_CUSTOMIZATION/views/includes/token.enroll.pre.top.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.pre.bottom.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.post.top.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.post.bottom.html\n # Get the hidden external links\n external_links = current_app.config.get(\"PI_EXTERNAL_LINKS\", True)\n # Get the logo file\n logo = current_app.config.get(\"PI_LOGO\", \"privacyIDEA1.png\")\n browser_lang = request.accept_languages.best_match([\"en\", \"de\", \"de-DE\"], default=\"en\").split(\"-\")[0]\n # check if login with REMOTE_USER is allowed.\n remote_user = \"\"\n password_reset = False\n if not hasattr(request, \"all_data\"):\n request.all_data = {}\n # Depending on displaying the realm dropdown, we fill realms or not.\n policy_object = PolicyClass()\n realms = \"\"\n client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n realm_dropdown = policy_object.get_policies(action=ACTION.REALMDROPDOWN,\n scope=SCOPE.WEBUI,\n client=client_ip,\n active=True)\n if realm_dropdown:\n try:\n realm_dropdown_values = policy_object.get_action_values(\n action=ACTION.REALMDROPDOWN,\n scope=SCOPE.WEBUI,\n client=client_ip)\n # Use the realms from the policy.\n realms = \",\".join(realm_dropdown_values)\n except AttributeError as ex:\n # The policy is still a boolean realm_dropdown action\n # Thus we display ALL realms\n realms = \",\".join(get_realms())\n\n try:\n if is_remote_user_allowed(request):\n remote_user = request.remote_user\n password_reset = is_password_reset()\n hsm_ready = True\n except HSMException:\n hsm_ready = False\n\n # Use policies to determine the customization of menu\n # and baseline. 
get_action_values returns an array!\n sub_state = subscription_status()\n customization_menu_file = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.CUSTOM_MENU,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True)\n if len(customization_menu_file) and list(customization_menu_file)[0] \\\n and sub_state not in [1, 2]:\n customization_menu_file = list(customization_menu_file)[0]\n else:\n customization_menu_file = \"templates/menu.html\"\n customization_baseline_file = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.CUSTOM_BASELINE,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True)\n if len(customization_baseline_file) and list(customization_baseline_file)[0] \\\n and sub_state not in [1, 2]:\n customization_baseline_file = list(customization_baseline_file)[0]\n else:\n customization_baseline_file = \"templates/baseline.html\"\n\n login_text = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.LOGIN_TEXT,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True\n )\n if len(login_text) and list(login_text)[0] and sub_state not in [1, 2]:\n login_text = list(login_text)[0]\n else:\n login_text = \"\"\n\n return render_template(\"index.html\", instance=instance,\n backendUrl=backend_url,\n browser_lang=browser_lang,\n remote_user=remote_user,\n theme=theme,\n password_reset=password_reset,\n hsm_ready=hsm_ready,\n has_job_queue=str(has_job_queue()),\n customization=customization,\n customization_menu_file=customization_menu_file,\n customization_baseline_file=customization_baseline_file,\n realms=realms,\n external_links=external_links,\n login_text=login_text,\n logo=logo)\n\n", "path": "privacyidea/webui/login.py"}]} | 2,257 | 108 |
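
The login view in the record above repeats one pattern three times: `get_action_values` returns an array, and the code keeps its first element when one is present (and the subscription state allows it), otherwise it falls back to a default (`templates/menu.html`, `templates/baseline.html`, or an empty login text). A minimal sketch of that fallback pattern, using a hypothetical helper name that is not part of privacyIDEA:

```python
def first_action_value(values, default, sub_state, blocked_states=(1, 2)):
    """Return the first usable policy action value, or `default` if none applies."""
    values = list(values)
    if values and values[0] and sub_state not in blocked_states:
        return values[0]
    return default

# Hypothetical usage mirroring the record above:
# menu_file = first_action_value(menu_values, "templates/menu.html", sub_state)
# baseline_file = first_action_value(baseline_values, "templates/baseline.html", sub_state)
# login_text = first_action_value(login_text_values, "", sub_state)
```
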
gh_patches_debug_57587 | rasdani/github-patches | git_diff | joke2k__faker-262 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
en_US SSN provider generates invalid SSNs
There are a few limitations on United States SSNs that prevent them from being completely random.
- No group can be all 0s
- The SSN cannot start with 666
- The SSN cannot start with a number >= 900
See http://www.ssa.gov/employer/randomization.html
Could you modify the SSN generator to avoid these issues?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/ssn/en_US/__init__.py`
Content:
```
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as BaseProvider
4
5
6 class Provider(BaseProvider):
7 pass
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/ssn/en_US/__init__.py b/faker/providers/ssn/en_US/__init__.py
--- a/faker/providers/ssn/en_US/__init__.py
+++ b/faker/providers/ssn/en_US/__init__.py
@@ -4,4 +4,17 @@
class Provider(BaseProvider):
- pass
+
+ @classmethod
+ def ssn(cls):
+ # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)
+ # cannot be 666 or 900-999. The group number (middle digits) cannot be
+ # 00. The serial (last 4 digits) cannot be 0000
+ area = BaseProvider.random_int(min=1, max=899)
+ if area == 666:
+ area += 1
+ group = BaseProvider.random_int(1, 99)
+ serial = BaseProvider.random_int(1, 9999)
+
+ ssn = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)
+ return ssn
| {"golden_diff": "diff --git a/faker/providers/ssn/en_US/__init__.py b/faker/providers/ssn/en_US/__init__.py\n--- a/faker/providers/ssn/en_US/__init__.py\n+++ b/faker/providers/ssn/en_US/__init__.py\n@@ -4,4 +4,17 @@\n \n \n class Provider(BaseProvider):\n- pass\n+\n+ @classmethod\n+ def ssn(cls):\n+ # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)\n+ # cannot be 666 or 900-999. The group number (middle digits) cannot be\n+ # 00. The serial (last 4 digits) cannot be 0000\n+ area = BaseProvider.random_int(min=1, max=899)\n+ if area == 666:\n+ area += 1\n+ group = BaseProvider.random_int(1, 99)\n+ serial = BaseProvider.random_int(1, 9999)\n+\n+ ssn = \"{0:03d}-{1:02d}-{2:04d}\".format(area, group, serial)\n+ return ssn\n", "issue": "en_US SSN provider generates invalid SSNs\nThere's a few limitations on United States' SSNs that prevent it from being completely random.\n- No group can be all 0s\n- The SSN cannot start with 666\n- The SSN cannot start with a number >= 900\n\nSee http://www.ssa.gov/employer/randomization.html\n\nCould you modify the SSN generator to avoid these issues?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as BaseProvider\n\n\nclass Provider(BaseProvider):\n pass\n", "path": "faker/providers/ssn/en_US/__init__.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as BaseProvider\n\n\nclass Provider(BaseProvider):\n\n @classmethod\n def ssn(cls):\n # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)\n # cannot be 666 or 900-999. The group number (middle digits) cannot be\n # 00. The serial (last 4 digits) cannot be 0000\n area = BaseProvider.random_int(min=1, max=899)\n if area == 666:\n area += 1\n group = BaseProvider.random_int(1, 99)\n serial = BaseProvider.random_int(1, 9999)\n\n ssn = \"{0:03d}-{1:02d}-{2:04d}\".format(area, group, serial)\n return ssn\n", "path": "faker/providers/ssn/en_US/__init__.py"}]} | 395 | 269 |
gh_patches_debug_57390 | rasdani/github-patches | git_diff | translate__pootle-4187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change icons for v-folders
To better distinguish virtual folders (or "goals") from regular folders, let's use the following icon:

Preview:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/core/browser.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.utils.translation import ugettext_lazy as _
11
12
13 HEADING_CHOICES = [
14 {
15 'id': 'name',
16 'class': 'stats',
17 'display_name': _("Name"),
18 },
19 {
20 'id': 'priority',
21 'class': 'stats-number sorttable_numeric',
22 'display_name': _("Priority"),
23 },
24 {
25 'id': 'project',
26 'class': 'stats',
27 'display_name': _("Project"),
28 },
29 {
30 'id': 'language',
31 'class': 'stats',
32 'display_name': _("Language"),
33 },
34 {
35 'id': 'progress',
36 'class': 'stats',
37 # Translators: noun. The graphical representation of translation status
38 'display_name': _("Progress"),
39 },
40 {
41 'id': 'total',
42 'class': 'stats-number sorttable_numeric when-loaded',
43 # Translators: Heading representing the total number of words of a file
44 # or directory
45 'display_name': _("Total"),
46 },
47 {
48 'id': 'last-updated',
49 'class': 'stats sorttable_numeric when-loaded',
50 'display_name': _("Last updated"),
51 },
52 {
53 'id': 'need-translation',
54 'class': 'stats-number sorttable_numeric when-loaded',
55 'display_name': _("Need Translation"),
56 },
57 {
58 'id': 'suggestions',
59 'class': 'stats-number sorttable_numeric when-loaded',
60 # Translators: The number of suggestions pending review
61 'display_name': _("Suggestions"),
62 },
63 {
64 'id': 'critical',
65 'class': 'stats-number sorttable_numeric when-loaded',
66 'display_name': _("Critical"),
67 },
68 {
69 'id': 'activity',
70 'class': 'stats sorttable_numeric when-loaded',
71 'display_name': _("Last Activity"),
72 },
73 ]
74
75
76 def get_table_headings(choices):
77 """Filters the list of available table headings to the given `choices`."""
78 return filter(lambda x: x['id'] in choices, HEADING_CHOICES)
79
80
81 def make_generic_item(path_obj, **kwargs):
82 """Template variables for each row in the table."""
83 return {
84 'href': path_obj.get_absolute_url(),
85 'href_all': path_obj.get_translate_url(),
86 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),
87 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),
88 'href_critical': path_obj.get_critical_url(**kwargs),
89 'title': path_obj.name,
90 'code': path_obj.code,
91 'is_disabled': getattr(path_obj, 'disabled', False),
92 }
93
94
95 def make_directory_item(directory):
96 filters = {}
97
98 if directory.has_vfolders:
99 # The directory has virtual folders, so append priority sorting to URL.
100 filters['sort'] = 'priority'
101
102 item = make_generic_item(directory, **filters)
103 item.update({
104 'icon': 'folder',
105 })
106 return item
107
108
109 def make_store_item(store):
110 item = make_generic_item(store)
111 item.update({
112 'icon': 'file',
113 })
114 return item
115
116
117 def get_parent(path_obj):
118 """Retrieves a representation of the parent object.
119
120 :param path_obj: either a `Directory` or Store` instance.
121 """
122 parent_dir = path_obj.parent
123
124 if parent_dir.is_project():
125 return None
126
127 if parent_dir.is_language():
128 label = _('Back to language')
129 else:
130 label = _('Back to parent folder')
131
132 return {
133 'title': label,
134 'href': parent_dir.get_absolute_url()
135 }
136
137
138 def make_project_item(translation_project):
139 item = make_generic_item(translation_project)
140 item.update({
141 'icon': 'project',
142 'title': translation_project.project.name,
143 })
144 return item
145
146
147 def make_language_item(translation_project):
148 item = make_generic_item(translation_project)
149 item.update({
150 'icon': 'language',
151 'title': translation_project.language.name,
152 })
153 return item
154
155
156 def make_xlanguage_item(resource_obj):
157 translation_project = resource_obj.translation_project
158 item = make_generic_item(resource_obj)
159 item.update({
160 'icon': 'language',
161 'code': translation_project.language.code,
162 'title': translation_project.language.name,
163 })
164 return item
165
166
167 def make_project_list_item(project):
168 item = make_generic_item(project)
169 item.update({
170 'icon': 'project',
171 'title': project.fullname,
172 })
173 return item
174
175
176 def get_children(directory):
177 """Returns a list of children directories and stores for this
178 ``directory``.
179
180 The elements of the list are dictionaries which keys are populated after
181 in the templates.
182 """
183 directories = [make_directory_item(child_dir)
184 for child_dir in directory.child_dirs.live().iterator()]
185
186 stores = [make_store_item(child_store)
187 for child_store in directory.child_stores.live().iterator()]
188
189 return directories + stores
190
191
192 def make_vfolder_treeitem(vfolder_treeitem):
193 return {
194 'href_all': vfolder_treeitem.get_translate_url(),
195 'href_todo': vfolder_treeitem.get_translate_url(state='incomplete'),
196 'href_sugg': vfolder_treeitem.get_translate_url(state='suggestions'),
197 'href_critical': vfolder_treeitem.get_critical_url(),
198 'title': vfolder_treeitem.vfolder.name,
199 'code': vfolder_treeitem.code,
200 'priority': vfolder_treeitem.vfolder.priority,
201 'is_grayed': not vfolder_treeitem.is_visible,
202 'icon': 'folder',
203 }
204
205
206 def get_vfolders(directory, all_vfolders=False):
207 """Return a list of virtual folders for this ``directory``.
208
209 The elements of the list are dictionaries which keys are populated after
210 in the templates.
211
212 If ``all_vfolders`` is True then all the virtual folders matching the
213 provided directory are returned. If not only the visible ones are returned.
214 """
215 return [make_vfolder_treeitem(vfolder_treeitem)
216 for vfolder_treeitem
217 in directory.vf_treeitems.order_by('-vfolder__priority').iterator()
218 if all_vfolders or vfolder_treeitem.is_visible]
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/core/browser.py b/pootle/core/browser.py
--- a/pootle/core/browser.py
+++ b/pootle/core/browser.py
@@ -199,7 +199,7 @@
'code': vfolder_treeitem.code,
'priority': vfolder_treeitem.vfolder.priority,
'is_grayed': not vfolder_treeitem.is_visible,
- 'icon': 'folder',
+ 'icon': 'vfolder',
}
| {"golden_diff": "diff --git a/pootle/core/browser.py b/pootle/core/browser.py\n--- a/pootle/core/browser.py\n+++ b/pootle/core/browser.py\n@@ -199,7 +199,7 @@\n 'code': vfolder_treeitem.code,\n 'priority': vfolder_treeitem.vfolder.priority,\n 'is_grayed': not vfolder_treeitem.is_visible,\n- 'icon': 'folder',\n+ 'icon': 'vfolder',\n }\n", "issue": "Change icons for v-folders\nTo better distinguish virtual folders (or \"goals\") from regular folders, let's use the following icon:\n\n\n\nPreview:\n\n\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nHEADING_CHOICES = [\n {\n 'id': 'name',\n 'class': 'stats',\n 'display_name': _(\"Name\"),\n },\n {\n 'id': 'priority',\n 'class': 'stats-number sorttable_numeric',\n 'display_name': _(\"Priority\"),\n },\n {\n 'id': 'project',\n 'class': 'stats',\n 'display_name': _(\"Project\"),\n },\n {\n 'id': 'language',\n 'class': 'stats',\n 'display_name': _(\"Language\"),\n },\n {\n 'id': 'progress',\n 'class': 'stats',\n # Translators: noun. The graphical representation of translation status\n 'display_name': _(\"Progress\"),\n },\n {\n 'id': 'total',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: Heading representing the total number of words of a file\n # or directory\n 'display_name': _(\"Total\"),\n },\n {\n 'id': 'last-updated',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last updated\"),\n },\n {\n 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'suggestions',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: The number of suggestions pending review\n 'display_name': _(\"Suggestions\"),\n },\n {\n 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'activity',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last Activity\"),\n },\n]\n\n\ndef get_table_headings(choices):\n \"\"\"Filters the list of available table headings to the given `choices`.\"\"\"\n return filter(lambda x: x['id'] in choices, HEADING_CHOICES)\n\n\ndef make_generic_item(path_obj, **kwargs):\n \"\"\"Template variables for each row in the table.\"\"\"\n return {\n 'href': path_obj.get_absolute_url(),\n 'href_all': path_obj.get_translate_url(),\n 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),\n 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),\n 'href_critical': path_obj.get_critical_url(**kwargs),\n 'title': path_obj.name,\n 'code': path_obj.code,\n 'is_disabled': getattr(path_obj, 'disabled', False),\n }\n\n\ndef make_directory_item(directory):\n filters = {}\n\n if directory.has_vfolders:\n # The directory has virtual folders, so append priority sorting to URL.\n filters['sort'] = 'priority'\n\n item = make_generic_item(directory, **filters)\n item.update({\n 'icon': 'folder',\n })\n return item\n\n\ndef make_store_item(store):\n item = make_generic_item(store)\n item.update({\n 'icon': 'file',\n })\n return item\n\n\ndef get_parent(path_obj):\n \"\"\"Retrieves a representation of the parent object.\n\n :param 
path_obj: either a `Directory` or Store` instance.\n \"\"\"\n parent_dir = path_obj.parent\n\n if parent_dir.is_project():\n return None\n\n if parent_dir.is_language():\n label = _('Back to language')\n else:\n label = _('Back to parent folder')\n\n return {\n 'title': label,\n 'href': parent_dir.get_absolute_url()\n }\n\n\ndef make_project_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'project',\n 'title': translation_project.project.name,\n })\n return item\n\n\ndef make_language_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'language',\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_xlanguage_item(resource_obj):\n translation_project = resource_obj.translation_project\n item = make_generic_item(resource_obj)\n item.update({\n 'icon': 'language',\n 'code': translation_project.language.code,\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_project_list_item(project):\n item = make_generic_item(project)\n item.update({\n 'icon': 'project',\n 'title': project.fullname,\n })\n return item\n\n\ndef get_children(directory):\n \"\"\"Returns a list of children directories and stores for this\n ``directory``.\n\n The elements of the list are dictionaries which keys are populated after\n in the templates.\n \"\"\"\n directories = [make_directory_item(child_dir)\n for child_dir in directory.child_dirs.live().iterator()]\n\n stores = [make_store_item(child_store)\n for child_store in directory.child_stores.live().iterator()]\n\n return directories + stores\n\n\ndef make_vfolder_treeitem(vfolder_treeitem):\n return {\n 'href_all': vfolder_treeitem.get_translate_url(),\n 'href_todo': vfolder_treeitem.get_translate_url(state='incomplete'),\n 'href_sugg': vfolder_treeitem.get_translate_url(state='suggestions'),\n 'href_critical': vfolder_treeitem.get_critical_url(),\n 'title': vfolder_treeitem.vfolder.name,\n 'code': vfolder_treeitem.code,\n 'priority': vfolder_treeitem.vfolder.priority,\n 'is_grayed': not vfolder_treeitem.is_visible,\n 'icon': 'folder',\n }\n\n\ndef get_vfolders(directory, all_vfolders=False):\n \"\"\"Return a list of virtual folders for this ``directory``.\n\n The elements of the list are dictionaries which keys are populated after\n in the templates.\n\n If ``all_vfolders`` is True then all the virtual folders matching the\n provided directory are returned. If not only the visible ones are returned.\n \"\"\"\n return [make_vfolder_treeitem(vfolder_treeitem)\n for vfolder_treeitem\n in directory.vf_treeitems.order_by('-vfolder__priority').iterator()\n if all_vfolders or vfolder_treeitem.is_visible]\n", "path": "pootle/core/browser.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nHEADING_CHOICES = [\n {\n 'id': 'name',\n 'class': 'stats',\n 'display_name': _(\"Name\"),\n },\n {\n 'id': 'priority',\n 'class': 'stats-number sorttable_numeric',\n 'display_name': _(\"Priority\"),\n },\n {\n 'id': 'project',\n 'class': 'stats',\n 'display_name': _(\"Project\"),\n },\n {\n 'id': 'language',\n 'class': 'stats',\n 'display_name': _(\"Language\"),\n },\n {\n 'id': 'progress',\n 'class': 'stats',\n # Translators: noun. The graphical representation of translation status\n 'display_name': _(\"Progress\"),\n },\n {\n 'id': 'total',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: Heading representing the total number of words of a file\n # or directory\n 'display_name': _(\"Total\"),\n },\n {\n 'id': 'last-updated',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last updated\"),\n },\n {\n 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'suggestions',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: The number of suggestions pending review\n 'display_name': _(\"Suggestions\"),\n },\n {\n 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'activity',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last Activity\"),\n },\n]\n\n\ndef get_table_headings(choices):\n \"\"\"Filters the list of available table headings to the given `choices`.\"\"\"\n return filter(lambda x: x['id'] in choices, HEADING_CHOICES)\n\n\ndef make_generic_item(path_obj, **kwargs):\n \"\"\"Template variables for each row in the table.\"\"\"\n return {\n 'href': path_obj.get_absolute_url(),\n 'href_all': path_obj.get_translate_url(),\n 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),\n 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),\n 'href_critical': path_obj.get_critical_url(**kwargs),\n 'title': path_obj.name,\n 'code': path_obj.code,\n 'is_disabled': getattr(path_obj, 'disabled', False),\n }\n\n\ndef make_directory_item(directory):\n filters = {}\n\n if directory.has_vfolders:\n # The directory has virtual folders, so append priority sorting to URL.\n filters['sort'] = 'priority'\n\n item = make_generic_item(directory, **filters)\n item.update({\n 'icon': 'folder',\n })\n return item\n\n\ndef make_store_item(store):\n item = make_generic_item(store)\n item.update({\n 'icon': 'file',\n })\n return item\n\n\ndef get_parent(path_obj):\n \"\"\"Retrieves a representation of the parent object.\n\n :param path_obj: either a `Directory` or Store` instance.\n \"\"\"\n parent_dir = path_obj.parent\n\n if parent_dir.is_project():\n return None\n\n if parent_dir.is_language():\n label = _('Back to language')\n else:\n label = _('Back to parent folder')\n\n return {\n 'title': label,\n 'href': parent_dir.get_absolute_url()\n }\n\n\ndef make_project_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'project',\n 'title': translation_project.project.name,\n })\n return item\n\n\ndef make_language_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'language',\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef 
make_xlanguage_item(resource_obj):\n translation_project = resource_obj.translation_project\n item = make_generic_item(resource_obj)\n item.update({\n 'icon': 'language',\n 'code': translation_project.language.code,\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_project_list_item(project):\n item = make_generic_item(project)\n item.update({\n 'icon': 'project',\n 'title': project.fullname,\n })\n return item\n\n\ndef get_children(directory):\n \"\"\"Returns a list of children directories and stores for this\n ``directory``.\n\n The elements of the list are dictionaries which keys are populated after\n in the templates.\n \"\"\"\n directories = [make_directory_item(child_dir)\n for child_dir in directory.child_dirs.live().iterator()]\n\n stores = [make_store_item(child_store)\n for child_store in directory.child_stores.live().iterator()]\n\n return directories + stores\n\n\ndef make_vfolder_treeitem(vfolder_treeitem):\n return {\n 'href_all': vfolder_treeitem.get_translate_url(),\n 'href_todo': vfolder_treeitem.get_translate_url(state='incomplete'),\n 'href_sugg': vfolder_treeitem.get_translate_url(state='suggestions'),\n 'href_critical': vfolder_treeitem.get_critical_url(),\n 'title': vfolder_treeitem.vfolder.name,\n 'code': vfolder_treeitem.code,\n 'priority': vfolder_treeitem.vfolder.priority,\n 'is_grayed': not vfolder_treeitem.is_visible,\n 'icon': 'vfolder',\n }\n\n\ndef get_vfolders(directory, all_vfolders=False):\n \"\"\"Return a list of virtual folders for this ``directory``.\n\n The elements of the list are dictionaries which keys are populated after\n in the templates.\n\n If ``all_vfolders`` is True then all the virtual folders matching the\n provided directory are returned. If not only the visible ones are returned.\n \"\"\"\n return [make_vfolder_treeitem(vfolder_treeitem)\n for vfolder_treeitem\n in directory.vf_treeitems.order_by('-vfolder__priority').iterator()\n if all_vfolders or vfolder_treeitem.is_visible]\n", "path": "pootle/core/browser.py"}]} | 2,393 | 105 |
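
A small standalone check of the behaviour the patch above introduces: virtual-folder tree items report the `vfolder` icon while plain directories keep `folder`. The function below is a reduced stand-in for Pootle's `make_vfolder_treeitem`, keeping only the fields the patch touches; it is illustrative, not the real implementation:

```python
def vfolder_item_fields(name: str, priority: float, is_visible: bool) -> dict:
    # Stand-in for make_vfolder_treeitem, limited to the icon-related fields.
    return {
        "title": name,
        "priority": priority,
        "is_grayed": not is_visible,
        "icon": "vfolder",  # was "folder" before the patch
    }

item = vfolder_item_fields("Critical strings", 5.0, True)
assert item["icon"] == "vfolder"
```
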
gh_patches_debug_57082 | rasdani/github-patches | git_diff | SeldonIO__MLServer-1171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OS constraint in PyPI
Mention MLServer's OS constraints as metadata in `setup.py`, so that they become visible on pypi.org.
```
setup(...,
classifiers=[
'Operating System :: POSIX',
],
)
```
_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 include_package_data=True,
42 packages=find_packages(exclude=["tests", "tests.*"]),
43 install_requires=[
44 "click",
45 # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
46 "fastapi >=0.88.0, <=0.89.1, !=0.89.0",
47 "python-dotenv",
48 "grpcio",
49 # The importlib-resources backport is required to use some
50 # functionality added in Python 3.10
51 # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime
52 "importlib-resources",
53 "numpy",
54 "pandas",
55 "protobuf",
56 "uvicorn",
57 "starlette_exporter",
58 "py-grpc-prometheus",
59 "uvloop;" + env_marker_cpython,
60 "aiokafka",
61 "tritonclient[http]>=2.24",
62 "aiofiles",
63 "orjson",
64 ],
65 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
66 long_description=_load_description(),
67 long_description_content_type="text/markdown",
68 license="Apache 2.0",
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
url="https://github.com/SeldonIO/MLServer.git",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
+ classifiers=["Operating System :: POSIX", "Operating System :: MacOS"],
description="ML server",
include_package_data=True,
packages=find_packages(exclude=["tests", "tests.*"]),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n+ classifiers=[\"Operating System :: POSIX\", \"Operating System :: MacOS\"],\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n", "issue": "Add OS constraint in PyPI\nMention MLServer's OS constraints as metadata in `setup.py`, so that it becomes visible in pypi.org. \r\n\r\n```\r\nsetup(...,\r\n classifiers=[\r\n 'Operating System :: POSIX',\r\n ],\r\n )\r\n```\r\n\r\n_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_\r\n \n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi >=0.88.0, <=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n # The importlib-resources backport is required to use some\n # functionality added in Python 3.10\n # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime\n \"importlib-resources\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n 
version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n classifiers=[\"Operating System :: POSIX\", \"Operating System :: MacOS\"],\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi >=0.88.0, <=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n # The importlib-resources backport is required to use some\n # functionality added in Python 3.10\n # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime\n \"importlib-resources\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} | 1,009 | 104 |
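
A minimal `setup.py` sketch showing where the classifiers added by the patch above sit; every value other than the two classifiers is a placeholder:

```python
from setuptools import setup, find_packages

setup(
    name="example-package",   # placeholder
    version="0.1.0",          # placeholder
    packages=find_packages(exclude=["tests", "tests.*"]),
    # Classifiers from the patch: PyPI renders these on the project page,
    # making the supported operating systems visible to users.
    classifiers=["Operating System :: POSIX", "Operating System :: MacOS"],
)
```
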
gh_patches_debug_41531 | rasdani/github-patches | git_diff | deepset-ai__haystack-7247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docstrings - `haystack.components.caching`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/caching/cache_checker.py`
Content:
```
1 from typing import List, Dict, Any
2
3 import importlib
4
5 import logging
6
7 from haystack import component, Document, default_from_dict, default_to_dict, DeserializationError
8 from haystack.document_stores.types import DocumentStore
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @component
15 class CacheChecker:
16 """
17 CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified
18 cache field.
19 """
20
21 def __init__(self, document_store: DocumentStore, cache_field: str):
22 """
23 Create a UrlCacheChecker component.
24 """
25 self.document_store = document_store
26 self.cache_field = cache_field
27
28 def to_dict(self) -> Dict[str, Any]:
29 """
30 Serialize this component to a dictionary.
31 """
32 return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)
33
34 @classmethod
35 def from_dict(cls, data: Dict[str, Any]) -> "CacheChecker":
36 """
37 Deserialize this component from a dictionary.
38 """
39 init_params = data.get("init_parameters", {})
40 if "document_store" not in init_params:
41 raise DeserializationError("Missing 'document_store' in serialization data")
42 if "type" not in init_params["document_store"]:
43 raise DeserializationError("Missing 'type' in document store's serialization data")
44
45 try:
46 module_name, type_ = init_params["document_store"]["type"].rsplit(".", 1)
47 logger.debug("Trying to import %s", module_name)
48 module = importlib.import_module(module_name)
49 except (ImportError, DeserializationError) as e:
50 raise DeserializationError(
51 f"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported"
52 ) from e
53
54 docstore_class = getattr(module, type_)
55 docstore = docstore_class.from_dict(init_params["document_store"])
56
57 data["init_parameters"]["document_store"] = docstore
58 return default_from_dict(cls, data)
59
60 @component.output_types(hits=List[Document], misses=List)
61 def run(self, items: List[Any]):
62 """
63 Checks if any document associated with the specified field is already present in the store. If matching documents
64 are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.
65
66 :param items: A list of values associated with the cache_field to be checked against the cache.
67 :return: A dictionary with two keys: "hits" and "misses". The values are lists of documents that were found in
68 the cache and items that were not, respectively.
69 """
70 found_documents = []
71 misses = []
72
73 for item in items:
74 filters = {self.cache_field: item}
75 found = self.document_store.filter_documents(filters=filters)
76 if found:
77 found_documents.extend(found)
78 else:
79 misses.append(item)
80 return {"hits": found_documents, "misses": misses}
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/components/caching/cache_checker.py b/haystack/components/caching/cache_checker.py
--- a/haystack/components/caching/cache_checker.py
+++ b/haystack/components/caching/cache_checker.py
@@ -14,27 +14,63 @@
@component
class CacheChecker:
"""
- CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified
- cache field.
+ Checks for the presence of documents in a Document Store based on a specified
+ field in each document's metadata.
+
+ If matching documents are found, they are returned as hits. If not, the items
+ are returned as misses, indicating they are not in the cache.
+
+ Usage example:
+ ```python
+ from haystack import Document
+ from haystack.document_stores.in_memory import InMemoryDocumentStore
+ from haystack.components.caching.cache_checker import CacheChecker
+
+ docstore = InMemoryDocumentStore()
+ documents = [
+ Document(content="doc1", meta={"url": "https://example.com/1"}),
+ Document(content="doc2", meta={"url": "https://example.com/2"}),
+ Document(content="doc3", meta={"url": "https://example.com/1"}),
+ Document(content="doc4", meta={"url": "https://example.com/2"}),
+ ]
+ docstore.write_documents(documents)
+ checker = CacheChecker(docstore, cache_field="url")
+ results = checker.run(items=["https://example.com/1", "https://example.com/5"])
+ assert results == {"hits": [documents[0], documents[2]], "misses": ["https://example.com/5"]}
+ ```
"""
def __init__(self, document_store: DocumentStore, cache_field: str):
"""
- Create a UrlCacheChecker component.
+ Create a CacheChecker component.
+
+ :param document_store:
+ Document store to check.
+ :param cache_field:
+ Name of the Document metadata field
+ to check for cache hits.
"""
self.document_store = document_store
self.cache_field = cache_field
def to_dict(self) -> Dict[str, Any]:
"""
- Serialize this component to a dictionary.
+ Serializes the component to a dictionary.
+
+ :returns:
+ Dictionary with serialized data.
"""
return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "CacheChecker":
"""
- Deserialize this component from a dictionary.
+ Deserializes the component from a dictionary.
+
+ :param data:
+ Dictionary to deserialize from.
+ :returns:
+ Deserialized component.
"""
init_params = data.get("init_parameters", {})
if "document_store" not in init_params:
@@ -60,12 +96,15 @@
@component.output_types(hits=List[Document], misses=List)
def run(self, items: List[Any]):
"""
- Checks if any document associated with the specified field is already present in the store. If matching documents
- are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.
-
- :param items: A list of values associated with the cache_field to be checked against the cache.
- :return: A dictionary with two keys: "hits" and "misses". The values are lists of documents that were found in
- the cache and items that were not, respectively.
+ Checks if any document associated with the specified cache field
+ is already present in the store.
+
+ :param items:
+ Values to be checked against the cache field.
+ :return:
+ A dictionary with two keys:
+ - `hits` - Documents that matched with any of the items.
+ - `misses` - Items that were not present in any documents.
"""
found_documents = []
misses = []
| {"golden_diff": "diff --git a/haystack/components/caching/cache_checker.py b/haystack/components/caching/cache_checker.py\n--- a/haystack/components/caching/cache_checker.py\n+++ b/haystack/components/caching/cache_checker.py\n@@ -14,27 +14,63 @@\n @component\n class CacheChecker:\n \"\"\"\n- CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified\n- cache field.\n+ Checks for the presence of documents in a Document Store based on a specified\n+ field in each document's metadata.\n+\n+ If matching documents are found, they are returned as hits. If not, the items\n+ are returned as misses, indicating they are not in the cache.\n+\n+ Usage example:\n+ ```python\n+ from haystack import Document\n+ from haystack.document_stores.in_memory import InMemoryDocumentStore\n+ from haystack.components.caching.cache_checker import CacheChecker\n+\n+ docstore = InMemoryDocumentStore()\n+ documents = [\n+ Document(content=\"doc1\", meta={\"url\": \"https://example.com/1\"}),\n+ Document(content=\"doc2\", meta={\"url\": \"https://example.com/2\"}),\n+ Document(content=\"doc3\", meta={\"url\": \"https://example.com/1\"}),\n+ Document(content=\"doc4\", meta={\"url\": \"https://example.com/2\"}),\n+ ]\n+ docstore.write_documents(documents)\n+ checker = CacheChecker(docstore, cache_field=\"url\")\n+ results = checker.run(items=[\"https://example.com/1\", \"https://example.com/5\"])\n+ assert results == {\"hits\": [documents[0], documents[2]], \"misses\": [\"https://example.com/5\"]}\n+ ```\n \"\"\"\n \n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n- Create a UrlCacheChecker component.\n+ Create a CacheChecker component.\n+\n+ :param document_store:\n+ Document store to check.\n+ :param cache_field:\n+ Name of the Document metadata field\n+ to check for cache hits.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n \n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n- Serialize this component to a dictionary.\n+ Serializes the component to a dictionary.\n+\n+ :returns:\n+ Dictionary with serialized data.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n \n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n- Deserialize this component from a dictionary.\n+ Deserializes the component from a dictionary.\n+\n+ :param data:\n+ Dictionary to deserialize from.\n+ :returns:\n+ Deserialized component.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n@@ -60,12 +96,15 @@\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n- Checks if any document associated with the specified field is already present in the store. If matching documents\n- are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.\n-\n- :param items: A list of values associated with the cache_field to be checked against the cache.\n- :return: A dictionary with two keys: \"hits\" and \"misses\". 
The values are lists of documents that were found in\n- the cache and items that were not, respectively.\n+ Checks if any document associated with the specified cache field\n+ is already present in the store.\n+\n+ :param items:\n+ Values to be checked against the cache field.\n+ :return:\n+ A dictionary with two keys:\n+ - `hits` - Documents that matched with any of the items.\n+ - `misses` - Items that were not present in any documents.\n \"\"\"\n found_documents = []\n misses = []\n", "issue": "Docstrings - `haystack.components.caching`\n\n", "before_files": [{"content": "from typing import List, Dict, Any\n\nimport importlib\n\nimport logging\n\nfrom haystack import component, Document, default_from_dict, default_to_dict, DeserializationError\nfrom haystack.document_stores.types import DocumentStore\n\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass CacheChecker:\n \"\"\"\n CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified\n cache field.\n \"\"\"\n\n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n Create a UrlCacheChecker component.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n\n try:\n module_name, type_ = init_params[\"document_store\"][\"type\"].rsplit(\".\", 1)\n logger.debug(\"Trying to import %s\", module_name)\n module = importlib.import_module(module_name)\n except (ImportError, DeserializationError) as e:\n raise DeserializationError(\n f\"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported\"\n ) from e\n\n docstore_class = getattr(module, type_)\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n Checks if any document associated with the specified field is already present in the store. If matching documents\n are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.\n\n :param items: A list of values associated with the cache_field to be checked against the cache.\n :return: A dictionary with two keys: \"hits\" and \"misses\". 
The values are lists of documents that were found in\n the cache and items that were not, respectively.\n \"\"\"\n found_documents = []\n misses = []\n\n for item in items:\n filters = {self.cache_field: item}\n found = self.document_store.filter_documents(filters=filters)\n if found:\n found_documents.extend(found)\n else:\n misses.append(item)\n return {\"hits\": found_documents, \"misses\": misses}\n", "path": "haystack/components/caching/cache_checker.py"}], "after_files": [{"content": "from typing import List, Dict, Any\n\nimport importlib\n\nimport logging\n\nfrom haystack import component, Document, default_from_dict, default_to_dict, DeserializationError\nfrom haystack.document_stores.types import DocumentStore\n\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass CacheChecker:\n \"\"\"\n Checks for the presence of documents in a Document Store based on a specified\n field in each document's metadata.\n\n If matching documents are found, they are returned as hits. If not, the items\n are returned as misses, indicating they are not in the cache.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.document_stores.in_memory import InMemoryDocumentStore\n from haystack.components.caching.cache_checker import CacheChecker\n\n docstore = InMemoryDocumentStore()\n documents = [\n Document(content=\"doc1\", meta={\"url\": \"https://example.com/1\"}),\n Document(content=\"doc2\", meta={\"url\": \"https://example.com/2\"}),\n Document(content=\"doc3\", meta={\"url\": \"https://example.com/1\"}),\n Document(content=\"doc4\", meta={\"url\": \"https://example.com/2\"}),\n ]\n docstore.write_documents(documents)\n checker = CacheChecker(docstore, cache_field=\"url\")\n results = checker.run(items=[\"https://example.com/1\", \"https://example.com/5\"])\n assert results == {\"hits\": [documents[0], documents[2]], \"misses\": [\"https://example.com/5\"]}\n ```\n \"\"\"\n\n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n Create a CacheChecker component.\n\n :param document_store:\n Document store to check.\n :param cache_field:\n Name of the Document metadata field\n to check for cache hits.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n Deserializes the component from a dictionary.\n\n :param data:\n Dictionary to deserialize from.\n :returns:\n Deserialized component.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n\n try:\n module_name, type_ = init_params[\"document_store\"][\"type\"].rsplit(\".\", 1)\n logger.debug(\"Trying to import %s\", module_name)\n module = importlib.import_module(module_name)\n except (ImportError, DeserializationError) as e:\n raise DeserializationError(\n f\"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported\"\n ) from e\n\n docstore_class = getattr(module, type_)\n docstore = 
docstore_class.from_dict(init_params[\"document_store\"])\n\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n Checks if any document associated with the specified cache field\n is already present in the store.\n\n :param items:\n Values to be checked against the cache field.\n :return:\n A dictionary with two keys:\n - `hits` - Documents that matched with any of the items.\n - `misses` - Items that were not present in any documents.\n \"\"\"\n found_documents = []\n misses = []\n\n for item in items:\n filters = {self.cache_field: item}\n found = self.document_store.filter_documents(filters=filters)\n if found:\n found_documents.extend(found)\n else:\n misses.append(item)\n return {\"hits\": found_documents, \"misses\": misses}\n", "path": "haystack/components/caching/cache_checker.py"}]} | 1,070 | 899 |
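
The usage example embedded in the patched docstring above can also be run as a standalone script; module paths follow the patch and assume a Haystack 2.x installation:

```python
from haystack import Document
from haystack.document_stores.in_memory import InMemoryDocumentStore
from haystack.components.caching.cache_checker import CacheChecker

docstore = InMemoryDocumentStore()
documents = [
    Document(content="doc1", meta={"url": "https://example.com/1"}),
    Document(content="doc2", meta={"url": "https://example.com/2"}),
    Document(content="doc3", meta={"url": "https://example.com/1"}),
    Document(content="doc4", meta={"url": "https://example.com/2"}),
]
docstore.write_documents(documents)

checker = CacheChecker(docstore, cache_field="url")
results = checker.run(items=["https://example.com/1", "https://example.com/5"])

# Documents 1 and 3 match the first URL; the second URL is reported as a miss.
assert results == {"hits": [documents[0], documents[2]], "misses": ["https://example.com/5"]}
```
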
gh_patches_debug_19709 | rasdani/github-patches | git_diff | fossasia__open-event-server-5615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to login to the Admin Panel.
**Description:**
When we try to log in to the admin panel, even if the credentials belong to a super-admin, it returns "Credentials incorrect".
**Steps to reproduce the behavior:**
1. Go to 127.0.0.1:5000/admin
2. Enter the admin credentials
3. Click on login button
4. See error "Credential incorrect"
**Expected Behaviour:**
It should login the user to the admin panel if credentials are correct and the user is an admin.
**Screenshots**

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/views/__init__.py`
Content:
```
1 import flask_login as login
2 import requests
3 from flask import url_for, redirect, Blueprint, request, make_response
4 from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers
5 from flask_admin.contrib.sqla import ModelView
6 from flask_scrypt import generate_password_hash
7 from wtforms import form, fields, validators
8
9 from app.models import db
10 from app.models.user import User
11
12
13 class AdminModelView(ModelView):
14 def is_accessible(self):
15 return login.current_user.is_authenticated
16
17 def inaccessible_callback(self, name, **kwargs):
18 # redirect to login page if user doesn't have access
19 return redirect(url_for('admin.index', next=request.url))
20
21
22 class LoginForm(form.Form):
23 login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={"placeholder": "[email protected]"})
24 password = fields.PasswordField(validators=[validators.required()], render_kw={"placeholder": "xyzzy"})
25
26 def validate_login(self, field):
27 """
28 validate login
29 :param field:
30 :return:
31 """
32 user = self.get_user()
33
34 if user is None:
35 raise validators.ValidationError('User does not exist.')
36
37 if user.password != generate_password_hash(self.password.data, user.salt):
38 raise validators.ValidationError('Credentials incorrect.')
39
40 if not user.is_admin and not user.is_super_admin:
41 raise validators.ValidationError('Access Forbidden. Admin Rights Required')
42
43 def get_user(self):
44 return User.query.filter_by(email=self.login.data).first()
45
46
47 class MyAdminIndexView(AdminIndexView):
48 @expose('/')
49 def index(self):
50 """
51 /admin
52 :return:
53 """
54 if not login.current_user.is_authenticated:
55 return redirect(url_for('.login_view'))
56 return super(MyAdminIndexView, self).index()
57
58 @expose('/login/', methods=('GET', 'POST'))
59 def login_view(self):
60 """
61 login view for flask-admin
62 :return:
63 """
64 # handle user login
65 form = LoginForm(request.form)
66 if admin_helpers.validate_form_on_submit(form):
67 user = form.get_user()
68 login.login_user(user)
69
70 if login.current_user.is_authenticated:
71 return redirect(url_for('.index'))
72 self._template_args['form'] = form
73 return super(MyAdminIndexView, self).index()
74
75 @expose('/logout/')
76 def logout_view(self):
77 login.logout_user()
78 return redirect(url_for('.index'))
79
80
81 home_routes = Blueprint('home', __name__)
82
83
84 # Flask views
85 @home_routes.route('/')
86 def index():
87 """
88 Index route
89 :return:
90 """
91 r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')
92 response = make_response(r.content)
93 response.headers["Content-Type"] = "text/html"
94 return response
95
96
97 class BlueprintsManager:
98 def __init__(self):
99 pass
100
101 @staticmethod
102 def register(app):
103 """
104 Register blueprints
105 :param app: a flask app instance
106 :return:
107 """
108 app.register_blueprint(home_routes)
109 admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),
110 base_template='admin_base.html')
111
112 # Get all the models in the db, all models should have a explicit __tablename__
113 classes, models, table_names = [], [], []
114 # noinspection PyProtectedMember
115 for class_ in list(db.Model._decl_class_registry.values()):
116 try:
117 table_names.append(class_.__tablename__)
118 classes.append(class_)
119 except:
120 pass
121 for table in list(db.metadata.tables.items()):
122 if table[0] in table_names:
123 models.append(classes[table_names.index(table[0])])
124
125 for model in models:
126 admin.add_view(AdminModelView(model, db.session))
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/views/__init__.py b/app/views/__init__.py
--- a/app/views/__init__.py
+++ b/app/views/__init__.py
@@ -3,7 +3,6 @@
from flask import url_for, redirect, Blueprint, request, make_response
from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers
from flask_admin.contrib.sqla import ModelView
-from flask_scrypt import generate_password_hash
from wtforms import form, fields, validators
from app.models import db
@@ -34,7 +33,7 @@
if user is None:
raise validators.ValidationError('User does not exist.')
- if user.password != generate_password_hash(self.password.data, user.salt):
+ if not user.is_correct_password(self.password.data):
raise validators.ValidationError('Credentials incorrect.')
if not user.is_admin and not user.is_super_admin:
| {"golden_diff": "diff --git a/app/views/__init__.py b/app/views/__init__.py\n--- a/app/views/__init__.py\n+++ b/app/views/__init__.py\n@@ -3,7 +3,6 @@\n from flask import url_for, redirect, Blueprint, request, make_response\n from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\n from flask_admin.contrib.sqla import ModelView\n-from flask_scrypt import generate_password_hash\n from wtforms import form, fields, validators\n \n from app.models import db\n@@ -34,7 +33,7 @@\n if user is None:\n raise validators.ValidationError('User does not exist.')\n \n- if user.password != generate_password_hash(self.password.data, user.salt):\n+ if not user.is_correct_password(self.password.data):\n raise validators.ValidationError('Credentials incorrect.')\n \n if not user.is_admin and not user.is_super_admin:\n", "issue": "Unable to login to the Admin Panel.\n**Description:**\r\nWhen we try to login to admin panel, even if the credentials belong to super-admin, it returns \"Credential incorrect\"\r\n\r\n**Steps to reproduce the behavior:**\r\n1. Go to 127.0.0.1:5000/admin\r\n2. Enter the admin credentials\r\n3. Click on login button\r\n4. See error \"Credential incorrect\"\r\n\r\n**Expected Behaviour:**\r\nIt should login the user to the admin panel if credentials are correct and the user is an admin.\r\n\r\n**Screenshots**\r\n\r\n\n", "before_files": [{"content": "import flask_login as login\nimport requests\nfrom flask import url_for, redirect, Blueprint, request, make_response\nfrom flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_scrypt import generate_password_hash\nfrom wtforms import form, fields, validators\n\nfrom app.models import db\nfrom app.models.user import User\n\n\nclass AdminModelView(ModelView):\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def inaccessible_callback(self, name, **kwargs):\n # redirect to login page if user doesn't have access\n return redirect(url_for('admin.index', next=request.url))\n\n\nclass LoginForm(form.Form):\n login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={\"placeholder\": \"[email protected]\"})\n password = fields.PasswordField(validators=[validators.required()], render_kw={\"placeholder\": \"xyzzy\"})\n\n def validate_login(self, field):\n \"\"\"\n validate login\n :param field:\n :return:\n \"\"\"\n user = self.get_user()\n\n if user is None:\n raise validators.ValidationError('User does not exist.')\n\n if user.password != generate_password_hash(self.password.data, user.salt):\n raise validators.ValidationError('Credentials incorrect.')\n\n if not user.is_admin and not user.is_super_admin:\n raise validators.ValidationError('Access Forbidden. 
Admin Rights Required')\n\n def get_user(self):\n return User.query.filter_by(email=self.login.data).first()\n\n\nclass MyAdminIndexView(AdminIndexView):\n @expose('/')\n def index(self):\n \"\"\"\n /admin\n :return:\n \"\"\"\n if not login.current_user.is_authenticated:\n return redirect(url_for('.login_view'))\n return super(MyAdminIndexView, self).index()\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n \"\"\"\n login view for flask-admin\n :return:\n \"\"\"\n # handle user login\n form = LoginForm(request.form)\n if admin_helpers.validate_form_on_submit(form):\n user = form.get_user()\n login.login_user(user)\n\n if login.current_user.is_authenticated:\n return redirect(url_for('.index'))\n self._template_args['form'] = form\n return super(MyAdminIndexView, self).index()\n\n @expose('/logout/')\n def logout_view(self):\n login.logout_user()\n return redirect(url_for('.index'))\n\n\nhome_routes = Blueprint('home', __name__)\n\n\n# Flask views\n@home_routes.route('/')\ndef index():\n \"\"\"\n Index route\n :return:\n \"\"\"\n r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')\n response = make_response(r.content)\n response.headers[\"Content-Type\"] = \"text/html\"\n return response\n\n\nclass BlueprintsManager:\n def __init__(self):\n pass\n\n @staticmethod\n def register(app):\n \"\"\"\n Register blueprints\n :param app: a flask app instance\n :return:\n \"\"\"\n app.register_blueprint(home_routes)\n admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),\n base_template='admin_base.html')\n\n # Get all the models in the db, all models should have a explicit __tablename__\n classes, models, table_names = [], [], []\n # noinspection PyProtectedMember\n for class_ in list(db.Model._decl_class_registry.values()):\n try:\n table_names.append(class_.__tablename__)\n classes.append(class_)\n except:\n pass\n for table in list(db.metadata.tables.items()):\n if table[0] in table_names:\n models.append(classes[table_names.index(table[0])])\n\n for model in models:\n admin.add_view(AdminModelView(model, db.session))\n", "path": "app/views/__init__.py"}], "after_files": [{"content": "import flask_login as login\nimport requests\nfrom flask import url_for, redirect, Blueprint, request, make_response\nfrom flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\nfrom flask_admin.contrib.sqla import ModelView\nfrom wtforms import form, fields, validators\n\nfrom app.models import db\nfrom app.models.user import User\n\n\nclass AdminModelView(ModelView):\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def inaccessible_callback(self, name, **kwargs):\n # redirect to login page if user doesn't have access\n return redirect(url_for('admin.index', next=request.url))\n\n\nclass LoginForm(form.Form):\n login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={\"placeholder\": \"[email protected]\"})\n password = fields.PasswordField(validators=[validators.required()], render_kw={\"placeholder\": \"xyzzy\"})\n\n def validate_login(self, field):\n \"\"\"\n validate login\n :param field:\n :return:\n \"\"\"\n user = self.get_user()\n\n if user is None:\n raise validators.ValidationError('User does not exist.')\n\n if not user.is_correct_password(self.password.data):\n raise validators.ValidationError('Credentials incorrect.')\n\n if not user.is_admin and not user.is_super_admin:\n raise 
validators.ValidationError('Access Forbidden. Admin Rights Required')\n\n def get_user(self):\n return User.query.filter_by(email=self.login.data).first()\n\n\nclass MyAdminIndexView(AdminIndexView):\n @expose('/')\n def index(self):\n \"\"\"\n /admin\n :return:\n \"\"\"\n if not login.current_user.is_authenticated:\n return redirect(url_for('.login_view'))\n return super(MyAdminIndexView, self).index()\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n \"\"\"\n login view for flask-admin\n :return:\n \"\"\"\n # handle user login\n form = LoginForm(request.form)\n if admin_helpers.validate_form_on_submit(form):\n user = form.get_user()\n login.login_user(user)\n\n if login.current_user.is_authenticated:\n return redirect(url_for('.index'))\n self._template_args['form'] = form\n return super(MyAdminIndexView, self).index()\n\n @expose('/logout/')\n def logout_view(self):\n login.logout_user()\n return redirect(url_for('.index'))\n\n\nhome_routes = Blueprint('home', __name__)\n\n\n# Flask views\n@home_routes.route('/')\ndef index():\n \"\"\"\n Index route\n :return:\n \"\"\"\n r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')\n response = make_response(r.content)\n response.headers[\"Content-Type\"] = \"text/html\"\n return response\n\n\nclass BlueprintsManager:\n def __init__(self):\n pass\n\n @staticmethod\n def register(app):\n \"\"\"\n Register blueprints\n :param app: a flask app instance\n :return:\n \"\"\"\n app.register_blueprint(home_routes)\n admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),\n base_template='admin_base.html')\n\n # Get all the models in the db, all models should have a explicit __tablename__\n classes, models, table_names = [], [], []\n # noinspection PyProtectedMember\n for class_ in list(db.Model._decl_class_registry.values()):\n try:\n table_names.append(class_.__tablename__)\n classes.append(class_)\n except:\n pass\n for table in list(db.metadata.tables.items()):\n if table[0] in table_names:\n models.append(classes[table_names.index(table[0])])\n\n for model in models:\n admin.add_view(AdminModelView(model, db.session))\n", "path": "app/views/__init__.py"}]} | 1,546 | 193 |
gh_patches_debug_34291 | rasdani/github-patches | git_diff | deepset-ai__haystack-2908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DPR training is broken
**Describe the bug**
It seems that DPR training does not work at the moment. I suspect that this bug was introduced by #2703.
**Error message**
```
File "/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py", line 92, in <module>
tutorial9_dpr_training()
File "/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py", line 71, in tutorial9_dpr_training
retriever.train(
File "/Users/bogdan/Repositories/haystack/haystack/nodes/retriever/dense.py", line 680, in train
trainer.train()
File "/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py", line 290, in train
loss = self.compute_loss(batch, step)
File "/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py", line 374, in compute_loss
logits = self.model.forward(**batch)
TypeError: forward() got an unexpected keyword argument 'label_ids'
```
**To Reproduce**
Execute Tutorial 9.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/modeling/evaluation/eval.py`
Content:
```
1 from typing import Dict, List, Optional, Any
2
3 import logging
4 import numbers
5 import torch
6 import numpy as np
7 from tqdm import tqdm
8
9 from haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics
10 from haystack.modeling.model.adaptive_model import AdaptiveModel
11 from haystack.utils.experiment_tracking import Tracker as tracker
12 from haystack.modeling.visual import BUSH_SEP
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class Evaluator:
19 """
20 Handles evaluation of a given model over a specified dataset.
21 """
22
23 def __init__(self, data_loader: torch.utils.data.DataLoader, tasks, device: torch.device, report: bool = True):
24 """
25 :param data_loader: The PyTorch DataLoader that will return batches of data from the evaluation dataset
26         :param tasks:
27 :param device: The device on which the tensors should be processed. Choose from torch.device("cpu") and torch.device("cuda").
28 :param report: Whether an eval report should be generated (e.g. classification report per class).
29 """
30 self.data_loader = data_loader
31 self.tasks = tasks
32 self.device = device
33 self.report = report
34
35 def eval(
36 self,
37 model: AdaptiveModel,
38 return_preds_and_labels: bool = False,
39 calibrate_conf_scores: bool = False,
40 use_confidence_scores_for_ranking=True,
41 use_no_answer_legacy_confidence=False,
42 ) -> List[Dict]:
43 """
44 Performs evaluation on a given model.
45
46 :param model: The model on which to perform evaluation
47 :param return_preds_and_labels: Whether to add preds and labels in the returned dicts of the
48 :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores
49 :param use_confidence_scores_for_ranking: Whether to sort answers by confidence score (normalized between 0 and 1)(default) or by standard score (unbounded).
50 :param use_no_answer_legacy_confidence: Whether to use the legacy confidence definition for no_answer: difference between the best overall answer confidence and the no_answer gap confidence.
51 Otherwise we use the no_answer score normalized to a range of [0,1] by an expit function (default).
52 :return: all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
53 and reports generated during evaluation.
54 """
55 model.prediction_heads[0].use_confidence_scores_for_ranking = use_confidence_scores_for_ranking
56 model.prediction_heads[0].use_no_answer_legacy_confidence = use_no_answer_legacy_confidence
57 model.eval()
58
59 # init empty lists per prediction head
60 loss_all: List = [0 for _ in model.prediction_heads]
61 preds_all: List = [[] for _ in model.prediction_heads]
62 label_all: List = [[] for _ in model.prediction_heads]
63 ids_all: List = [[] for _ in model.prediction_heads]
64 passage_start_t_all: List = [[] for _ in model.prediction_heads]
65 logits_all: List = [[] for _ in model.prediction_heads]
66
67 for step, batch in enumerate(tqdm(self.data_loader, desc="Evaluating", mininterval=10)):
68 batch = {key: batch[key].to(self.device) for key in batch}
69
70 with torch.no_grad():
71
72 logits = model.forward(
73 input_ids=batch.get("input_ids", None),
74 segment_ids=batch.get("segment_ids", None),
75 padding_mask=batch.get("padding_mask", None),
76 output_hidden_states=batch.get("output_hidden_states", False),
77 output_attentions=batch.get("output_attentions", False),
78 )
79 losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
80 preds = model.logits_to_preds(logits=logits, **batch)
81 labels = model.prepare_labels(**batch)
82
83 # stack results of all batches per prediction head
84 for head_num, head in enumerate(model.prediction_heads):
85 loss_all[head_num] += np.sum(_to_numpy(losses_per_head[head_num]))
86 preds_all[head_num] += list(_to_numpy(preds[head_num]))
87 label_all[head_num] += list(_to_numpy(labels[head_num]))
88 if head.model_type == "span_classification":
89 ids_all[head_num] += list(_to_numpy(batch["id"]))
90 passage_start_t_all[head_num] += list(_to_numpy(batch["passage_start_t"]))
91 if calibrate_conf_scores:
92 logits_all[head_num] += list(_to_numpy(logits))
93
94 # Evaluate per prediction head
95 all_results = []
96 for head_num, head in enumerate(model.prediction_heads):
97 if head.model_type == "span_classification" and calibrate_conf_scores:
98 temperature_previous = head.temperature_for_confidence.item()
99 logger.info(f"temperature used for confidence scores before calibration: {temperature_previous}")
100 head.calibrate_conf(logits_all[head_num], label_all[head_num])
101 temperature_current = head.temperature_for_confidence.item()
102 logger.info(f"temperature used for confidence scores after calibration: {temperature_current}")
103 temperature_change = (abs(temperature_current - temperature_previous) / temperature_previous) * 100.0
104 if temperature_change > 50:
105 logger.warning(
106 f"temperature used for calibration of confidence scores changed by more than {temperature_change} percent"
107 )
108 if hasattr(head, "aggregate_preds"):
109 # Needed to convert NQ ids from np arrays to strings
110 ids_all_str = [x.astype(str) for x in ids_all[head_num]]
111 ids_all_list = [list(x) for x in ids_all_str]
112 head_ids = ["-".join(x) for x in ids_all_list]
113 preds_all[head_num], label_all[head_num] = head.aggregate_preds(
114 preds=preds_all[head_num],
115 labels=label_all[head_num],
116 passage_start_t=passage_start_t_all[head_num],
117 ids=head_ids,
118 )
119 result = {"loss": loss_all[head_num] / len(self.data_loader.dataset), "task_name": head.task_name}
120 result.update(compute_metrics(metric=head.metric, preds=preds_all[head_num], labels=label_all[head_num]))
121 # Select type of report depending on prediction head output type
122 if self.report:
123 try:
124 result["report"] = compute_report_metrics(head, preds_all[head_num], label_all[head_num])
125 except:
126 logger.error(
127 f"Couldn't create eval report for head {head_num} with following preds and labels:"
128 f"\n Preds: {preds_all[head_num]} \n Labels: {label_all[head_num]}"
129 )
130 result["report"] = "Error"
131
132 if return_preds_and_labels:
133 result["preds"] = preds_all[head_num]
134 result["labels"] = label_all[head_num]
135
136 all_results.append(result)
137
138 return all_results
139
140 @staticmethod
141 def log_results(
142 results: List[Any],
143 dataset_name: str,
144 steps: int,
145 logging: bool = True,
146 print: bool = True,
147 num_fold: Optional[int] = None,
148 ):
149 # Print a header
150 header = "\n\n"
151 header += BUSH_SEP + "\n"
152 header += "***************************************************\n"
153 if num_fold:
154 header += (
155 f"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
156 )
157 else:
158 header += f"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
159 header += "***************************************************\n"
160 header += BUSH_SEP + "\n"
161 logger.info(header)
162
163 for head_num, head in enumerate(results):
164 logger.info("\n _________ {} _________".format(head["task_name"]))
165 for metric_name, metric_val in head.items():
166 # log with experiment tracking framework (e.g. Mlflow)
167 if logging:
168 if not metric_name in ["preds", "labels"] and not metric_name.startswith("_"):
169 if isinstance(metric_val, numbers.Number):
170 tracker.track_metrics(
171 metrics={f"{dataset_name}_{metric_name}_{head['task_name']}": metric_val}, step=steps
172 )
173 # print via standard python logger
174 if print:
175 if metric_name == "report":
176 if isinstance(metric_val, str) and len(metric_val) > 8000:
177 metric_val = metric_val[:7500] + "\n ............................. \n" + metric_val[-500:]
178 logger.info("{}: \n {}".format(metric_name, metric_val))
179 else:
180 if not metric_name in ["preds", "labels"] and not metric_name.startswith("_"):
181 logger.info("{}: {}".format(metric_name, metric_val))
182
183
184 def _to_numpy(container):
185 try:
186 return container.cpu().numpy()
187 except AttributeError:
188 return container
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/modeling/evaluation/eval.py b/haystack/modeling/evaluation/eval.py
--- a/haystack/modeling/evaluation/eval.py
+++ b/haystack/modeling/evaluation/eval.py
@@ -8,6 +8,7 @@
from haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics
from haystack.modeling.model.adaptive_model import AdaptiveModel
+from haystack.modeling.model.biadaptive_model import BiAdaptiveModel
from haystack.utils.experiment_tracking import Tracker as tracker
from haystack.modeling.visual import BUSH_SEP
@@ -69,13 +70,26 @@
with torch.no_grad():
- logits = model.forward(
- input_ids=batch.get("input_ids", None),
- segment_ids=batch.get("segment_ids", None),
- padding_mask=batch.get("padding_mask", None),
- output_hidden_states=batch.get("output_hidden_states", False),
- output_attentions=batch.get("output_attentions", False),
- )
+ if isinstance(model, AdaptiveModel):
+ logits = model.forward(
+ input_ids=batch.get("input_ids", None),
+ segment_ids=batch.get("segment_ids", None),
+ padding_mask=batch.get("padding_mask", None),
+ output_hidden_states=batch.get("output_hidden_states", False),
+ output_attentions=batch.get("output_attentions", False),
+ )
+ elif isinstance(model, BiAdaptiveModel):
+ logits = model.forward(
+ query_input_ids=batch.get("query_input_ids", None),
+ query_segment_ids=batch.get("query_segment_ids", None),
+ query_attention_mask=batch.get("query_attention_mask", None),
+ passage_input_ids=batch.get("passage_input_ids", None),
+ passage_segment_ids=batch.get("passage_segment_ids", None),
+ passage_attention_mask=batch.get("passage_attention_mask", None),
+ )
+ else:
+ logits = model.forward(**batch)
+
losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
preds = model.logits_to_preds(logits=logits, **batch)
labels = model.prepare_labels(**batch)
| {"golden_diff": "diff --git a/haystack/modeling/evaluation/eval.py b/haystack/modeling/evaluation/eval.py\n--- a/haystack/modeling/evaluation/eval.py\n+++ b/haystack/modeling/evaluation/eval.py\n@@ -8,6 +8,7 @@\n \n from haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics\n from haystack.modeling.model.adaptive_model import AdaptiveModel\n+from haystack.modeling.model.biadaptive_model import BiAdaptiveModel\n from haystack.utils.experiment_tracking import Tracker as tracker\n from haystack.modeling.visual import BUSH_SEP\n \n@@ -69,13 +70,26 @@\n \n with torch.no_grad():\n \n- logits = model.forward(\n- input_ids=batch.get(\"input_ids\", None),\n- segment_ids=batch.get(\"segment_ids\", None),\n- padding_mask=batch.get(\"padding_mask\", None),\n- output_hidden_states=batch.get(\"output_hidden_states\", False),\n- output_attentions=batch.get(\"output_attentions\", False),\n- )\n+ if isinstance(model, AdaptiveModel):\n+ logits = model.forward(\n+ input_ids=batch.get(\"input_ids\", None),\n+ segment_ids=batch.get(\"segment_ids\", None),\n+ padding_mask=batch.get(\"padding_mask\", None),\n+ output_hidden_states=batch.get(\"output_hidden_states\", False),\n+ output_attentions=batch.get(\"output_attentions\", False),\n+ )\n+ elif isinstance(model, BiAdaptiveModel):\n+ logits = model.forward(\n+ query_input_ids=batch.get(\"query_input_ids\", None),\n+ query_segment_ids=batch.get(\"query_segment_ids\", None),\n+ query_attention_mask=batch.get(\"query_attention_mask\", None),\n+ passage_input_ids=batch.get(\"passage_input_ids\", None),\n+ passage_segment_ids=batch.get(\"passage_segment_ids\", None),\n+ passage_attention_mask=batch.get(\"passage_attention_mask\", None),\n+ )\n+ else:\n+ logits = model.forward(**batch)\n+\n losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)\n preds = model.logits_to_preds(logits=logits, **batch)\n labels = model.prepare_labels(**batch)\n", "issue": "DPR training is broken\n**Describe the bug**\r\nIt seems that DPR training does not work at the moment. 
I suspect that this bug was introduced by #2703.\r\n\r\n**Error message**\r\n```\r\n File \"/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py\", line 92, in <module>\r\n tutorial9_dpr_training()\r\n File \"/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py\", line 71, in tutorial9_dpr_training\r\n retriever.train(\r\n File \"/Users/bogdan/Repositories/haystack/haystack/nodes/retriever/dense.py\", line 680, in train\r\n trainer.train()\r\n File \"/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py\", line 290, in train\r\n loss = self.compute_loss(batch, step)\r\n File \"/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py\", line 374, in compute_loss\r\n logits = self.model.forward(**batch)\r\nTypeError: forward() got an unexpected keyword argument 'label_ids'\r\n```\r\n\r\n**To Reproduce**\r\nExecute Tutorial 9.\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, List, Optional, Any\n\nimport logging\nimport numbers\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics\nfrom haystack.modeling.model.adaptive_model import AdaptiveModel\nfrom haystack.utils.experiment_tracking import Tracker as tracker\nfrom haystack.modeling.visual import BUSH_SEP\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Evaluator:\n \"\"\"\n Handles evaluation of a given model over a specified dataset.\n \"\"\"\n\n def __init__(self, data_loader: torch.utils.data.DataLoader, tasks, device: torch.device, report: bool = True):\n \"\"\"\n :param data_loader: The PyTorch DataLoader that will return batches of data from the evaluation dataset\n :param tesks:\n :param device: The device on which the tensors should be processed. Choose from torch.device(\"cpu\") and torch.device(\"cuda\").\n :param report: Whether an eval report should be generated (e.g. classification report per class).\n \"\"\"\n self.data_loader = data_loader\n self.tasks = tasks\n self.device = device\n self.report = report\n\n def eval(\n self,\n model: AdaptiveModel,\n return_preds_and_labels: bool = False,\n calibrate_conf_scores: bool = False,\n use_confidence_scores_for_ranking=True,\n use_no_answer_legacy_confidence=False,\n ) -> List[Dict]:\n \"\"\"\n Performs evaluation on a given model.\n\n :param model: The model on which to perform evaluation\n :param return_preds_and_labels: Whether to add preds and labels in the returned dicts of the\n :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores\n :param use_confidence_scores_for_ranking: Whether to sort answers by confidence score (normalized between 0 and 1)(default) or by standard score (unbounded).\n :param use_no_answer_legacy_confidence: Whether to use the legacy confidence definition for no_answer: difference between the best overall answer confidence and the no_answer gap confidence.\n Otherwise we use the no_answer score normalized to a range of [0,1] by an expit function (default).\n :return: all_results: A list of dictionaries, one for each prediction head. 
Each dictionary contains the metrics\n and reports generated during evaluation.\n \"\"\"\n model.prediction_heads[0].use_confidence_scores_for_ranking = use_confidence_scores_for_ranking\n model.prediction_heads[0].use_no_answer_legacy_confidence = use_no_answer_legacy_confidence\n model.eval()\n\n # init empty lists per prediction head\n loss_all: List = [0 for _ in model.prediction_heads]\n preds_all: List = [[] for _ in model.prediction_heads]\n label_all: List = [[] for _ in model.prediction_heads]\n ids_all: List = [[] for _ in model.prediction_heads]\n passage_start_t_all: List = [[] for _ in model.prediction_heads]\n logits_all: List = [[] for _ in model.prediction_heads]\n\n for step, batch in enumerate(tqdm(self.data_loader, desc=\"Evaluating\", mininterval=10)):\n batch = {key: batch[key].to(self.device) for key in batch}\n\n with torch.no_grad():\n\n logits = model.forward(\n input_ids=batch.get(\"input_ids\", None),\n segment_ids=batch.get(\"segment_ids\", None),\n padding_mask=batch.get(\"padding_mask\", None),\n output_hidden_states=batch.get(\"output_hidden_states\", False),\n output_attentions=batch.get(\"output_attentions\", False),\n )\n losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)\n preds = model.logits_to_preds(logits=logits, **batch)\n labels = model.prepare_labels(**batch)\n\n # stack results of all batches per prediction head\n for head_num, head in enumerate(model.prediction_heads):\n loss_all[head_num] += np.sum(_to_numpy(losses_per_head[head_num]))\n preds_all[head_num] += list(_to_numpy(preds[head_num]))\n label_all[head_num] += list(_to_numpy(labels[head_num]))\n if head.model_type == \"span_classification\":\n ids_all[head_num] += list(_to_numpy(batch[\"id\"]))\n passage_start_t_all[head_num] += list(_to_numpy(batch[\"passage_start_t\"]))\n if calibrate_conf_scores:\n logits_all[head_num] += list(_to_numpy(logits))\n\n # Evaluate per prediction head\n all_results = []\n for head_num, head in enumerate(model.prediction_heads):\n if head.model_type == \"span_classification\" and calibrate_conf_scores:\n temperature_previous = head.temperature_for_confidence.item()\n logger.info(f\"temperature used for confidence scores before calibration: {temperature_previous}\")\n head.calibrate_conf(logits_all[head_num], label_all[head_num])\n temperature_current = head.temperature_for_confidence.item()\n logger.info(f\"temperature used for confidence scores after calibration: {temperature_current}\")\n temperature_change = (abs(temperature_current - temperature_previous) / temperature_previous) * 100.0\n if temperature_change > 50:\n logger.warning(\n f\"temperature used for calibration of confidence scores changed by more than {temperature_change} percent\"\n )\n if hasattr(head, \"aggregate_preds\"):\n # Needed to convert NQ ids from np arrays to strings\n ids_all_str = [x.astype(str) for x in ids_all[head_num]]\n ids_all_list = [list(x) for x in ids_all_str]\n head_ids = [\"-\".join(x) for x in ids_all_list]\n preds_all[head_num], label_all[head_num] = head.aggregate_preds(\n preds=preds_all[head_num],\n labels=label_all[head_num],\n passage_start_t=passage_start_t_all[head_num],\n ids=head_ids,\n )\n result = {\"loss\": loss_all[head_num] / len(self.data_loader.dataset), \"task_name\": head.task_name}\n result.update(compute_metrics(metric=head.metric, preds=preds_all[head_num], labels=label_all[head_num]))\n # Select type of report depending on prediction head output type\n if self.report:\n try:\n result[\"report\"] = 
compute_report_metrics(head, preds_all[head_num], label_all[head_num])\n except:\n logger.error(\n f\"Couldn't create eval report for head {head_num} with following preds and labels:\"\n f\"\\n Preds: {preds_all[head_num]} \\n Labels: {label_all[head_num]}\"\n )\n result[\"report\"] = \"Error\"\n\n if return_preds_and_labels:\n result[\"preds\"] = preds_all[head_num]\n result[\"labels\"] = label_all[head_num]\n\n all_results.append(result)\n\n return all_results\n\n @staticmethod\n def log_results(\n results: List[Any],\n dataset_name: str,\n steps: int,\n logging: bool = True,\n print: bool = True,\n num_fold: Optional[int] = None,\n ):\n # Print a header\n header = \"\\n\\n\"\n header += BUSH_SEP + \"\\n\"\n header += \"***************************************************\\n\"\n if num_fold:\n header += (\n f\"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\\n\"\n )\n else:\n header += f\"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\\n\"\n header += \"***************************************************\\n\"\n header += BUSH_SEP + \"\\n\"\n logger.info(header)\n\n for head_num, head in enumerate(results):\n logger.info(\"\\n _________ {} _________\".format(head[\"task_name\"]))\n for metric_name, metric_val in head.items():\n # log with experiment tracking framework (e.g. Mlflow)\n if logging:\n if not metric_name in [\"preds\", \"labels\"] and not metric_name.startswith(\"_\"):\n if isinstance(metric_val, numbers.Number):\n tracker.track_metrics(\n metrics={f\"{dataset_name}_{metric_name}_{head['task_name']}\": metric_val}, step=steps\n )\n # print via standard python logger\n if print:\n if metric_name == \"report\":\n if isinstance(metric_val, str) and len(metric_val) > 8000:\n metric_val = metric_val[:7500] + \"\\n ............................. \\n\" + metric_val[-500:]\n logger.info(\"{}: \\n {}\".format(metric_name, metric_val))\n else:\n if not metric_name in [\"preds\", \"labels\"] and not metric_name.startswith(\"_\"):\n logger.info(\"{}: {}\".format(metric_name, metric_val))\n\n\ndef _to_numpy(container):\n try:\n return container.cpu().numpy()\n except AttributeError:\n return container\n", "path": "haystack/modeling/evaluation/eval.py"}], "after_files": [{"content": "from typing import Dict, List, Optional, Any\n\nimport logging\nimport numbers\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics\nfrom haystack.modeling.model.adaptive_model import AdaptiveModel\nfrom haystack.modeling.model.biadaptive_model import BiAdaptiveModel\nfrom haystack.utils.experiment_tracking import Tracker as tracker\nfrom haystack.modeling.visual import BUSH_SEP\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Evaluator:\n \"\"\"\n Handles evaluation of a given model over a specified dataset.\n \"\"\"\n\n def __init__(self, data_loader: torch.utils.data.DataLoader, tasks, device: torch.device, report: bool = True):\n \"\"\"\n :param data_loader: The PyTorch DataLoader that will return batches of data from the evaluation dataset\n :param tesks:\n :param device: The device on which the tensors should be processed. Choose from torch.device(\"cpu\") and torch.device(\"cuda\").\n :param report: Whether an eval report should be generated (e.g. 
classification report per class).\n \"\"\"\n self.data_loader = data_loader\n self.tasks = tasks\n self.device = device\n self.report = report\n\n def eval(\n self,\n model: AdaptiveModel,\n return_preds_and_labels: bool = False,\n calibrate_conf_scores: bool = False,\n use_confidence_scores_for_ranking=True,\n use_no_answer_legacy_confidence=False,\n ) -> List[Dict]:\n \"\"\"\n Performs evaluation on a given model.\n\n :param model: The model on which to perform evaluation\n :param return_preds_and_labels: Whether to add preds and labels in the returned dicts of the\n :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores\n :param use_confidence_scores_for_ranking: Whether to sort answers by confidence score (normalized between 0 and 1)(default) or by standard score (unbounded).\n :param use_no_answer_legacy_confidence: Whether to use the legacy confidence definition for no_answer: difference between the best overall answer confidence and the no_answer gap confidence.\n Otherwise we use the no_answer score normalized to a range of [0,1] by an expit function (default).\n :return: all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics\n and reports generated during evaluation.\n \"\"\"\n model.prediction_heads[0].use_confidence_scores_for_ranking = use_confidence_scores_for_ranking\n model.prediction_heads[0].use_no_answer_legacy_confidence = use_no_answer_legacy_confidence\n model.eval()\n\n # init empty lists per prediction head\n loss_all: List = [0 for _ in model.prediction_heads]\n preds_all: List = [[] for _ in model.prediction_heads]\n label_all: List = [[] for _ in model.prediction_heads]\n ids_all: List = [[] for _ in model.prediction_heads]\n passage_start_t_all: List = [[] for _ in model.prediction_heads]\n logits_all: List = [[] for _ in model.prediction_heads]\n\n for step, batch in enumerate(tqdm(self.data_loader, desc=\"Evaluating\", mininterval=10)):\n batch = {key: batch[key].to(self.device) for key in batch}\n\n with torch.no_grad():\n\n if isinstance(model, AdaptiveModel):\n logits = model.forward(\n input_ids=batch.get(\"input_ids\", None),\n segment_ids=batch.get(\"segment_ids\", None),\n padding_mask=batch.get(\"padding_mask\", None),\n output_hidden_states=batch.get(\"output_hidden_states\", False),\n output_attentions=batch.get(\"output_attentions\", False),\n )\n elif isinstance(model, BiAdaptiveModel):\n logits = model.forward(\n query_input_ids=batch.get(\"query_input_ids\", None),\n query_segment_ids=batch.get(\"query_segment_ids\", None),\n query_attention_mask=batch.get(\"query_attention_mask\", None),\n passage_input_ids=batch.get(\"passage_input_ids\", None),\n passage_segment_ids=batch.get(\"passage_segment_ids\", None),\n passage_attention_mask=batch.get(\"passage_attention_mask\", None),\n )\n else:\n logits = model.forward(**batch)\n\n losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)\n preds = model.logits_to_preds(logits=logits, **batch)\n labels = model.prepare_labels(**batch)\n\n # stack results of all batches per prediction head\n for head_num, head in enumerate(model.prediction_heads):\n loss_all[head_num] += np.sum(_to_numpy(losses_per_head[head_num]))\n preds_all[head_num] += list(_to_numpy(preds[head_num]))\n label_all[head_num] += list(_to_numpy(labels[head_num]))\n if head.model_type == \"span_classification\":\n ids_all[head_num] += list(_to_numpy(batch[\"id\"]))\n passage_start_t_all[head_num] += 
list(_to_numpy(batch[\"passage_start_t\"]))\n if calibrate_conf_scores:\n logits_all[head_num] += list(_to_numpy(logits))\n\n # Evaluate per prediction head\n all_results = []\n for head_num, head in enumerate(model.prediction_heads):\n if head.model_type == \"span_classification\" and calibrate_conf_scores:\n temperature_previous = head.temperature_for_confidence.item()\n logger.info(f\"temperature used for confidence scores before calibration: {temperature_previous}\")\n head.calibrate_conf(logits_all[head_num], label_all[head_num])\n temperature_current = head.temperature_for_confidence.item()\n logger.info(f\"temperature used for confidence scores after calibration: {temperature_current}\")\n temperature_change = (abs(temperature_current - temperature_previous) / temperature_previous) * 100.0\n if temperature_change > 50:\n logger.warning(\n f\"temperature used for calibration of confidence scores changed by more than {temperature_change} percent\"\n )\n if hasattr(head, \"aggregate_preds\"):\n # Needed to convert NQ ids from np arrays to strings\n ids_all_str = [x.astype(str) for x in ids_all[head_num]]\n ids_all_list = [list(x) for x in ids_all_str]\n head_ids = [\"-\".join(x) for x in ids_all_list]\n preds_all[head_num], label_all[head_num] = head.aggregate_preds(\n preds=preds_all[head_num],\n labels=label_all[head_num],\n passage_start_t=passage_start_t_all[head_num],\n ids=head_ids,\n )\n result = {\"loss\": loss_all[head_num] / len(self.data_loader.dataset), \"task_name\": head.task_name}\n result.update(compute_metrics(metric=head.metric, preds=preds_all[head_num], labels=label_all[head_num]))\n # Select type of report depending on prediction head output type\n if self.report:\n try:\n result[\"report\"] = compute_report_metrics(head, preds_all[head_num], label_all[head_num])\n except:\n logger.error(\n f\"Couldn't create eval report for head {head_num} with following preds and labels:\"\n f\"\\n Preds: {preds_all[head_num]} \\n Labels: {label_all[head_num]}\"\n )\n result[\"report\"] = \"Error\"\n\n if return_preds_and_labels:\n result[\"preds\"] = preds_all[head_num]\n result[\"labels\"] = label_all[head_num]\n\n all_results.append(result)\n\n return all_results\n\n @staticmethod\n def log_results(\n results: List[Any],\n dataset_name: str,\n steps: int,\n logging: bool = True,\n print: bool = True,\n num_fold: Optional[int] = None,\n ):\n # Print a header\n header = \"\\n\\n\"\n header += BUSH_SEP + \"\\n\"\n header += \"***************************************************\\n\"\n if num_fold:\n header += (\n f\"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\\n\"\n )\n else:\n header += f\"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\\n\"\n header += \"***************************************************\\n\"\n header += BUSH_SEP + \"\\n\"\n logger.info(header)\n\n for head_num, head in enumerate(results):\n logger.info(\"\\n _________ {} _________\".format(head[\"task_name\"]))\n for metric_name, metric_val in head.items():\n # log with experiment tracking framework (e.g. 
Mlflow)\n if logging:\n if not metric_name in [\"preds\", \"labels\"] and not metric_name.startswith(\"_\"):\n if isinstance(metric_val, numbers.Number):\n tracker.track_metrics(\n metrics={f\"{dataset_name}_{metric_name}_{head['task_name']}\": metric_val}, step=steps\n )\n # print via standard python logger\n if print:\n if metric_name == \"report\":\n if isinstance(metric_val, str) and len(metric_val) > 8000:\n metric_val = metric_val[:7500] + \"\\n ............................. \\n\" + metric_val[-500:]\n logger.info(\"{}: \\n {}\".format(metric_name, metric_val))\n else:\n if not metric_name in [\"preds\", \"labels\"] and not metric_name.startswith(\"_\"):\n logger.info(\"{}: {}\".format(metric_name, metric_val))\n\n\ndef _to_numpy(container):\n try:\n return container.cpu().numpy()\n except AttributeError:\n return container\n", "path": "haystack/modeling/evaluation/eval.py"}]} | 2,918 | 470 |
gh_patches_debug_10505 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ZeroMean for Batch Independent Multioutput GP
I'm following the Batch Independent Multioutput GP example, but instead of using a constant mean, I would like each dimension to use a zero mean (maybe this is a bad idea?).
```
class ZeroMeanIndependentMultitaskGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, nx):
super().__init__(train_x, train_y, likelihood)
self.n = nx #output dimension
#self.mean_module = gpytorch.means.MultitaskMean([gpytorch.means.ZeroMean()]*self.n,
# num_tasks=self.n)
#self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([self.n]))
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(batch_shape=torch.Size([self.n])),
batch_shape=torch.Size([self.n])
)
def forward(self, x):
mean_x = self.mean_module(x) # is this needed for ZeroMean?
covar_x = self.covar_module(x)
return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
)
```
When training with this, I get the error `RuntimeError: mean should be a matrix or a batch matrix (batch mode)`. It works as intended with constant mean. As you can see, I've tried a couple different things, but they don't seem to work either. I can't seem to find other people with the same issue online. Is it possible to do this with ZeroMean?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/means/zero_mean.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import torch
4
5 from .mean import Mean
6
7
8 class ZeroMean(Mean):
9 def forward(self, input):
10 return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpytorch/means/zero_mean.py b/gpytorch/means/zero_mean.py
--- a/gpytorch/means/zero_mean.py
+++ b/gpytorch/means/zero_mean.py
@@ -2,9 +2,18 @@
import torch
+from ..utils.broadcasting import _mul_broadcast_shape
from .mean import Mean
class ZeroMean(Mean):
+ def __init__(self, batch_shape=torch.Size(), **kwargs):
+ super(ZeroMean, self).__init__()
+ self.batch_shape = batch_shape
+
def forward(self, input):
- return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)
+ mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)
+ if input.shape[:-2] == self.batch_shape:
+ return mean.expand(input.shape[:-1])
+ else:
+ return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))
| {"golden_diff": "diff --git a/gpytorch/means/zero_mean.py b/gpytorch/means/zero_mean.py\n--- a/gpytorch/means/zero_mean.py\n+++ b/gpytorch/means/zero_mean.py\n@@ -2,9 +2,18 @@\n \n import torch\n \n+from ..utils.broadcasting import _mul_broadcast_shape\n from .mean import Mean\n \n \n class ZeroMean(Mean):\n+ def __init__(self, batch_shape=torch.Size(), **kwargs):\n+ super(ZeroMean, self).__init__()\n+ self.batch_shape = batch_shape\n+\n def forward(self, input):\n- return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)\n+ mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)\n+ if input.shape[:-2] == self.batch_shape:\n+ return mean.expand(input.shape[:-1])\n+ else:\n+ return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))\n", "issue": "ZeroMean for Batch Independent Multioutput GP\nI'm following the Batch Independent Multioutput GP example, but instead of using a constant mean, I would like each dimension to use a zero mean (maybe this is a bad idea?).\r\n\r\n```\r\nclass ZeroMeanIndependentMultitaskGPModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood, nx):\r\n super().__init__(train_x, train_y, likelihood)\r\n self.n = nx #output dimension\r\n #self.mean_module = gpytorch.means.MultitaskMean([gpytorch.means.ZeroMean()]*self.n,\r\n # num_tasks=self.n)\r\n #self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([self.n]))\r\n self.mean_module = gpytorch.means.ZeroMean()\r\n self.covar_module = gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.RBFKernel(batch_shape=torch.Size([self.n])),\r\n batch_shape=torch.Size([self.n])\r\n )\r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x) # is this needed for ZeroMean?\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(\r\n gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n )\r\n```\r\n\r\n\r\nWhen training with this, I get the error `RuntimeError: mean should be a matrix or a batch matrix (batch mode)`. It works as intended with constant mean. As you can see, I've tried a couple different things, but they don't seem to work either. I can't seem to find other people with the same issue online. Is it possible to do this with ZeroMean?\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\n\nfrom .mean import Mean\n\n\nclass ZeroMean(Mean):\n def forward(self, input):\n return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)\n", "path": "gpytorch/means/zero_mean.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\n\nfrom ..utils.broadcasting import _mul_broadcast_shape\nfrom .mean import Mean\n\n\nclass ZeroMean(Mean):\n def __init__(self, batch_shape=torch.Size(), **kwargs):\n super(ZeroMean, self).__init__()\n self.batch_shape = batch_shape\n\n def forward(self, input):\n mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)\n if input.shape[:-2] == self.batch_shape:\n return mean.expand(input.shape[:-1])\n else:\n return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))\n", "path": "gpytorch/means/zero_mean.py"}]} | 704 | 216 |
gh_patches_debug_1971 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1994 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Postponed annotation evaluation causes `Annotated` to break
When using postponed annotation evaluation, annotating resolver arguments no longer works:
```python
from __future__ import annotations
import random
from typing import Annotated
import strawberry
@strawberry.type
class Query:
@strawberry.field
def dice_roll(
self,
sides: Annotated[
int,
strawberry.argument(description="Number of sides the die should have."),
] = 6,
) -> int:
return random.randint(1, sides)
strawberry.Schema(query=Query)
```
The example above raises this TypeError:
```
TypeError: Query fields cannot be resolved. Unexpected type 'typing.Annotated[int, <strawberry.arguments.StrawberryArgumentAnnotation object at 0x7fd12e130d00>]'
```
When the first line (`from __future__ import annotations`) is left out, everything works as intended. This will probably also break once Python 3.11 lands, since the behavior will become mandatory then. #1586 refers to a somewhat related issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/auto.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any, Optional, Union, cast
4
5 from typing_extensions import Annotated, get_args, get_origin
6
7 from strawberry.type import StrawberryType
8
9 from .annotation import StrawberryAnnotation
10
11
12 class StrawberryAutoMeta(type):
13 """Metaclass for StrawberryAuto.
14
15 This is used to make sure StrawberryAuto is a singleton and also to
16 override the behavior of `isinstance` so that it consider the following
17 cases:
18
19 >> isinstance(StrawberryAuto(), StrawberryAuto)
20 True
21 >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)
22 True
23 >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)
24 True
25
26 """
27
28 def __init__(self, *args, **kwargs):
29 self._instance: Optional[StrawberryAuto] = None
30 super().__init__(*args, **kwargs)
31
32 def __call__(cls, *args, **kwargs):
33 if cls._instance is None:
34 cls._instance = super().__call__(*args, **kwargs)
35
36 return cls._instance
37
38 def __instancecheck__(
39 self,
40 instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],
41 ):
42 if isinstance(instance, StrawberryAnnotation):
43 resolved = instance.annotation
44 if isinstance(resolved, str):
45 namespace = instance.namespace
46 resolved = namespace and namespace.get(resolved)
47
48 if resolved is not None:
49 instance = cast(type, resolved)
50
51 if instance is auto:
52 return True
53
54 # Support uses of Annotated[auto, something()]
55 if get_origin(instance) is Annotated:
56 args = get_args(instance)
57 if args[0] is Any:
58 return any(isinstance(arg, StrawberryAuto) for arg in args[1:])
59
60 return False
61
62
63 class StrawberryAuto(metaclass=StrawberryAutoMeta):
64 def __str__(self):
65 return "auto"
66
67 def __repr__(self):
68 return "<auto>"
69
70
71 auto = Annotated[Any, StrawberryAuto()]
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/auto.py b/strawberry/auto.py
--- a/strawberry/auto.py
+++ b/strawberry/auto.py
@@ -57,7 +57,7 @@
if args[0] is Any:
return any(isinstance(arg, StrawberryAuto) for arg in args[1:])
- return False
+ return instance == "strawberry.auto"
class StrawberryAuto(metaclass=StrawberryAutoMeta):
| {"golden_diff": "diff --git a/strawberry/auto.py b/strawberry/auto.py\n--- a/strawberry/auto.py\n+++ b/strawberry/auto.py\n@@ -57,7 +57,7 @@\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n \n- return False\n+ return instance == \"strawberry.auto\"\n \n \n class StrawberryAuto(metaclass=StrawberryAutoMeta):\n", "issue": "Postponed annotation evaluation causes `Annotated` to break\nWhen using postponed annotation evaluation, annotating resolver arguments no longer works:\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\nimport random\r\nfrom typing import Annotated\r\n\r\nimport strawberry\r\n\r\n\r\[email protected]\r\nclass Query:\r\n @strawberry.field\r\n def dice_roll(\r\n self,\r\n sides: Annotated[\r\n int,\r\n strawberry.argument(description=\"Number of sides the die should have.\"),\r\n ] = 6,\r\n ) -> int:\r\n return random.randint(1, sides)\r\n\r\n\r\nstrawberry.Schema(query=Query)\r\n```\r\n\r\nThe example above raises this TypeError:\r\n\r\n```\r\nTypeError: Query fields cannot be resolved. Unexpected type 'typing.Annotated[int, <strawberry.arguments.StrawberryArgumentAnnotation object at 0x7fd12e130d00>]'\r\n```\r\n\r\nWhen the first line (`from __future__ import annotations`) is left out, everything works as intended. This will probably also break once Python 3.11 lands, since the behavior will become mandatory then. #1586 refers to a somewhat related issue.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Optional, Union, cast\n\nfrom typing_extensions import Annotated, get_args, get_origin\n\nfrom strawberry.type import StrawberryType\n\nfrom .annotation import StrawberryAnnotation\n\n\nclass StrawberryAutoMeta(type):\n \"\"\"Metaclass for StrawberryAuto.\n\n This is used to make sure StrawberryAuto is a singleton and also to\n override the behavior of `isinstance` so that it consider the following\n cases:\n\n >> isinstance(StrawberryAuto(), StrawberryAuto)\n True\n >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)\n True\n >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)\n True\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._instance: Optional[StrawberryAuto] = None\n super().__init__(*args, **kwargs)\n\n def __call__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__call__(*args, **kwargs)\n\n return cls._instance\n\n def __instancecheck__(\n self,\n instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],\n ):\n if isinstance(instance, StrawberryAnnotation):\n resolved = instance.annotation\n if isinstance(resolved, str):\n namespace = instance.namespace\n resolved = namespace and namespace.get(resolved)\n\n if resolved is not None:\n instance = cast(type, resolved)\n\n if instance is auto:\n return True\n\n # Support uses of Annotated[auto, something()]\n if get_origin(instance) is Annotated:\n args = get_args(instance)\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n\n return False\n\n\nclass StrawberryAuto(metaclass=StrawberryAutoMeta):\n def __str__(self):\n return \"auto\"\n\n def __repr__(self):\n return \"<auto>\"\n\n\nauto = Annotated[Any, StrawberryAuto()]\n", "path": "strawberry/auto.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Optional, Union, cast\n\nfrom typing_extensions import Annotated, get_args, get_origin\n\nfrom strawberry.type import StrawberryType\n\nfrom 
.annotation import StrawberryAnnotation\n\n\nclass StrawberryAutoMeta(type):\n \"\"\"Metaclass for StrawberryAuto.\n\n This is used to make sure StrawberryAuto is a singleton and also to\n override the behavior of `isinstance` so that it consider the following\n cases:\n\n >> isinstance(StrawberryAuto(), StrawberryAuto)\n True\n >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)\n True\n >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)\n True\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._instance: Optional[StrawberryAuto] = None\n super().__init__(*args, **kwargs)\n\n def __call__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__call__(*args, **kwargs)\n\n return cls._instance\n\n def __instancecheck__(\n self,\n instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],\n ):\n if isinstance(instance, StrawberryAnnotation):\n resolved = instance.annotation\n if isinstance(resolved, str):\n namespace = instance.namespace\n resolved = namespace and namespace.get(resolved)\n\n if resolved is not None:\n instance = cast(type, resolved)\n\n if instance is auto:\n return True\n\n # Support uses of Annotated[auto, something()]\n if get_origin(instance) is Annotated:\n args = get_args(instance)\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n\n return instance == \"strawberry.auto\"\n\n\nclass StrawberryAuto(metaclass=StrawberryAutoMeta):\n def __str__(self):\n return \"auto\"\n\n def __repr__(self):\n return \"<auto>\"\n\n\nauto = Annotated[Any, StrawberryAuto()]\n", "path": "strawberry/auto.py"}]} | 1,076 | 104 |
gh_patches_debug_2541 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-2452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Site unavailable
### What's wrong
Not sure where exactly to put this, but https://wemake-python-stylegui.de/ is unavailable
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/formatter.py`
Content:
```
1 """
2 Our very own ``flake8`` formatter for better error messages.
3
4 That's how all ``flake8`` formatters work:
5
6 .. mermaid::
7 :caption: ``flake8`` formatting API calls order.
8
9 graph LR
10 F2[start] --> F3[after_init]
11 F3 --> F4[start]
12 F4 --> F5[beginning]
13 F5 --> F6[handle]
14 F6 --> F7[format]
15 F6 --> F8[show_source]
16 F6 --> F9[show_statistic]
17 F7 --> F10[finished]
18 F8 --> F10[finished]
19 F9 --> F10[finished]
20 F10 -.-> F5
21 F10 --> F11[stop]
22
23 .. autoclass:: WemakeFormatter
24 :no-undoc-members:
25
26 """
27
28 from collections import defaultdict
29 from typing import ClassVar, DefaultDict, List
30
31 from flake8.formatting.base import BaseFormatter
32 from flake8.statistics import Statistics
33 from flake8.style_guide import Violation
34 from pygments import highlight
35 from pygments.formatters import TerminalFormatter
36 from pygments.lexers import PythonLexer
37 from typing_extensions import Final
38
39 from wemake_python_styleguide.version import pkg_version
40
41 #: That url is generated and hosted by Sphinx.
42 DOCS_URL_TEMPLATE: Final = (
43 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
44 )
45
46 #: This url points to the specific violation page.
47 SHORTLINK_TEMPLATE: Final = (
48 'https://pyflak.es/{0}'
49 )
50
51
52 class WemakeFormatter(BaseFormatter): # noqa: WPS214
53 """
54 We need to format our style :term:`violations <violation>` beatifully.
55
56 The default formatter does not allow us to do that.
57 What things do we miss?
58
59 1. Spacing, everything is just mixed up and glued together
60 2. Colors and decoration, some information is easier
61 to gather just with colors or underlined text
62 3. Grouping, we need explicit grouping by filename
63 4. Incomplete and non-informative statistics
64
65 """
66
67 _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)
68
69 # API:
70
71 def after_init(self):
72 """Called after the original ``init`` is used to set extra fields."""
73 self._lexer = PythonLexer()
74 self._formatter = TerminalFormatter()
75
76 # Logic:
77 self._processed_filenames: List[str] = []
78 self._error_count = 0
79
80 def handle(self, error: Violation) -> None: # noqa: WPS110
81 """Processes each :term:`violation` to print it and all related."""
82 if error.filename not in self._processed_filenames:
83 self._print_header(error.filename)
84 self._processed_filenames.append(error.filename)
85
86 line = self.format(error)
87 source = self.show_source(error)
88 link = self._show_link(error)
89
90 self._write(line)
91 if link:
92 self._write(link)
93 if source:
94 self._write(source)
95
96 self._error_count += 1
97
98 def format(self, error: Violation) -> str: # noqa: WPS125
99 """Called to format each individual :term:`violation`."""
100 return '{newline} {row_col:<8} {code:<5} {text}'.format(
101 newline=self.newline if self._should_show_source(error) else '',
102 code=error.code,
103 text=error.text,
104 row_col='{0}:{1}'.format(error.line_number, error.column_number),
105 )
106
107 def show_source(self, error: Violation) -> str:
108 """Called when ``--show-source`` option is provided."""
109 if not self._should_show_source(error):
110 return ''
111
112 formatted_line = error.physical_line.lstrip()
113 adjust = len(error.physical_line) - len(formatted_line)
114
115 code = _highlight(
116 formatted_line,
117 self._lexer,
118 self._formatter,
119 )
120
121 return ' {code} {spacing}^'.format(
122 code=code,
123 spacing=' ' * (error.column_number - 1 - adjust),
124 )
125
126 def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210
127 """Called when ``--statistic`` option is passed."""
128 all_errors = 0
129 for error_code in statistics.error_codes():
130 stats_for_error_code = statistics.statistics_for(error_code)
131 statistic = next(stats_for_error_code)
132
133 count = statistic.count
134 count += sum(stat.count for stat in stats_for_error_code)
135 all_errors += count
136 error_by_file = _count_per_filename(statistics, error_code)
137
138 self._print_violation_per_file(
139 statistic,
140 error_code,
141 count,
142 error_by_file,
143 )
144
145 self._write(self.newline)
146 self._write(_underline(_bold('All errors: {0}'.format(all_errors))))
147
148 def stop(self) -> None:
149 """Runs once per app when the formatting ends."""
150 if self._error_count:
151 message = '{0}Full list of violations and explanations:{0}{1}'
152 self._write(message.format(self.newline, self._doc_url))
153
154 # Our own methods:
155
156 def _show_link(self, error: Violation) -> str:
157 """Called when ``--show-violation-links`` option is provided."""
158 if not self.options.show_violation_links:
159 return ''
160
161 return ' {spacing}-> {link}'.format(
162 spacing=' ' * 9,
163 link=SHORTLINK_TEMPLATE.format(error.code),
164 )
165
166 def _print_header(self, filename: str) -> None:
167 self._write(
168 '{newline}{filename}'.format(
169 filename=_underline(_bold(filename)),
170 newline=self.newline,
171 ),
172 )
173
174 def _print_violation_per_file(
175 self,
176 statistic: Statistics,
177 error_code: str,
178 count: int,
179 error_by_file: DefaultDict[str, int],
180 ):
181 self._write(
182 '{newline}{error_code}: {message}'.format(
183 newline=self.newline,
184 error_code=_bold(error_code),
185 message=statistic.message,
186 ),
187 )
188 for filename, error_count in error_by_file.items():
189 self._write(
190 ' {error_count:<5} {filename}'.format(
191 error_count=error_count,
192 filename=filename,
193 ),
194 )
195 self._write(_underline('Total: {0}'.format(count)))
196
197 def _should_show_source(self, error: Violation) -> bool:
198 return self.options.show_source and error.physical_line is not None
199
200
201 # Formatting text:
202
203 def _bold(text: str) -> str:
204 r"""
205 Returns bold formatted text.
206
207 >>> _bold('Hello!')
208 '\x1b[1mHello!\x1b[0m'
209
210 """
211 return '\033[1m{0}\033[0m'.format(text)
212
213
214 def _underline(text: str) -> str:
215 r"""
216 Returns underlined formatted text.
217
218 >>> _underline('Hello!')
219 '\x1b[4mHello!\x1b[0m'
220
221 """
222 return '\033[4m{0}\033[0m'.format(text)
223
224
225 def _highlight(source: str, lexer, formatter) -> str:
226 """
227 Highlights source code. Might fail.
228
229 See also:
230 https://github.com/wemake-services/wemake-python-styleguide/issues/794
231
232 """
233 try:
234 return highlight(source, lexer, formatter)
235 except Exception: # pragma: no cover
236 # Might fail on some systems, when colors are set incorrectly,
237 # or not available at all. In this case code will be just text.
238 return source
239
240
241 # Helpers:
242
243 def _count_per_filename(
244 statistics: Statistics,
245 error_code: str,
246 ) -> DefaultDict[str, int]:
247 filenames: DefaultDict[str, int] = defaultdict(int)
248 stats_for_error_code = statistics.statistics_for(error_code)
249
250 for stat in stats_for_error_code:
251 filenames[stat.filename] += stat.count
252
253 return filenames
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/formatter.py b/wemake_python_styleguide/formatter.py
--- a/wemake_python_styleguide/formatter.py
+++ b/wemake_python_styleguide/formatter.py
@@ -40,7 +40,7 @@
#: That url is generated and hosted by Sphinx.
DOCS_URL_TEMPLATE: Final = (
- 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
+ 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'
)
#: This url points to the specific violation page.
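For reference, the file above later combines this template with the package version (see `_doc_url`), so with the patched domain the generated documentation link takes roughly the following form — a minimal sketch in which `'0.16'` is only a made-up placeholder for whatever `pkg_version` resolves to:

```python
# Sketch only: the template string is the one introduced by the patch above;
# '0.16' stands in for the runtime value of pkg_version.
DOCS_URL_TEMPLATE = (
    'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'
)
print(DOCS_URL_TEMPLATE.format('0.16'))
# https://wemake-python-styleguide.rtfd.io/en/0.16/pages/usage/violations/
```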
| {"golden_diff": "diff --git a/wemake_python_styleguide/formatter.py b/wemake_python_styleguide/formatter.py\n--- a/wemake_python_styleguide/formatter.py\n+++ b/wemake_python_styleguide/formatter.py\n@@ -40,7 +40,7 @@\n \n #: That url is generated and hosted by Sphinx.\n DOCS_URL_TEMPLATE: Final = (\n- 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'\n+ 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'\n )\n \n #: This url points to the specific violation page.\n", "issue": "Site unavailable\n### What's wrong\r\n\r\nNot sure where exactly to put this, but https://wemake-python-stylegui.de/ is unavailable\n", "before_files": [{"content": "\"\"\"\nOur very own ``flake8`` formatter for better error messages.\n\nThat's how all ``flake8`` formatters work:\n\n.. mermaid::\n :caption: ``flake8`` formatting API calls order.\n\n graph LR\n F2[start] --> F3[after_init]\n F3 --> F4[start]\n F4 --> F5[beginning]\n F5 --> F6[handle]\n F6 --> F7[format]\n F6\t --> F8[show_source]\n F6\t --> F9[show_statistic]\n F7 --> F10[finished]\n F8 --> F10[finished]\n F9 --> F10[finished]\n F10 -.-> F5\n F10 --> F11[stop]\n\n.. autoclass:: WemakeFormatter\n :no-undoc-members:\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List\n\nfrom flake8.formatting.base import BaseFormatter\nfrom flake8.statistics import Statistics\nfrom flake8.style_guide import Violation\nfrom pygments import highlight\nfrom pygments.formatters import TerminalFormatter\nfrom pygments.lexers import PythonLexer\nfrom typing_extensions import Final\n\nfrom wemake_python_styleguide.version import pkg_version\n\n#: That url is generated and hosted by Sphinx.\nDOCS_URL_TEMPLATE: Final = (\n 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'\n)\n\n#: This url points to the specific violation page.\nSHORTLINK_TEMPLATE: Final = (\n 'https://pyflak.es/{0}'\n)\n\n\nclass WemakeFormatter(BaseFormatter): # noqa: WPS214\n \"\"\"\n We need to format our style :term:`violations <violation>` beatifully.\n\n The default formatter does not allow us to do that.\n What things do we miss?\n\n 1. Spacing, everything is just mixed up and glued together\n 2. Colors and decoration, some information is easier\n to gather just with colors or underlined text\n 3. Grouping, we need explicit grouping by filename\n 4. 
Incomplete and non-informative statistics\n\n \"\"\"\n\n _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)\n\n # API:\n\n def after_init(self):\n \"\"\"Called after the original ``init`` is used to set extra fields.\"\"\"\n self._lexer = PythonLexer()\n self._formatter = TerminalFormatter()\n\n # Logic:\n self._processed_filenames: List[str] = []\n self._error_count = 0\n\n def handle(self, error: Violation) -> None: # noqa: WPS110\n \"\"\"Processes each :term:`violation` to print it and all related.\"\"\"\n if error.filename not in self._processed_filenames:\n self._print_header(error.filename)\n self._processed_filenames.append(error.filename)\n\n line = self.format(error)\n source = self.show_source(error)\n link = self._show_link(error)\n\n self._write(line)\n if link:\n self._write(link)\n if source:\n self._write(source)\n\n self._error_count += 1\n\n def format(self, error: Violation) -> str: # noqa: WPS125\n \"\"\"Called to format each individual :term:`violation`.\"\"\"\n return '{newline} {row_col:<8} {code:<5} {text}'.format(\n newline=self.newline if self._should_show_source(error) else '',\n code=error.code,\n text=error.text,\n row_col='{0}:{1}'.format(error.line_number, error.column_number),\n )\n\n def show_source(self, error: Violation) -> str:\n \"\"\"Called when ``--show-source`` option is provided.\"\"\"\n if not self._should_show_source(error):\n return ''\n\n formatted_line = error.physical_line.lstrip()\n adjust = len(error.physical_line) - len(formatted_line)\n\n code = _highlight(\n formatted_line,\n self._lexer,\n self._formatter,\n )\n\n return ' {code} {spacing}^'.format(\n code=code,\n spacing=' ' * (error.column_number - 1 - adjust),\n )\n\n def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210\n \"\"\"Called when ``--statistic`` option is passed.\"\"\"\n all_errors = 0\n for error_code in statistics.error_codes():\n stats_for_error_code = statistics.statistics_for(error_code)\n statistic = next(stats_for_error_code)\n\n count = statistic.count\n count += sum(stat.count for stat in stats_for_error_code)\n all_errors += count\n error_by_file = _count_per_filename(statistics, error_code)\n\n self._print_violation_per_file(\n statistic,\n error_code,\n count,\n error_by_file,\n )\n\n self._write(self.newline)\n self._write(_underline(_bold('All errors: {0}'.format(all_errors))))\n\n def stop(self) -> None:\n \"\"\"Runs once per app when the formatting ends.\"\"\"\n if self._error_count:\n message = '{0}Full list of violations and explanations:{0}{1}'\n self._write(message.format(self.newline, self._doc_url))\n\n # Our own methods:\n\n def _show_link(self, error: Violation) -> str:\n \"\"\"Called when ``--show-violation-links`` option is provided.\"\"\"\n if not self.options.show_violation_links:\n return ''\n\n return ' {spacing}-> {link}'.format(\n spacing=' ' * 9,\n link=SHORTLINK_TEMPLATE.format(error.code),\n )\n\n def _print_header(self, filename: str) -> None:\n self._write(\n '{newline}{filename}'.format(\n filename=_underline(_bold(filename)),\n newline=self.newline,\n ),\n )\n\n def _print_violation_per_file(\n self,\n statistic: Statistics,\n error_code: str,\n count: int,\n error_by_file: DefaultDict[str, int],\n ):\n self._write(\n '{newline}{error_code}: {message}'.format(\n newline=self.newline,\n error_code=_bold(error_code),\n message=statistic.message,\n ),\n )\n for filename, error_count in error_by_file.items():\n self._write(\n ' {error_count:<5} {filename}'.format(\n error_count=error_count,\n 
filename=filename,\n ),\n )\n self._write(_underline('Total: {0}'.format(count)))\n\n def _should_show_source(self, error: Violation) -> bool:\n return self.options.show_source and error.physical_line is not None\n\n\n# Formatting text:\n\ndef _bold(text: str) -> str:\n r\"\"\"\n Returns bold formatted text.\n\n >>> _bold('Hello!')\n '\\x1b[1mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[1m{0}\\033[0m'.format(text)\n\n\ndef _underline(text: str) -> str:\n r\"\"\"\n Returns underlined formatted text.\n\n >>> _underline('Hello!')\n '\\x1b[4mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[4m{0}\\033[0m'.format(text)\n\n\ndef _highlight(source: str, lexer, formatter) -> str:\n \"\"\"\n Highlights source code. Might fail.\n\n See also:\n https://github.com/wemake-services/wemake-python-styleguide/issues/794\n\n \"\"\"\n try:\n return highlight(source, lexer, formatter)\n except Exception: # pragma: no cover\n # Might fail on some systems, when colors are set incorrectly,\n # or not available at all. In this case code will be just text.\n return source\n\n\n# Helpers:\n\ndef _count_per_filename(\n statistics: Statistics,\n error_code: str,\n) -> DefaultDict[str, int]:\n filenames: DefaultDict[str, int] = defaultdict(int)\n stats_for_error_code = statistics.statistics_for(error_code)\n\n for stat in stats_for_error_code:\n filenames[stat.filename] += stat.count\n\n return filenames\n", "path": "wemake_python_styleguide/formatter.py"}], "after_files": [{"content": "\"\"\"\nOur very own ``flake8`` formatter for better error messages.\n\nThat's how all ``flake8`` formatters work:\n\n.. mermaid::\n :caption: ``flake8`` formatting API calls order.\n\n graph LR\n F2[start] --> F3[after_init]\n F3 --> F4[start]\n F4 --> F5[beginning]\n F5 --> F6[handle]\n F6 --> F7[format]\n F6\t --> F8[show_source]\n F6\t --> F9[show_statistic]\n F7 --> F10[finished]\n F8 --> F10[finished]\n F9 --> F10[finished]\n F10 -.-> F5\n F10 --> F11[stop]\n\n.. autoclass:: WemakeFormatter\n :no-undoc-members:\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List\n\nfrom flake8.formatting.base import BaseFormatter\nfrom flake8.statistics import Statistics\nfrom flake8.style_guide import Violation\nfrom pygments import highlight\nfrom pygments.formatters import TerminalFormatter\nfrom pygments.lexers import PythonLexer\nfrom typing_extensions import Final\n\nfrom wemake_python_styleguide.version import pkg_version\n\n#: That url is generated and hosted by Sphinx.\nDOCS_URL_TEMPLATE: Final = (\n 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'\n)\n\n#: This url points to the specific violation page.\nSHORTLINK_TEMPLATE: Final = (\n 'https://pyflak.es/{0}'\n)\n\n\nclass WemakeFormatter(BaseFormatter): # noqa: WPS214\n \"\"\"\n We need to format our style :term:`violations <violation>` beatifully.\n\n The default formatter does not allow us to do that.\n What things do we miss?\n\n 1. Spacing, everything is just mixed up and glued together\n 2. Colors and decoration, some information is easier\n to gather just with colors or underlined text\n 3. Grouping, we need explicit grouping by filename\n 4. 
Incomplete and non-informative statistics\n\n \"\"\"\n\n _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)\n\n # API:\n\n def after_init(self):\n \"\"\"Called after the original ``init`` is used to set extra fields.\"\"\"\n self._lexer = PythonLexer()\n self._formatter = TerminalFormatter()\n\n # Logic:\n self._processed_filenames: List[str] = []\n self._error_count = 0\n\n def handle(self, error: Violation) -> None: # noqa: WPS110\n \"\"\"Processes each :term:`violation` to print it and all related.\"\"\"\n if error.filename not in self._processed_filenames:\n self._print_header(error.filename)\n self._processed_filenames.append(error.filename)\n\n line = self.format(error)\n source = self.show_source(error)\n link = self._show_link(error)\n\n self._write(line)\n if link:\n self._write(link)\n if source:\n self._write(source)\n\n self._error_count += 1\n\n def format(self, error: Violation) -> str: # noqa: WPS125\n \"\"\"Called to format each individual :term:`violation`.\"\"\"\n return '{newline} {row_col:<8} {code:<5} {text}'.format(\n newline=self.newline if self._should_show_source(error) else '',\n code=error.code,\n text=error.text,\n row_col='{0}:{1}'.format(error.line_number, error.column_number),\n )\n\n def show_source(self, error: Violation) -> str:\n \"\"\"Called when ``--show-source`` option is provided.\"\"\"\n if not self._should_show_source(error):\n return ''\n\n formatted_line = error.physical_line.lstrip()\n adjust = len(error.physical_line) - len(formatted_line)\n\n code = _highlight(\n formatted_line,\n self._lexer,\n self._formatter,\n )\n\n return ' {code} {spacing}^'.format(\n code=code,\n spacing=' ' * (error.column_number - 1 - adjust),\n )\n\n def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210\n \"\"\"Called when ``--statistic`` option is passed.\"\"\"\n all_errors = 0\n for error_code in statistics.error_codes():\n stats_for_error_code = statistics.statistics_for(error_code)\n statistic = next(stats_for_error_code)\n\n count = statistic.count\n count += sum(stat.count for stat in stats_for_error_code)\n all_errors += count\n error_by_file = _count_per_filename(statistics, error_code)\n\n self._print_violation_per_file(\n statistic,\n error_code,\n count,\n error_by_file,\n )\n\n self._write(self.newline)\n self._write(_underline(_bold('All errors: {0}'.format(all_errors))))\n\n def stop(self) -> None:\n \"\"\"Runs once per app when the formatting ends.\"\"\"\n if self._error_count:\n message = '{0}Full list of violations and explanations:{0}{1}'\n self._write(message.format(self.newline, self._doc_url))\n\n # Our own methods:\n\n def _show_link(self, error: Violation) -> str:\n \"\"\"Called when ``--show-violation-links`` option is provided.\"\"\"\n if not self.options.show_violation_links:\n return ''\n\n return ' {spacing}-> {link}'.format(\n spacing=' ' * 9,\n link=SHORTLINK_TEMPLATE.format(error.code),\n )\n\n def _print_header(self, filename: str) -> None:\n self._write(\n '{newline}{filename}'.format(\n filename=_underline(_bold(filename)),\n newline=self.newline,\n ),\n )\n\n def _print_violation_per_file(\n self,\n statistic: Statistics,\n error_code: str,\n count: int,\n error_by_file: DefaultDict[str, int],\n ):\n self._write(\n '{newline}{error_code}: {message}'.format(\n newline=self.newline,\n error_code=_bold(error_code),\n message=statistic.message,\n ),\n )\n for filename, error_count in error_by_file.items():\n self._write(\n ' {error_count:<5} {filename}'.format(\n error_count=error_count,\n 
filename=filename,\n ),\n )\n self._write(_underline('Total: {0}'.format(count)))\n\n def _should_show_source(self, error: Violation) -> bool:\n return self.options.show_source and error.physical_line is not None\n\n\n# Formatting text:\n\ndef _bold(text: str) -> str:\n r\"\"\"\n Returns bold formatted text.\n\n >>> _bold('Hello!')\n '\\x1b[1mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[1m{0}\\033[0m'.format(text)\n\n\ndef _underline(text: str) -> str:\n r\"\"\"\n Returns underlined formatted text.\n\n >>> _underline('Hello!')\n '\\x1b[4mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[4m{0}\\033[0m'.format(text)\n\n\ndef _highlight(source: str, lexer, formatter) -> str:\n \"\"\"\n Highlights source code. Might fail.\n\n See also:\n https://github.com/wemake-services/wemake-python-styleguide/issues/794\n\n \"\"\"\n try:\n return highlight(source, lexer, formatter)\n except Exception: # pragma: no cover\n # Might fail on some systems, when colors are set incorrectly,\n # or not available at all. In this case code will be just text.\n return source\n\n\n# Helpers:\n\ndef _count_per_filename(\n statistics: Statistics,\n error_code: str,\n) -> DefaultDict[str, int]:\n filenames: DefaultDict[str, int] = defaultdict(int)\n stats_for_error_code = statistics.statistics_for(error_code)\n\n for stat in stats_for_error_code:\n filenames[stat.filename] += stat.count\n\n return filenames\n", "path": "wemake_python_styleguide/formatter.py"}]} | 2,781 | 139 |
gh_patches_debug_36330 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-166 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix health checker /ping/ - infinite looping
Improve check_database_connection function
We can improve this code like this:
```python
def check_database_connection(**kwargs):
for conn in connections.all():
try:
conn.cursor()
return True
except OperationalError:
return False
return False
```
reported by @eltonplima in #158
--- END ISSUE ---
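Neither the issue above nor the code segments that follow show how these check functions are mounted on the `/ping/` route mentioned in the title, so the wiring here is only a hypothetical sketch: the `ping` view and its response shape are assumptions, while the two check functions are the ones defined in `bothub/health/checks.py` below.

```python
# Hypothetical wiring (not taken from the repository): a /ping/ view that runs
# the health checks and reports 503 when any of them fails.
from django.http import JsonResponse

from bothub.health.checks import check_accessible_api, check_database_connection


def ping(request):
    results = {
        'database': check_database_connection(),
        'api': check_accessible_api(request),
    }
    status = 200 if all(results.values()) else 503
    return JsonResponse(results, status=status)
```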
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/health/checks.py`
Content:
```
1 def check_database_connection(**kwargs):
2 from django.db import connections
3 from django.db.utils import OperationalError
4 db_conn = connections['default']
5 if not db_conn:
6 return False
7 try:
8 db_conn.cursor()
9 return True
10 except OperationalError as e:
11 return False
12
13
14 def check_accessible_api(request, **kwargs):
15 import requests
16 HTTP_HOST = request.META.get('HTTP_HOST')
17 repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)
18 request = requests.get(repositories_url)
19 try:
20 request.raise_for_status()
21 return True
22 except requests.HTTPError as e:
23 return False
24
```
Path: `bothub/settings.py`
Content:
```
1 import os
2 import dj_database_url
3
4 from decouple import config
5
6
7 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
8 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
9
10
11 # SECURITY WARNING: keep the secret key used in production secret!
12 SECRET_KEY = config('SECRET_KEY')
13
14 # SECURITY WARNING: don't run with debug turned on in production!
15 DEBUG = config('DEBUG', default=False, cast=bool)
16
17 ALLOWED_HOSTS = config(
18 'ALLOWED_HOSTS',
19 default='*',
20 cast=lambda v: [s.strip() for s in v.split(',')])
21
22
23 # Application definition
24
25 INSTALLED_APPS = [
26 'django.contrib.admin',
27 'django.contrib.auth',
28 'django.contrib.contenttypes',
29 'django.contrib.sessions',
30 'django.contrib.messages',
31 'django.contrib.staticfiles',
32 'rest_framework',
33 'rest_framework.authtoken',
34 'django_filters',
35 'corsheaders',
36 'bothub.authentication',
37 'bothub.common',
38 'bothub.api',
39 ]
40
41 MIDDLEWARE = [
42 'django.middleware.security.SecurityMiddleware',
43 'whitenoise.middleware.WhiteNoiseMiddleware',
44 'django.contrib.sessions.middleware.SessionMiddleware',
45 'corsheaders.middleware.CorsMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.messages.middleware.MessageMiddleware',
50 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 ]
52
53 ROOT_URLCONF = 'bothub.urls'
54
55 TEMPLATES = [
56 {
57 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 'DIRS': [],
59 'APP_DIRS': True,
60 'OPTIONS': {
61 'context_processors': [
62 'django.template.context_processors.debug',
63 'django.template.context_processors.request',
64 'django.contrib.auth.context_processors.auth',
65 'django.contrib.messages.context_processors.messages',
66 ],
67 },
68 },
69 ]
70
71 WSGI_APPLICATION = 'bothub.wsgi.application'
72
73
74 # Database
75
76 DATABASES = {}
77 DATABASES['default'] = dj_database_url.parse(
78 config(
79 'DEFAULT_DATABASE',
80 default='sqlite:///db.sqlite3'))
81
82
83 # Auth
84
85 AUTH_USER_MODEL = 'authentication.User'
86
87
88 # Password validation
89
90 AUTH_PASSWORD_VALIDATORS = [
91 {
92 'NAME': 'django.contrib.auth.password_validation.' +
93 'UserAttributeSimilarityValidator',
94 },
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'MinimumLengthValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'NumericPasswordValidator',
106 },
107 ]
108
109
110 # Internationalization
111
112 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
113
114 TIME_ZONE = config('TIME_ZONE', default='UTC')
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122
123 # Static files (CSS, JavaScript, Images)
124
125 STATIC_URL = config('STATIC_URL', default='/static/')
126
127 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
128
129 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
130
131
132 # rest framework
133
134 REST_FRAMEWORK = {
135 'DEFAULT_AUTHENTICATION_CLASSES': [
136 'rest_framework.authentication.TokenAuthentication',
137 ],
138 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
139 'LimitOffsetPagination',
140 'PAGE_SIZE': 20,
141 'DEFAULT_FILTER_BACKENDS': [
142 'django_filters.rest_framework.DjangoFilterBackend',
143 ],
144 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
145 }
146
147
148 # cors headers
149
150 CORS_ORIGIN_ALLOW_ALL = True
151 CORS_URLS_REGEX = r'^/api/.*$'
152
153
154 # mail
155
156 envvar_EMAIL_HOST = config('EMAIL_HOST', default=None)
157
158 ADMINS = config(
159 'ADMINS',
160 default='',
161 cast=lambda v: [
162 (
163 s.strip().split('|')[0],
164 s.strip().split('|')[1],
165 ) for s in v.split(',')] if v else [])
166 EMAIL_SUBJECT_PREFIX = '[bothub] '
167 DEFAULT_FROM_EMAIL = config(
168 'DEFAULT_FROM_EMAIL',
169 default='webmaster@localhost')
170 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
171
172 if envvar_EMAIL_HOST:
173 EMAIL_HOST = envvar_EMAIL_HOST
174 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
175 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
176 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
177 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
178 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
179 else:
180 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
181
182
183 # webapp
184
185 BOTHUB_WEBAPP_BASE_URL = config(
186 'BOTHUB_WEBAPP_BASE_URL',
187 default='http://localhost:8080/')
188
189
190 # NLP
191
192 BOTHUB_NLP_BASE_URL = config(
193 'BOTHUB_NLP_BASE_URL',
194 default='http://localhost:8001/')
195
196
197 # CSRF
198
199 CSRF_COOKIE_DOMAIN = config(
200 'CSRF_COOKIE_DOMAIN',
201 default=None)
202
203 CSRF_COOKIE_SECURE = config(
204 'CSRF_COOKIE_SECURE',
205 default=False,
206 cast=bool)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bothub/health/checks.py b/bothub/health/checks.py
--- a/bothub/health/checks.py
+++ b/bothub/health/checks.py
@@ -1,23 +1,37 @@
+import logging
+
+from rest_framework import status
+
+
+logger = logging.getLogger('bothub.health.checks')
+
+CHECK_ACCESSIBLE_API_URL = '/api/repositories/'
+
+
def check_database_connection(**kwargs):
from django.db import connections
from django.db.utils import OperationalError
- db_conn = connections['default']
- if not db_conn:
- return False
- try:
- db_conn.cursor()
- return True
- except OperationalError as e:
+ if len(connections.all()) is 0:
return False
+ logger.info('found {} database connection'.format(len(connections.all())))
+ for i, conn in enumerate(connections.all(), 1):
+ try:
+ conn.cursor()
+ logger.info('#{} db connection OKAY'.format(i))
+ except OperationalError as e:
+ logger.warning('#{} db connection ERROR'.format(i))
+ return False
+ return True
def check_accessible_api(request, **kwargs):
- import requests
- HTTP_HOST = request.META.get('HTTP_HOST')
- repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)
- request = requests.get(repositories_url)
- try:
- request.raise_for_status()
+ from django.test import Client
+ logger.info('making request to {}'.format(CHECK_ACCESSIBLE_API_URL))
+ client = Client()
+ response = client.get(CHECK_ACCESSIBLE_API_URL)
+ logger.info('{} status code: {}'.format(
+ CHECK_ACCESSIBLE_API_URL,
+ response.status_code))
+ if response.status_code is status.HTTP_200_OK:
return True
- except requests.HTTPError as e:
- return False
+ return False
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -2,6 +2,7 @@
import dj_database_url
from decouple import config
+from django.utils.log import DEFAULT_LOGGING
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
@@ -191,7 +192,7 @@
BOTHUB_NLP_BASE_URL = config(
'BOTHUB_NLP_BASE_URL',
- default='http://localhost:8001/')
+ default='http://localhost:2657/')
# CSRF
@@ -204,3 +205,21 @@
'CSRF_COOKIE_SECURE',
default=False,
cast=bool)
+
+
+# Logging
+
+LOGGING = DEFAULT_LOGGING
+LOGGING['formatters']['bothub.health'] = {
+ 'format': '[bothub.health] {message}',
+ 'style': '{',
+}
+LOGGING['handlers']['bothub.health'] = {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'bothub.health',
+}
+LOGGING['loggers']['bothub.health.checks'] = {
+ 'handlers': ['bothub.health'],
+ 'level': 'DEBUG',
+}
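The key change in `check_accessible_api` is swapping the outbound `requests.get` call back into the same host — presumably the source of the reported `/ping/` looping — for Django's in-process test client, which dispatches through the URLconf without another HTTP round trip. A quick way to exercise that pattern is from a configured Django shell (a sketch; it assumes the project's settings are loaded, e.g. via `python manage.py shell`):

```python
# Sketch: run inside `python manage.py shell` so Django settings are configured.
from django.test import Client

client = Client()
response = client.get('/api/repositories/')
print(response.status_code)  # expected 200 when the API view is reachable
```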
| {"golden_diff": "diff --git a/bothub/health/checks.py b/bothub/health/checks.py\n--- a/bothub/health/checks.py\n+++ b/bothub/health/checks.py\n@@ -1,23 +1,37 @@\n+import logging\n+\n+from rest_framework import status\n+\n+\n+logger = logging.getLogger('bothub.health.checks')\n+\n+CHECK_ACCESSIBLE_API_URL = '/api/repositories/'\n+\n+\n def check_database_connection(**kwargs):\n from django.db import connections\n from django.db.utils import OperationalError\n- db_conn = connections['default']\n- if not db_conn:\n- return False\n- try:\n- db_conn.cursor()\n- return True\n- except OperationalError as e:\n+ if len(connections.all()) is 0:\n return False\n+ logger.info('found {} database connection'.format(len(connections.all())))\n+ for i, conn in enumerate(connections.all(), 1):\n+ try:\n+ conn.cursor()\n+ logger.info('#{} db connection OKAY'.format(i))\n+ except OperationalError as e:\n+ logger.warning('#{} db connection ERROR'.format(i))\n+ return False\n+ return True\n \n \n def check_accessible_api(request, **kwargs):\n- import requests\n- HTTP_HOST = request.META.get('HTTP_HOST')\n- repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)\n- request = requests.get(repositories_url)\n- try:\n- request.raise_for_status()\n+ from django.test import Client\n+ logger.info('making request to {}'.format(CHECK_ACCESSIBLE_API_URL))\n+ client = Client()\n+ response = client.get(CHECK_ACCESSIBLE_API_URL)\n+ logger.info('{} status code: {}'.format(\n+ CHECK_ACCESSIBLE_API_URL,\n+ response.status_code))\n+ if response.status_code is status.HTTP_200_OK:\n return True\n- except requests.HTTPError as e:\n- return False\n+ return False\ndiff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -2,6 +2,7 @@\n import dj_database_url\n \n from decouple import config\n+from django.utils.log import DEFAULT_LOGGING\n \n \n # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n@@ -191,7 +192,7 @@\n \n BOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n- default='http://localhost:8001/')\n+ default='http://localhost:2657/')\n \n \n # CSRF\n@@ -204,3 +205,21 @@\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n+\n+\n+# Logging\n+\n+LOGGING = DEFAULT_LOGGING\n+LOGGING['formatters']['bothub.health'] = {\n+ 'format': '[bothub.health] {message}',\n+ 'style': '{',\n+}\n+LOGGING['handlers']['bothub.health'] = {\n+ 'level': 'DEBUG',\n+ 'class': 'logging.StreamHandler',\n+ 'formatter': 'bothub.health',\n+}\n+LOGGING['loggers']['bothub.health.checks'] = {\n+ 'handlers': ['bothub.health'],\n+ 'level': 'DEBUG',\n+}\n", "issue": "Fix health checker /ping/ - infinite looping\n\nImprove check_database_connection function\nWe can improve this code like that:\r\n\r\n```python\r\ndef check_database_connection(**kwargs):\r\n for conn in connections.all():\r\n try:\r\n conn.cursor()\r\n return True\r\n except OperationalError:\r\n return False\r\n return False\r\n```\r\n\r\nreported by @eltonplima in #158 \nImprove check_database_connection function\nWe can improve this code like that:\r\n\r\n```python\r\ndef check_database_connection(**kwargs):\r\n for conn in connections.all():\r\n try:\r\n conn.cursor()\r\n return True\r\n except OperationalError:\r\n return False\r\n return False\r\n```\r\n\r\nreported by @eltonplima in #158 \n", "before_files": [{"content": "def check_database_connection(**kwargs):\n from django.db import connections\n from django.db.utils import OperationalError\n db_conn = connections['default']\n if not db_conn:\n 
return False\n try:\n db_conn.cursor()\n return True\n except OperationalError as e:\n return False\n\n\ndef check_accessible_api(request, **kwargs):\n import requests\n HTTP_HOST = request.META.get('HTTP_HOST')\n repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)\n request = requests.get(repositories_url)\n try:\n request.raise_for_status()\n return True\n except requests.HTTPError as e:\n return False\n", "path": "bothub/health/checks.py"}, {"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}], "after_files": [{"content": "import logging\n\nfrom rest_framework import status\n\n\nlogger = logging.getLogger('bothub.health.checks')\n\nCHECK_ACCESSIBLE_API_URL = '/api/repositories/'\n\n\ndef check_database_connection(**kwargs):\n from django.db import connections\n from django.db.utils import OperationalError\n if len(connections.all()) is 0:\n return False\n logger.info('found {} database connection'.format(len(connections.all())))\n for i, conn in enumerate(connections.all(), 1):\n try:\n conn.cursor()\n logger.info('#{} db connection OKAY'.format(i))\n except OperationalError as e:\n logger.warning('#{} db connection ERROR'.format(i))\n return False\n return True\n\n\ndef check_accessible_api(request, **kwargs):\n from django.test import Client\n logger.info('making request to {}'.format(CHECK_ACCESSIBLE_API_URL))\n client = Client()\n response = client.get(CHECK_ACCESSIBLE_API_URL)\n logger.info('{} status code: {}'.format(\n CHECK_ACCESSIBLE_API_URL,\n response.status_code))\n if response.status_code is status.HTTP_200_OK:\n return True\n return False\n", "path": "bothub/health/checks.py"}, {"content": "import os\nimport dj_database_url\n\nfrom decouple import config\nfrom django.utils.log import DEFAULT_LOGGING\n\n\n# Build paths inside the project like this: 
os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' 
+\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:2657/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n\n\n# Logging\n\nLOGGING = DEFAULT_LOGGING\nLOGGING['formatters']['bothub.health'] = {\n 'format': '[bothub.health] {message}',\n 'style': '{',\n}\nLOGGING['handlers']['bothub.health'] = {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'bothub.health',\n}\nLOGGING['loggers']['bothub.health.checks'] = {\n 'handlers': ['bothub.health'],\n 'level': 'DEBUG',\n}\n", "path": "bothub/settings.py"}]} | 2,288 | 738 |
gh_patches_debug_10295 | rasdani/github-patches | git_diff | holoviz__panel-723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scipy now required dependency?
The latest dev build of panel (panel-0.7.0a15-py_0) looks like it is trying to import scipy, but scipy is not among the required dependencies that are automatically installed through conda.
```
$ conda create -n panel -c pyviz/label/dev panel
...
$ conda activate panel
(panel) $ python
Python 3.7.4 (default, Aug 13 2019, 15:17:50)
[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import panel
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/__init__.py", line 6, in <module>
from . import links # noqa
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/links.py", line 12, in <module>
from .pane.holoviews import HoloViews, generate_panel_bokeh_map, is_bokeh_element_plot
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/__init__.py", line 13, in <module>
from .holoviews import HoloViews # noqa
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/holoviews.py", line 20, in <module>
from ..widgets import Player
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/__init__.py", line 12, in <module>
from .misc import Audio, VideoStream # noqa
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/misc.py", line 14, in <module>
from scipy.io import wavfile
ModuleNotFoundError: No module named 'scipy'
```
--- END ISSUE ---
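The fix applied later in this entry defers the scipy import into the only code path that needs it. As a standalone illustration of that lazy-import pattern — the helper name and the error message below are inventions for the example, not panel APIs — the idea looks like this:

```python
from io import BytesIO


def _to_wav_bytes(data, sample_rate=44100):
    """Convert a numpy array to WAV bytes, importing scipy only on demand."""
    try:
        from scipy.io import wavfile  # deferred: plain `import panel` stays scipy-free
    except ImportError as exc:
        raise ImportError(
            'scipy is required to encode numpy arrays as WAV audio'
        ) from exc
    buffer = BytesIO()
    wavfile.write(buffer, sample_rate, data)
    return buffer.getvalue()
```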
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/widgets/misc.py`
Content:
```
1 """
2 Miscellaneous widgets which do not fit into the other main categories.
3 """
4 from __future__ import absolute_import, division, unicode_literals
5
6 import os
7
8 from io import BytesIO
9 from base64 import b64encode
10 from six import string_types
11
12 import param
13 import numpy as np
14 from scipy.io import wavfile
15
16 from ..io.notebook import push
17 from ..io.state import state
18 from ..models import (Audio as _BkAudio,
19 VideoStream as _BkVideoStream)
20 from .base import Widget
21
22
23 class Audio(Widget):
24
25 loop = param.Boolean(default=False, doc="""
26 Whether the audio should loop""")
27
28 time = param.Number(default=0, doc="""
29 The current timestamp""")
30
31 throttle = param.Integer(default=250, doc="""
32 How frequently to sample the current playback time in milliseconds""")
33
34 paused = param.Boolean(default=True, doc="""
35 Whether the audio is currently paused""")
36
37 sample_rate = param.Integer(default=44100, doc="""
38 The sample_rate of the audio when given a NumPy array.""")
39
40 value = param.ClassSelector(default='', class_=(string_types + (np.ndarray,)), doc="""
41 The audio file either local or remote.""")
42
43 volume = param.Number(default=None, bounds=(0, 100), doc="""
44 The volume of the audio player.""")
45
46 _widget_type = _BkAudio
47
48 _rename = {'name': None, 'sample_rate': None}
49
50 def _from_numpy(self, data):
51 buffer = BytesIO()
52 wavfile.write(buffer, self.sample_rate, data)
53 return buffer
54
55 def _process_param_change(self, msg):
56 msg = super(Audio, self)._process_param_change(msg)
57
58 if 'value' in msg:
59 value = msg['value']
60 if isinstance(value, np.ndarray):
61 fmt = 'wav'
62 buffer = self._from_numpy(value)
63 data = b64encode(buffer.getvalue())
64 elif os.path.isfile(value):
65 fmt = value.split('.')[-1]
66 with open(value, 'rb') as f:
67 data = f.read()
68 data = b64encode(data)
69 elif value.lower().startswith('http'):
70 return msg
71 elif not value:
72 data, fmt = b'', 'wav'
73 else:
74 raise ValueError('Value should be either path to a sound file or numpy array')
75 template = 'data:audio/{mime};base64,{data}'
76 msg['value'] = template.format(data=data.decode('utf-8'),
77 mime=fmt)
78 return msg
79
80
81
82 class VideoStream(Widget):
83
84 format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],
85 doc="""
86 The file format as which the video is returned.""")
87
88 paused = param.Boolean(default=False, doc="""
89 Whether the video is currently paused""")
90
91 timeout = param.Number(default=None, doc="""
92 Interval between snapshots in millisecons""")
93
94 value = param.String(default='', doc="""
95 A base64 representation of the video stream snapshot.""")
96
97 _widget_type = _BkVideoStream
98
99 _rename = {'name': None}
100
101 def snapshot(self):
102 """
103 Triggers a snapshot of the current VideoStream state to sync
104 the widget value.
105 """
106 for ref, (m, _) in self._models.items():
107 m.snapshot = not m.snapshot
108 (self, root, doc, comm) = state._views[ref]
109 if comm and 'embedded' not in root.tags:
110 push(doc, comm)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/widgets/misc.py b/panel/widgets/misc.py
--- a/panel/widgets/misc.py
+++ b/panel/widgets/misc.py
@@ -11,7 +11,6 @@
import param
import numpy as np
-from scipy.io import wavfile
from ..io.notebook import push
from ..io.state import state
@@ -48,6 +47,7 @@
_rename = {'name': None, 'sample_rate': None}
def _from_numpy(self, data):
+ from scipy.io import wavfile
buffer = BytesIO()
wavfile.write(buffer, self.sample_rate, data)
return buffer
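With the import moved inside `_from_numpy`, scipy is only needed when an `Audio` widget is actually given a numpy array — roughly as in this usage sketch (it assumes panel with the patch applied is installed and imported as `pn`; the 440 Hz tone is arbitrary example data):

```python
import numpy as np
import panel as pn

rate = 44100
tone = (np.sin(2 * np.pi * 440 * np.arange(rate) / rate) * 32767).astype(np.int16)

# Only this numpy code path reaches scipy's wavfile writer; `import panel`
# alone no longer requires scipy to be installed.
audio = pn.widgets.Audio(value=tone, sample_rate=rate)
```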
| {"golden_diff": "diff --git a/panel/widgets/misc.py b/panel/widgets/misc.py\n--- a/panel/widgets/misc.py\n+++ b/panel/widgets/misc.py\n@@ -11,7 +11,6 @@\n \n import param\n import numpy as np\n-from scipy.io import wavfile\n \n from ..io.notebook import push\n from ..io.state import state\n@@ -48,6 +47,7 @@\n _rename = {'name': None, 'sample_rate': None}\n \n def _from_numpy(self, data):\n+ from scipy.io import wavfile\n buffer = BytesIO()\n wavfile.write(buffer, self.sample_rate, data)\n return buffer\n", "issue": "Scipy now required dependency?\nThe latest dev build of panel (panel-0.7.0a15-py_0) looks like it is trying to import scipy, but scipy is not among the required dependencies that are automatically installed through conda.\r\n\r\n```\r\n$ conda create -n panel -c pyviz/label/dev panel\r\n...\r\n$ conda activate panel\r\n(panel) $ python\r\nPython 3.7.4 (default, Aug 13 2019, 15:17:50) \r\n[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import panel\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/__init__.py\", line 6, in <module>\r\n from . import links # noqa\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/links.py\", line 12, in <module>\r\n from .pane.holoviews import HoloViews, generate_panel_bokeh_map, is_bokeh_element_plot\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/__init__.py\", line 13, in <module>\r\n from .holoviews import HoloViews # noqa\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/holoviews.py\", line 20, in <module>\r\n from ..widgets import Player\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/__init__.py\", line 12, in <module>\r\n from .misc import Audio, VideoStream # noqa\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/misc.py\", line 14, in <module>\r\n from scipy.io import wavfile\r\nModuleNotFoundError: No module named 'scipy'\r\n```\n", "before_files": [{"content": "\"\"\"\nMiscellaneous widgets which do not fit into the other main categories.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport os\n\nfrom io import BytesIO\nfrom base64 import b64encode\nfrom six import string_types\n\nimport param\nimport numpy as np\nfrom scipy.io import wavfile\n\nfrom ..io.notebook import push\nfrom ..io.state import state\nfrom ..models import (Audio as _BkAudio,\n VideoStream as _BkVideoStream)\nfrom .base import Widget\n\n\nclass Audio(Widget):\n\n loop = param.Boolean(default=False, doc=\"\"\"\n Whether the audio should loop\"\"\")\n\n time = param.Number(default=0, doc=\"\"\"\n The current timestamp\"\"\")\n\n throttle = param.Integer(default=250, doc=\"\"\"\n How frequently to sample the current playback time in milliseconds\"\"\")\n\n paused = param.Boolean(default=True, doc=\"\"\"\n Whether the audio is currently paused\"\"\")\n\n sample_rate = param.Integer(default=44100, doc=\"\"\"\n The sample_rate of the audio when given a NumPy array.\"\"\")\n\n value = param.ClassSelector(default='', class_=(string_types + (np.ndarray,)), doc=\"\"\"\n The audio file either local or remote.\"\"\")\n\n volume = param.Number(default=None, bounds=(0, 100), doc=\"\"\"\n The volume of the audio player.\"\"\")\n\n 
_widget_type = _BkAudio\n\n _rename = {'name': None, 'sample_rate': None}\n\n def _from_numpy(self, data):\n buffer = BytesIO()\n wavfile.write(buffer, self.sample_rate, data)\n return buffer\n\n def _process_param_change(self, msg):\n msg = super(Audio, self)._process_param_change(msg)\n\n if 'value' in msg:\n value = msg['value']\n if isinstance(value, np.ndarray):\n fmt = 'wav'\n buffer = self._from_numpy(value)\n data = b64encode(buffer.getvalue())\n elif os.path.isfile(value):\n fmt = value.split('.')[-1]\n with open(value, 'rb') as f:\n data = f.read()\n data = b64encode(data)\n elif value.lower().startswith('http'):\n return msg\n elif not value:\n data, fmt = b'', 'wav'\n else:\n raise ValueError('Value should be either path to a sound file or numpy array')\n template = 'data:audio/{mime};base64,{data}'\n msg['value'] = template.format(data=data.decode('utf-8'),\n mime=fmt)\n return msg\n\n\n\nclass VideoStream(Widget):\n\n format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],\n doc=\"\"\"\n The file format as which the video is returned.\"\"\")\n\n paused = param.Boolean(default=False, doc=\"\"\"\n Whether the video is currently paused\"\"\")\n\n timeout = param.Number(default=None, doc=\"\"\"\n Interval between snapshots in millisecons\"\"\")\n\n value = param.String(default='', doc=\"\"\"\n A base64 representation of the video stream snapshot.\"\"\")\n\n _widget_type = _BkVideoStream\n\n _rename = {'name': None}\n\n def snapshot(self):\n \"\"\"\n Triggers a snapshot of the current VideoStream state to sync\n the widget value.\n \"\"\"\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n", "path": "panel/widgets/misc.py"}], "after_files": [{"content": "\"\"\"\nMiscellaneous widgets which do not fit into the other main categories.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport os\n\nfrom io import BytesIO\nfrom base64 import b64encode\nfrom six import string_types\n\nimport param\nimport numpy as np\n\nfrom ..io.notebook import push\nfrom ..io.state import state\nfrom ..models import (Audio as _BkAudio,\n VideoStream as _BkVideoStream)\nfrom .base import Widget\n\n\nclass Audio(Widget):\n\n loop = param.Boolean(default=False, doc=\"\"\"\n Whether the audio should loop\"\"\")\n\n time = param.Number(default=0, doc=\"\"\"\n The current timestamp\"\"\")\n\n throttle = param.Integer(default=250, doc=\"\"\"\n How frequently to sample the current playback time in milliseconds\"\"\")\n\n paused = param.Boolean(default=True, doc=\"\"\"\n Whether the audio is currently paused\"\"\")\n\n sample_rate = param.Integer(default=44100, doc=\"\"\"\n The sample_rate of the audio when given a NumPy array.\"\"\")\n\n value = param.ClassSelector(default='', class_=(string_types + (np.ndarray,)), doc=\"\"\"\n The audio file either local or remote.\"\"\")\n\n volume = param.Number(default=None, bounds=(0, 100), doc=\"\"\"\n The volume of the audio player.\"\"\")\n\n _widget_type = _BkAudio\n\n _rename = {'name': None, 'sample_rate': None}\n\n def _from_numpy(self, data):\n from scipy.io import wavfile\n buffer = BytesIO()\n wavfile.write(buffer, self.sample_rate, data)\n return buffer\n\n def _process_param_change(self, msg):\n msg = super(Audio, self)._process_param_change(msg)\n\n if 'value' in msg:\n value = msg['value']\n if isinstance(value, np.ndarray):\n fmt = 'wav'\n buffer = self._from_numpy(value)\n data = 
b64encode(buffer.getvalue())\n elif os.path.isfile(value):\n fmt = value.split('.')[-1]\n with open(value, 'rb') as f:\n data = f.read()\n data = b64encode(data)\n elif value.lower().startswith('http'):\n return msg\n elif not value:\n data, fmt = b'', 'wav'\n else:\n raise ValueError('Value should be either path to a sound file or numpy array')\n template = 'data:audio/{mime};base64,{data}'\n msg['value'] = template.format(data=data.decode('utf-8'),\n mime=fmt)\n return msg\n\n\n\nclass VideoStream(Widget):\n\n format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],\n doc=\"\"\"\n The file format as which the video is returned.\"\"\")\n\n paused = param.Boolean(default=False, doc=\"\"\"\n Whether the video is currently paused\"\"\")\n\n timeout = param.Number(default=None, doc=\"\"\"\n Interval between snapshots in millisecons\"\"\")\n\n value = param.String(default='', doc=\"\"\"\n A base64 representation of the video stream snapshot.\"\"\")\n\n _widget_type = _BkVideoStream\n\n _rename = {'name': None}\n\n def snapshot(self):\n \"\"\"\n Triggers a snapshot of the current VideoStream state to sync\n the widget value.\n \"\"\"\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n", "path": "panel/widgets/misc.py"}]} | 1,782 | 143 |
gh_patches_debug_11102 | rasdani/github-patches | git_diff | ultralytics__yolov5-10359 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ClearML logs default args when using the run function
### Search before asking
- [X] I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.
### YOLOv5 Component
_No response_
### Bug
When you run training with the `run` function, as described here: https://github.com/ultralytics/yolov5/blob/10c025d794ca395a2ca0b2a00aff65f3a92ecd8d/train.py#L622
the ClearML args logging
https://github.com/allegroai/clearml/blob/7016138c849a4f8d0b4d296b319e0b23a1b7bd9e/clearml/binding/args.py#L231
skips the modification of the parameters.
In the end, when you look at the ClearML server under Configuration > Args, you see the default argument values and not the ones you trained with.
(Training still runs with the correct parameters you pass.)
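
One workaround sketch from the calling side (assuming a recent `clearml` client; the `overrides` dict and the explicit `task.connect(..., name='Args')` call are illustrative and not part of YOLOv5):

```python
from clearml import Task

import train

# Create the ClearML task first, then explicitly log the values that will be
# passed to run(), so the server's Args section shows them instead of the
# argparse defaults.
task = Task.init(project_name='YOLOv5', task_name='Training')
overrides = dict(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
task.connect(overrides, name='Args')  # manual connect, independent of the argparse hook
train.run(**overrides)
```

In practice `train.run()` creates its own ClearML task through the logger in `utils/loggers/clearml/clearml_utils.py`, so the more robust place for such an explicit connect is inside that logger rather than in user code.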
### Environment
Docker
`from ultralytics/yolov5:latest`
### Minimal Reproducible Example
```python
import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
```
In the ClearML server, under Configuration > Args, you will see `weights='yolov5s.pt'` and `imgsz=640`.
### Additional
_No response_
### Are you willing to submit a PR?
- [ ] Yes I'd like to help by submitting a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/loggers/clearml/clearml_utils.py`
Content:
```
1 """Main Logger class for ClearML experiment tracking."""
2 import glob
3 import re
4 from pathlib import Path
5
6 import numpy as np
7 import yaml
8
9 from utils.plots import Annotator, colors
10
11 try:
12 import clearml
13 from clearml import Dataset, Task
14
15 assert hasattr(clearml, '__version__') # verify package import not local dir
16 except (ImportError, AssertionError):
17 clearml = None
18
19
20 def construct_dataset(clearml_info_string):
21 """Load in a clearml dataset and fill the internal data_dict with its contents.
22 """
23 dataset_id = clearml_info_string.replace('clearml://', '')
24 dataset = Dataset.get(dataset_id=dataset_id)
25 dataset_root_path = Path(dataset.get_local_copy())
26
27 # We'll search for the yaml file definition in the dataset
28 yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml")))
29 if len(yaml_filenames) > 1:
30 raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '
31 'the dataset definition this way.')
32 elif len(yaml_filenames) == 0:
33 raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '
34 'inside the dataset root path.')
35 with open(yaml_filenames[0]) as f:
36 dataset_definition = yaml.safe_load(f)
37
38 assert set(dataset_definition.keys()).issuperset(
39 {'train', 'test', 'val', 'nc', 'names'}
40 ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"
41
42 data_dict = dict()
43 data_dict['train'] = str(
44 (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None
45 data_dict['test'] = str(
46 (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None
47 data_dict['val'] = str(
48 (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None
49 data_dict['nc'] = dataset_definition['nc']
50 data_dict['names'] = dataset_definition['names']
51
52 return data_dict
53
54
55 class ClearmlLogger:
56 """Log training runs, datasets, models, and predictions to ClearML.
57
58 This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,
59 this information includes hyperparameters, system configuration and metrics, model metrics, code information and
60 basic data metrics and analyses.
61
62 By providing additional command line arguments to train.py, datasets,
63 models and predictions can also be logged.
64 """
65
66 def __init__(self, opt, hyp):
67 """
68 - Initialize ClearML Task, this object will capture the experiment
69 - Upload dataset version to ClearML Data if opt.upload_dataset is True
70
71 arguments:
72 opt (namespace) -- Commandline arguments for this run
73 hyp (dict) -- Hyperparameters for this run
74
75 """
76 self.current_epoch = 0
77 # Keep tracked of amount of logged images to enforce a limit
78 self.current_epoch_logged_images = set()
79 # Maximum number of images to log to clearML per epoch
80 self.max_imgs_to_log_per_epoch = 16
81 # Get the interval of epochs when bounding box images should be logged
82 self.bbox_interval = opt.bbox_interval
83 self.clearml = clearml
84 self.task = None
85 self.data_dict = None
86 if self.clearml:
87 self.task = Task.init(
88 project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',
89 task_name=opt.name if opt.name != 'exp' else 'Training',
90 tags=['YOLOv5'],
91 output_uri=True,
92 auto_connect_frameworks={'pytorch': False}
93 # We disconnect pytorch auto-detection, because we added manual model save points in the code
94 )
95 # ClearML's hooks will already grab all general parameters
96 # Only the hyperparameters coming from the yaml config file
97 # will have to be added manually!
98 self.task.connect(hyp, name='Hyperparameters')
99
100 # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent
101 self.task.set_base_docker("ultralytics/yolov5:latest",
102 docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"',
103 docker_setup_bash_script='pip install clearml')
104
105 # Get ClearML Dataset Version if requested
106 if opt.data.startswith('clearml://'):
107 # data_dict should have the following keys:
108 # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
109 self.data_dict = construct_dataset(opt.data)
110 # Set data to data_dict because wandb will crash without this information and opt is the best way
111 # to give it to them
112 opt.data = self.data_dict
113
114 def log_debug_samples(self, files, title='Debug Samples'):
115 """
116 Log files (images) as debug samples in the ClearML task.
117
118 arguments:
119 files (List(PosixPath)) a list of file paths in PosixPath format
120 title (str) A title that groups together images with the same values
121 """
122 for f in files:
123 if f.exists():
124 it = re.search(r'_batch(\d+)', f.name)
125 iteration = int(it.groups()[0]) if it else 0
126 self.task.get_logger().report_image(title=title,
127 series=f.name.replace(it.group(), ''),
128 local_path=str(f),
129 iteration=iteration)
130
131 def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
132 """
133 Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
134
135 arguments:
136 image_path (PosixPath) the path the original image file
137 boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
138 class_names (dict): dict containing mapping of class int to class name
139 image (Tensor): A torch tensor containing the actual image data
140 """
141 if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
142 # Log every bbox_interval times and deduplicate for any intermittend extra eval runs
143 if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
144 im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
145 annotator = Annotator(im=im, pil=True)
146 for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
147 color = colors(i)
148
149 class_name = class_names[int(class_nr)]
150 confidence_percentage = round(float(conf) * 100, 2)
151 label = f"{class_name}: {confidence_percentage}%"
152
153 if conf > conf_threshold:
154 annotator.rectangle(box.cpu().numpy(), outline=color)
155 annotator.box_label(box.cpu().numpy(), label=label, color=color)
156
157 annotated_image = annotator.result()
158 self.task.get_logger().report_image(title='Bounding Boxes',
159 series=image_path.name,
160 iteration=self.current_epoch,
161 image=annotated_image)
162 self.current_epoch_logged_images.add(image_path)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -96,6 +96,7 @@
# Only the hyperparameters coming from the yaml config file
# will have to be added manually!
self.task.connect(hyp, name='Hyperparameters')
+ self.task.connect(opt, name='Args')
# Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent
self.task.set_base_docker("ultralytics/yolov5:latest",
| {"golden_diff": "diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py\n--- a/utils/loggers/clearml/clearml_utils.py\n+++ b/utils/loggers/clearml/clearml_utils.py\n@@ -96,6 +96,7 @@\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n+ self.task.connect(opt, name='Args')\n \n # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent\n self.task.set_base_docker(\"ultralytics/yolov5:latest\",\n", "issue": "clearml logs defaults args when use run function \n### Search before asking\n\n- [X] I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.\n\n\n### YOLOv5 Component\n\n_No response_\n\n### Bug\n\nwhen you want to run training with the `run` function as described here https://github.com/ultralytics/yolov5/blob/10c025d794ca395a2ca0b2a00aff65f3a92ecd8d/train.py#L622 \r\nthe clearml args logging \r\nhttps://github.com/allegroai/clearml/blob/7016138c849a4f8d0b4d296b319e0b23a1b7bd9e/clearml/binding/args.py#L231 \r\nskip the modification of the parameters \r\nin the end, when you look in the clearml server on configurations Args you see the default args parameters and not the one you trained with \r\n(Training is still with the good parameters you pass )\r\n\n\n### Environment\n\ndoker \r\n`from ultralytics/yolov5:latest`\n\n### Minimal Reproducible Example\n\npython\r\n```\r\nimport train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')\r\n```\r\nin clearml server configurations Args you will see `weights='yolov5s.pt'` `imgsz=640`\n\n### Additional\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [ ] Yes I'd like to help by submitting a PR!\n", "before_files": [{"content": "\"\"\"Main Logger class for ClearML experiment tracking.\"\"\"\nimport glob\nimport re\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\n\nfrom utils.plots import Annotator, colors\n\ntry:\n import clearml\n from clearml import Dataset, Task\n\n assert hasattr(clearml, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n clearml = None\n\n\ndef construct_dataset(clearml_info_string):\n \"\"\"Load in a clearml dataset and fill the internal data_dict with its contents.\n \"\"\"\n dataset_id = clearml_info_string.replace('clearml://', '')\n dataset = Dataset.get(dataset_id=dataset_id)\n dataset_root_path = Path(dataset.get_local_copy())\n\n # We'll search for the yaml file definition in the dataset\n yaml_filenames = list(glob.glob(str(dataset_root_path / \"*.yaml\")) + glob.glob(str(dataset_root_path / \"*.yml\")))\n if len(yaml_filenames) > 1:\n raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '\n 'the dataset definition this way.')\n elif len(yaml_filenames) == 0:\n raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '\n 'inside the dataset root path.')\n with open(yaml_filenames[0]) as f:\n dataset_definition = yaml.safe_load(f)\n\n assert set(dataset_definition.keys()).issuperset(\n {'train', 'test', 'val', 'nc', 'names'}\n ), \"The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')\"\n\n data_dict = dict()\n data_dict['train'] = str(\n (dataset_root_path / dataset_definition['train']).resolve()) if 
dataset_definition['train'] else None\n data_dict['test'] = str(\n (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None\n data_dict['val'] = str(\n (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None\n data_dict['nc'] = dataset_definition['nc']\n data_dict['names'] = dataset_definition['names']\n\n return data_dict\n\n\nclass ClearmlLogger:\n \"\"\"Log training runs, datasets, models, and predictions to ClearML.\n\n This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,\n this information includes hyperparameters, system configuration and metrics, model metrics, code information and\n basic data metrics and analyses.\n\n By providing additional command line arguments to train.py, datasets,\n models and predictions can also be logged.\n \"\"\"\n\n def __init__(self, opt, hyp):\n \"\"\"\n - Initialize ClearML Task, this object will capture the experiment\n - Upload dataset version to ClearML Data if opt.upload_dataset is True\n\n arguments:\n opt (namespace) -- Commandline arguments for this run\n hyp (dict) -- Hyperparameters for this run\n\n \"\"\"\n self.current_epoch = 0\n # Keep tracked of amount of logged images to enforce a limit\n self.current_epoch_logged_images = set()\n # Maximum number of images to log to clearML per epoch\n self.max_imgs_to_log_per_epoch = 16\n # Get the interval of epochs when bounding box images should be logged\n self.bbox_interval = opt.bbox_interval\n self.clearml = clearml\n self.task = None\n self.data_dict = None\n if self.clearml:\n self.task = Task.init(\n project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',\n task_name=opt.name if opt.name != 'exp' else 'Training',\n tags=['YOLOv5'],\n output_uri=True,\n auto_connect_frameworks={'pytorch': False}\n # We disconnect pytorch auto-detection, because we added manual model save points in the code\n )\n # ClearML's hooks will already grab all general parameters\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n\n # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent\n self.task.set_base_docker(\"ultralytics/yolov5:latest\",\n docker_arguments='--ipc=host -e=\"CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1\"',\n docker_setup_bash_script='pip install clearml')\n\n # Get ClearML Dataset Version if requested\n if opt.data.startswith('clearml://'):\n # data_dict should have the following keys:\n # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)\n self.data_dict = construct_dataset(opt.data)\n # Set data to data_dict because wandb will crash without this information and opt is the best way\n # to give it to them\n opt.data = self.data_dict\n\n def log_debug_samples(self, files, title='Debug Samples'):\n \"\"\"\n Log files (images) as debug samples in the ClearML task.\n\n arguments:\n files (List(PosixPath)) a list of file paths in PosixPath format\n title (str) A title that groups together images with the same values\n \"\"\"\n for f in files:\n if f.exists():\n it = re.search(r'_batch(\\d+)', f.name)\n iteration = int(it.groups()[0]) if it else 0\n self.task.get_logger().report_image(title=title,\n series=f.name.replace(it.group(), ''),\n local_path=str(f),\n iteration=iteration)\n\n def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):\n 
\"\"\"\n Draw the bounding boxes on a single image and report the result as a ClearML debug sample.\n\n arguments:\n image_path (PosixPath) the path the original image file\n boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]\n class_names (dict): dict containing mapping of class int to class name\n image (Tensor): A torch tensor containing the actual image data\n \"\"\"\n if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:\n # Log every bbox_interval times and deduplicate for any intermittend extra eval runs\n if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:\n im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))\n annotator = Annotator(im=im, pil=True)\n for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):\n color = colors(i)\n\n class_name = class_names[int(class_nr)]\n confidence_percentage = round(float(conf) * 100, 2)\n label = f\"{class_name}: {confidence_percentage}%\"\n\n if conf > conf_threshold:\n annotator.rectangle(box.cpu().numpy(), outline=color)\n annotator.box_label(box.cpu().numpy(), label=label, color=color)\n\n annotated_image = annotator.result()\n self.task.get_logger().report_image(title='Bounding Boxes',\n series=image_path.name,\n iteration=self.current_epoch,\n image=annotated_image)\n self.current_epoch_logged_images.add(image_path)\n", "path": "utils/loggers/clearml/clearml_utils.py"}], "after_files": [{"content": "\"\"\"Main Logger class for ClearML experiment tracking.\"\"\"\nimport glob\nimport re\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\n\nfrom utils.plots import Annotator, colors\n\ntry:\n import clearml\n from clearml import Dataset, Task\n\n assert hasattr(clearml, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n clearml = None\n\n\ndef construct_dataset(clearml_info_string):\n \"\"\"Load in a clearml dataset and fill the internal data_dict with its contents.\n \"\"\"\n dataset_id = clearml_info_string.replace('clearml://', '')\n dataset = Dataset.get(dataset_id=dataset_id)\n dataset_root_path = Path(dataset.get_local_copy())\n\n # We'll search for the yaml file definition in the dataset\n yaml_filenames = list(glob.glob(str(dataset_root_path / \"*.yaml\")) + glob.glob(str(dataset_root_path / \"*.yml\")))\n if len(yaml_filenames) > 1:\n raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '\n 'the dataset definition this way.')\n elif len(yaml_filenames) == 0:\n raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '\n 'inside the dataset root path.')\n with open(yaml_filenames[0]) as f:\n dataset_definition = yaml.safe_load(f)\n\n assert set(dataset_definition.keys()).issuperset(\n {'train', 'test', 'val', 'nc', 'names'}\n ), \"The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')\"\n\n data_dict = dict()\n data_dict['train'] = str(\n (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None\n data_dict['test'] = str(\n (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None\n data_dict['val'] = str(\n (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] 
else None\n data_dict['nc'] = dataset_definition['nc']\n data_dict['names'] = dataset_definition['names']\n\n return data_dict\n\n\nclass ClearmlLogger:\n \"\"\"Log training runs, datasets, models, and predictions to ClearML.\n\n This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,\n this information includes hyperparameters, system configuration and metrics, model metrics, code information and\n basic data metrics and analyses.\n\n By providing additional command line arguments to train.py, datasets,\n models and predictions can also be logged.\n \"\"\"\n\n def __init__(self, opt, hyp):\n \"\"\"\n - Initialize ClearML Task, this object will capture the experiment\n - Upload dataset version to ClearML Data if opt.upload_dataset is True\n\n arguments:\n opt (namespace) -- Commandline arguments for this run\n hyp (dict) -- Hyperparameters for this run\n\n \"\"\"\n self.current_epoch = 0\n # Keep tracked of amount of logged images to enforce a limit\n self.current_epoch_logged_images = set()\n # Maximum number of images to log to clearML per epoch\n self.max_imgs_to_log_per_epoch = 16\n # Get the interval of epochs when bounding box images should be logged\n self.bbox_interval = opt.bbox_interval\n self.clearml = clearml\n self.task = None\n self.data_dict = None\n if self.clearml:\n self.task = Task.init(\n project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',\n task_name=opt.name if opt.name != 'exp' else 'Training',\n tags=['YOLOv5'],\n output_uri=True,\n auto_connect_frameworks={'pytorch': False}\n # We disconnect pytorch auto-detection, because we added manual model save points in the code\n )\n # ClearML's hooks will already grab all general parameters\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n self.task.connect(opt, name='Args')\n\n # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent\n self.task.set_base_docker(\"ultralytics/yolov5:latest\",\n docker_arguments='--ipc=host -e=\"CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1\"',\n docker_setup_bash_script='pip install clearml')\n\n # Get ClearML Dataset Version if requested\n if opt.data.startswith('clearml://'):\n # data_dict should have the following keys:\n # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)\n self.data_dict = construct_dataset(opt.data)\n # Set data to data_dict because wandb will crash without this information and opt is the best way\n # to give it to them\n opt.data = self.data_dict\n\n def log_debug_samples(self, files, title='Debug Samples'):\n \"\"\"\n Log files (images) as debug samples in the ClearML task.\n\n arguments:\n files (List(PosixPath)) a list of file paths in PosixPath format\n title (str) A title that groups together images with the same values\n \"\"\"\n for f in files:\n if f.exists():\n it = re.search(r'_batch(\\d+)', f.name)\n iteration = int(it.groups()[0]) if it else 0\n self.task.get_logger().report_image(title=title,\n series=f.name.replace(it.group(), ''),\n local_path=str(f),\n iteration=iteration)\n\n def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):\n \"\"\"\n Draw the bounding boxes on a single image and report the result as a ClearML debug sample.\n\n arguments:\n image_path (PosixPath) the path the original image file\n boxes (list): list of scaled predictions in the format - [xmin, ymin, 
xmax, ymax, confidence, class]\n class_names (dict): dict containing mapping of class int to class name\n image (Tensor): A torch tensor containing the actual image data\n \"\"\"\n if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:\n # Log every bbox_interval times and deduplicate for any intermittend extra eval runs\n if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:\n im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))\n annotator = Annotator(im=im, pil=True)\n for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):\n color = colors(i)\n\n class_name = class_names[int(class_nr)]\n confidence_percentage = round(float(conf) * 100, 2)\n label = f\"{class_name}: {confidence_percentage}%\"\n\n if conf > conf_threshold:\n annotator.rectangle(box.cpu().numpy(), outline=color)\n annotator.box_label(box.cpu().numpy(), label=label, color=color)\n\n annotated_image = annotator.result()\n self.task.get_logger().report_image(title='Bounding Boxes',\n series=image_path.name,\n iteration=self.current_epoch,\n image=annotated_image)\n self.current_epoch_logged_images.add(image_path)\n", "path": "utils/loggers/clearml/clearml_utils.py"}]} | 2,683 | 154 |
gh_patches_debug_41072 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-1746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
init_weight is not called in paddleseg/models/hrnet_contrast.py
__init__() in paddleseg/models/hrnet_contrast.py does not call init_weight, so hrnet_w48_contrast cannot load the complete pretrained model.
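
A minimal sketch of the pattern in question (a toy layer for illustration, not the project's actual class): unless `__init__` ends with an explicit `self.init_weight()` call, the `pretrained` path handed to the constructor is silently ignored.

```python
import paddle.nn as nn

from paddleseg.utils import utils


class Example(nn.Layer):
    def __init__(self, pretrained=None):
        super().__init__()
        self.conv = nn.Conv2D(3, 3, kernel_size=1)
        self.pretrained = pretrained
        self.init_weight()  # the call that HRNetW48Contrast.__init__ is missing

    def init_weight(self):
        if self.pretrained is not None:
            utils.load_entire_model(self, self.pretrained)
```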
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `paddleseg/models/hrnet_contrast.py`
Content:
```
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import paddle
16 import paddle.nn as nn
17 import paddle.nn.functional as F
18
19 from paddleseg.cvlibs import manager
20 from paddleseg.models import layers
21 from paddleseg.utils import utils
22
23
24 @manager.MODELS.add_component
25 class HRNetW48Contrast(nn.Layer):
26 """
27 The HRNetW48Contrast implementation based on PaddlePaddle.
28
29 The original article refers to
30 Wenguan Wang, Tianfei Zhou, et al. "Exploring Cross-Image Pixel Contrast for Semantic Segmentation"
31 (https://arxiv.org/abs/2101.11939).
32
33 Args:
34 in_channels (int): The output dimensions of backbone.
35 num_classes (int): The unique number of target classes.
36 backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.
37 drop_prob (float): The probability of dropout.
38 proj_dim (int): The projection dimensions.
39 align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
40 e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
41 pretrained (str, optional): The path or url of pretrained model. Default: None.
42 """
43 def __init__(self,
44 in_channels,
45 num_classes,
46 backbone,
47 drop_prob,
48 proj_dim,
49 align_corners=False,
50 pretrained=None):
51 super().__init__()
52 self.in_channels = in_channels
53 self.backbone = backbone
54 self.num_classes = num_classes
55 self.proj_dim = proj_dim
56 self.align_corners = align_corners
57 self.pretrained = pretrained
58
59 self.cls_head = nn.Sequential(
60 layers.ConvBNReLU(in_channels,
61 in_channels,
62 kernel_size=3,
63 stride=1,
64 padding=1),
65 nn.Dropout2D(drop_prob),
66 nn.Conv2D(in_channels,
67 num_classes,
68 kernel_size=1,
69 stride=1,
70 bias_attr=False),
71 )
72 self.proj_head = ProjectionHead(dim_in=in_channels,
73 proj_dim=self.proj_dim)
74
75 def init_weight(self):
76 if self.pretrained is not None:
77 utils.load_entire_model(self, self.pretrained)
78
79 def forward(self, x):
80 feats = self.backbone(x)[0]
81 out = self.cls_head(feats)
82 logit_list = []
83 if self.training:
84 emb = self.proj_head(feats)
85 logit_list.append(
86 F.interpolate(out,
87 paddle.shape(x)[2:],
88 mode='bilinear',
89 align_corners=self.align_corners))
90 logit_list.append({'seg': out, 'embed': emb})
91 else:
92 logit_list.append(
93 F.interpolate(out,
94 paddle.shape(x)[2:],
95 mode='bilinear',
96 align_corners=self.align_corners))
97 return logit_list
98
99
100 class ProjectionHead(nn.Layer):
101 """
102 The projection head used by contrast learning.
103 Args:
104 dim_in (int): The dimensions of input features.
105 proj_dim (int, optional): The output dimensions of projection head. Default: 256.
106 proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
107 """
108 def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
109 super(ProjectionHead, self).__init__()
110 if proj == 'linear':
111 self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)
112 elif proj == 'convmlp':
113 self.proj = nn.Sequential(
114 layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),
115 nn.Conv2D(dim_in, proj_dim, kernel_size=1),
116 )
117 else:
118 raise ValueError(
119 "The type of project head only support 'linear' and 'convmlp', but got {}."
120 .format(proj))
121
122 def forward(self, x):
123 return F.normalize(self.proj(x), p=2, axis=1)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py
--- a/paddleseg/models/hrnet_contrast.py
+++ b/paddleseg/models/hrnet_contrast.py
@@ -40,6 +40,7 @@
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
+
def __init__(self,
in_channels,
num_classes,
@@ -54,23 +55,23 @@
self.num_classes = num_classes
self.proj_dim = proj_dim
self.align_corners = align_corners
- self.pretrained = pretrained
self.cls_head = nn.Sequential(
- layers.ConvBNReLU(in_channels,
- in_channels,
- kernel_size=3,
- stride=1,
- padding=1),
+ layers.ConvBNReLU(
+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),
nn.Dropout2D(drop_prob),
- nn.Conv2D(in_channels,
- num_classes,
- kernel_size=1,
- stride=1,
- bias_attr=False),
+ nn.Conv2D(
+ in_channels,
+ num_classes,
+ kernel_size=1,
+ stride=1,
+ bias_attr=False),
)
- self.proj_head = ProjectionHead(dim_in=in_channels,
- proj_dim=self.proj_dim)
+ self.proj_head = ProjectionHead(
+ dim_in=in_channels, proj_dim=self.proj_dim)
+
+ self.pretrained = pretrained
+ self.init_weight()
def init_weight(self):
if self.pretrained is not None:
@@ -83,17 +84,19 @@
if self.training:
emb = self.proj_head(feats)
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
logit_list.append({'seg': out, 'embed': emb})
else:
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
return logit_list
@@ -105,6 +108,7 @@
proj_dim (int, optional): The output dimensions of projection head. Default: 256.
proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
"""
+
def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
super(ProjectionHead, self).__init__()
if proj == 'linear':
| {"golden_diff": "diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py\n--- a/paddleseg/models/hrnet_contrast.py\n+++ b/paddleseg/models/hrnet_contrast.py\n@@ -40,6 +40,7 @@\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n+\n def __init__(self,\n in_channels,\n num_classes,\n@@ -54,23 +55,23 @@\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n- self.pretrained = pretrained\n \n self.cls_head = nn.Sequential(\n- layers.ConvBNReLU(in_channels,\n- in_channels,\n- kernel_size=3,\n- stride=1,\n- padding=1),\n+ layers.ConvBNReLU(\n+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n- nn.Conv2D(in_channels,\n- num_classes,\n- kernel_size=1,\n- stride=1,\n- bias_attr=False),\n+ nn.Conv2D(\n+ in_channels,\n+ num_classes,\n+ kernel_size=1,\n+ stride=1,\n+ bias_attr=False),\n )\n- self.proj_head = ProjectionHead(dim_in=in_channels,\n- proj_dim=self.proj_dim)\n+ self.proj_head = ProjectionHead(\n+ dim_in=in_channels, proj_dim=self.proj_dim)\n+\n+ self.pretrained = pretrained\n+ self.init_weight()\n \n def init_weight(self):\n if self.pretrained is not None:\n@@ -83,17 +84,19 @@\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n return logit_list\n \n \n@@ -105,6 +108,7 @@\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n+\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n", "issue": "paddleseg/models/hrnet_contrast.py \u4e2d\u6ca1\u6709\u6267\u884c init_weight\npaddleseg/models/hrnet_contrast.py \u4e2d__init__()\u6ca1\u6709\u6267\u884c init_weight\uff0c\u5bfc\u81f4hrnet_w48_contrast \u6ca1\u6cd5\u52a0\u8f7d\u5b8c\u6574\u7684\u6a21\u578b\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n self.pretrained = pretrained\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(in_channels,\n in_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(dim_in=in_channels,\n proj_dim=self.proj_dim)\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. 
Default: 'convmlp'.\n \"\"\"\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}], "after_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. 
Default: None.\n \"\"\"\n\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(\n in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(\n in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(\n dim_in=in_channels, proj_dim=self.proj_dim)\n\n self.pretrained = pretrained\n self.init_weight()\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(\n out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(\n out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}]} | 1,602 | 701 |
gh_patches_debug_32021 | rasdani/github-patches | git_diff | nextcloud__appstore-298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify own name error
There is an error when saving the name on the user profile: it says the e-mail address is already registered instead of saving.
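
Judging from `AccountForm.clean_email` in `nextcloudappstore/core/user/forms.py` (shown in full below), the validator appears to reject the user's own unchanged address as well. A hedged sketch of one possible repair follows; it is an assumption about the fix, not necessarily the patch that was merged.

```python
from allauth.account.utils import filter_users_by_email
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _


class AccountForm(forms.ModelForm):
    class Meta:
        model = get_user_model()
        fields = ('first_name', 'last_name', 'email')

    def clean_email(self):
        value = self.cleaned_data['email']
        users = filter_users_by_email(value)
        # Re-submitting your own current address (e.g. when only the name
        # changed) must not fail validation; only other accounts conflict.
        if any(u.pk != self.instance.pk for u in users):
            raise forms.ValidationError(
                _('This e-mail address is already associated'
                  ' with another account.'))
        return value
```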
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/core/api/v1/release/importer.py`
Content:
```
1 from typing import Dict, Any, Set, Tuple # type: ignore
2 from semantic_version import Version # type: ignore
3 from django.utils import timezone
4 from nextcloudappstore.core.versioning import to_spec, to_raw_spec
5 from nextcloudappstore.core.models import App, Screenshot, Category, \
6 AppRelease, ShellCommand, License, Database, DatabaseDependency, \
7 PhpExtensionDependency, PhpExtension, AppAuthor
8
9
10 def none_to_empty_string(value: str) -> str:
11 if value is None:
12 return ''
13 else:
14 return value
15
16
17 class Importer:
18 def __init__(self, importers: Dict[str, 'Importer'],
19 ignored_fields: Set[str]) -> None:
20 self.importers = importers
21 self.ignored_fields = ignored_fields
22
23 def import_data(self, key: str, value: Any, obj: Any) -> None:
24 obj = self._get_object(key, value, obj)
25 value, obj = self._before_import(key, value, obj)
26 for key, val in value.items():
27 if key not in self.ignored_fields:
28 self.importers[key].import_data(key, val, obj)
29 obj.save()
30
31 def _get_object(self, key: str, value: Any, obj: Any) -> Any:
32 raise NotImplementedError
33
34 def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,
35 Any]:
36 raise NotImplementedError
37
38
39 class ScalarImporter(Importer):
40 def __init__(self) -> None:
41 super().__init__({}, set())
42
43
44 class PhpExtensionImporter(ScalarImporter):
45 def import_data(self, key: str, value: Any, obj: Any) -> None:
46 for ext in value:
47 version_spec = to_spec(ext['php_extension']['min_version'],
48 ext['php_extension']['max_version'])
49 raw_version_spec = to_raw_spec(
50 ext['php_extension']['raw_min_version'],
51 ext['php_extension']['raw_max_version'])
52 extension, created = PhpExtension.objects.get_or_create(
53 id=ext['php_extension']['id'])
54 PhpExtensionDependency.objects.create(
55 version_spec=version_spec,
56 raw_version_spec=raw_version_spec,
57 app_release=obj, php_extension=extension,
58 )
59
60
61 class DatabaseImporter(ScalarImporter):
62 def import_data(self, key: str, value: Any, obj: Any) -> None:
63 for db in value:
64 version_spec = to_spec(db['database']['min_version'],
65 db['database']['max_version'])
66 raw_version_spec = to_raw_spec(db['database']['raw_min_version'],
67 db['database']['raw_max_version'])
68 # all dbs should be known already
69 database = Database.objects.get(id=db['database']['id'])
70 DatabaseDependency.objects.create(
71 version_spec=version_spec,
72 raw_version_spec=raw_version_spec,
73 app_release=obj, database=database,
74 )
75
76
77 class LicenseImporter(ScalarImporter):
78 def import_data(self, key: str, value: Any, obj: Any) -> None:
79 def map_models(data: Dict) -> License:
80 id = data['license']['id']
81 model, created = License.objects.get_or_create(id=id)
82 return model
83
84 obj.licenses.set(list(map(map_models, value)))
85
86
87 class ShellCommandImporter(ScalarImporter):
88 def import_data(self, key: str, value: Any, obj: Any) -> None:
89 def map_commands(data: Dict) -> ShellCommand:
90 name = data['shell_command']['name']
91 command, created = ShellCommand.objects.get_or_create(name=name)
92 return command
93
94 obj.shell_commands.set(list(map(map_commands, value)))
95
96
97 class AuthorImporter(ScalarImporter):
98 def import_data(self, key: str, value: Any, obj: Any) -> None:
99 def map_authors(data: Dict) -> AppAuthor:
100 author = data['author']
101 return AppAuthor.objects.create(
102 name=author['name'],
103 mail=none_to_empty_string(author['mail']),
104 homepage=none_to_empty_string(author['homepage'])
105 )
106
107 obj.authors.set(list(map(map_authors, value)))
108
109
110 class IntegerAttributeImporter(ScalarImporter):
111 def import_data(self, key: str, value: Any, obj: Any) -> None:
112 setattr(obj, key, value)
113
114
115 class StringAttributeImporter(ScalarImporter):
116 def import_data(self, key: str, value: Any, obj: Any) -> None:
117 setattr(obj, key, none_to_empty_string(value))
118
119
120 class MinVersionImporter(ScalarImporter):
121 def import_data(self, key: str, value: Any, obj: Any) -> None:
122 setattr(obj, key, value)
123
124
125 class MaxVersionImporter(ScalarImporter):
126 def import_data(self, key: str, value: Any, obj: Any) -> None:
127 setattr(obj, key, value)
128
129
130 class ScreenshotsImporter(ScalarImporter):
131 def import_data(self, key: str, value: Any, obj: Any) -> None:
132 obj.screenshots.set(list(map(
133 lambda img: Screenshot.objects.create(
134 url=img['screenshot']['url'], app=obj,
135 ordering=img['screenshot']['ordering']
136 ), value
137 )))
138
139
140 class CategoryImporter(ScalarImporter):
141 def import_data(self, key: str, value: Any, obj: Any) -> None:
142 def map_categories(cat: Dict) -> Category:
143 id = cat['category']['id']
144 category, created = Category.objects.get_or_create(id=id)
145 return category
146
147 obj.categories.set(list(map(map_categories, value)))
148
149
150 class L10NImporter(ScalarImporter):
151 def import_data(self, key: str, value: Any, obj: Any) -> None:
152 for lang, translation in value.items():
153 obj.set_current_language(lang)
154 setattr(obj, key, translation)
155 obj.save()
156
157
158 class AppReleaseImporter(Importer):
159 def __init__(self, php_extension_importer: PhpExtensionImporter,
160 database_importer: DatabaseImporter,
161 license_importer: LicenseImporter,
162 shell_command_importer: ShellCommandImporter,
163 string_attribute_importer: StringAttributeImporter,
164 integer_attribute_importer: IntegerAttributeImporter) -> None:
165 super().__init__({
166 'php_extensions': php_extension_importer,
167 'databases': database_importer,
168 'licenses': license_importer,
169 'php_version_spec': string_attribute_importer,
170 'platform_version_spec': string_attribute_importer,
171 'raw_php_version_spec': string_attribute_importer,
172 'raw_platform_version_spec': string_attribute_importer,
173 'min_int_size': integer_attribute_importer,
174 'shell_commands': shell_command_importer,
175 'signature': string_attribute_importer,
176 'download': string_attribute_importer,
177 }, {
178 'version',
179 'raw_version',
180 'php_min_version',
181 'php_max_version',
182 'raw_php_min_version',
183 'raw_php_max_version',
184 'platform_min_version',
185 'platform_max_version',
186 'raw_platform_min_version',
187 'raw_platform_max_version',
188 })
189
190 def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,
191 Any]:
192 # combine versions into specs
193 value['platform_version_spec'] = to_spec(
194 value['platform_min_version'], value['platform_max_version'])
195 value['php_version_spec'] = to_spec(value['php_min_version'],
196 value['php_max_version'])
197 value['raw_platform_version_spec'] = to_raw_spec(
198 value['raw_platform_min_version'],
199 value['raw_platform_max_version'])
200 value['raw_php_version_spec'] = to_raw_spec(
201 value['raw_php_min_version'],
202 value['raw_php_max_version'])
203 obj.licenses.clear()
204 obj.shell_commands.clear()
205 obj.licenses.clear()
206 obj.php_extensions.clear()
207 obj.databases.clear()
208
209 if value['version'].endswith('-nightly'):
210 AppRelease.objects.filter(
211 app__id=obj.id, version__endswith='-nightly').delete()
212
213 return value, obj
214
215 def _get_object(self, key: str, value: Any, obj: Any) -> Any:
216 release, created = AppRelease.objects.get_or_create(
217 version=value['version'], app=obj
218 )
219 return release
220
221
222 class AppImporter(Importer):
223 def __init__(self, release_importer: AppReleaseImporter,
224 screenshots_importer: ScreenshotsImporter,
225 attribute_importer: StringAttributeImporter,
226 l10n_importer: L10NImporter,
227 category_importer: CategoryImporter,
228 author_importer: AuthorImporter) -> None:
229 super().__init__({
230 'release': release_importer,
231 'screenshots': screenshots_importer,
232 'user_docs': attribute_importer,
233 'admin_docs': attribute_importer,
234 'website': attribute_importer,
235 'discussion': attribute_importer,
236 'developer_docs': attribute_importer,
237 'issue_tracker': attribute_importer,
238 'certificate': attribute_importer,
239 'name': l10n_importer,
240 'summary': l10n_importer,
241 'description': l10n_importer,
242 'categories': category_importer,
243 'authors': author_importer
244 }, {'id'})
245
246 def _get_object(self, key: str, value: Any, obj: Any) -> Any:
247 # only update app if newest or equal to newest release
248 app, created = App.objects.get_or_create(pk=value['id'])
249 return app
250
251 def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,
252 Any]:
253 obj.last_release = timezone.now()
254 # only new releases update an app's data
255 if not self._is_latest_version(value):
256 value = {'id': value['id'], 'release': value['release']}
257 else:
258 # clear all relations
259 obj.screenshots.all().delete()
260 obj.authors.all().delete()
261 obj.categories.clear()
262 for translation in obj.translations.all():
263 translation.delete()
264 return value, obj
265
266 def _is_latest_version(self, value: Any) -> bool:
267 releases = AppRelease.objects.filter(app__id=value['id'])
268 uploaded_version = Version(value['release']['version'])
269 for release in releases:
270 if uploaded_version < Version(release.version):
271 return False
272 return True
273
```
Path: `nextcloudappstore/core/user/forms.py`
Content:
```
1 from allauth.account.models import EmailAddress
2 from allauth.account.utils import filter_users_by_email
3 from django import forms
4 from captcha.fields import ReCaptchaField
5 from django.conf import settings
6 from django.contrib.auth import get_user_model
7 from django.forms import EmailField, CharField
8 from django.utils.translation import ugettext_lazy as _
9
10
11 class SignupFormRecaptcha(forms.Form):
12 """integrate a recaptcha field."""
13 recaptcha = ReCaptchaField()
14 first_name = CharField(max_length=30, label=_('First name'))
15 last_name = CharField(max_length=30, label=_('Last name'))
16
17 def signup(self, request, user):
18 user.first_name = self.cleaned_data['first_name']
19 user.last_name = self.cleaned_data['last_name']
20 user.save()
21
22
23 class DeleteAccountForm(forms.Form):
24 email = EmailField(required=True, label=_('Your e-mail address'))
25
26 def __init__(self, *args, **kwargs):
27 self.user = kwargs.pop('user', None)
28 super().__init__(*args, **kwargs)
29
30 def clean_email(self):
31 email = self.cleaned_data.get('email')
32 if self.user and self.user.email == email:
33 return email
34 else:
35 raise forms.ValidationError(_(
36 'The given e-mail address does not match your e-mail address'))
37
38
39 class AccountForm(forms.ModelForm):
40 class Meta:
41 model = get_user_model()
42 fields = ('first_name', 'last_name', 'email')
43
44 def clean_email(self):
45 value = self.cleaned_data["email"]
46 errors = {
47 "this_account": _("This e-mail address is already associated"
48 " with this account."),
49 "different_account": _("This e-mail address is already associated"
50 " with another account."),
51 }
52 users = filter_users_by_email(value)
53 on_this_account = [u for u in users if u.pk == self.instance.pk]
54 on_diff_account = [u for u in users if u.pk != self.instance.pk]
55
56 if on_this_account:
57 raise forms.ValidationError(errors["this_account"])
58 if on_diff_account:
59 raise forms.ValidationError(errors["different_account"])
60 return value
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nextcloudappstore/core/api/v1/release/importer.py b/nextcloudappstore/core/api/v1/release/importer.py
--- a/nextcloudappstore/core/api/v1/release/importer.py
+++ b/nextcloudappstore/core/api/v1/release/importer.py
@@ -1,4 +1,6 @@
from typing import Dict, Any, Set, Tuple # type: ignore
+
+from django.conf import settings
from semantic_version import Version # type: ignore
from django.utils import timezone
from nextcloudappstore.core.versioning import to_spec, to_raw_spec
@@ -149,8 +151,9 @@
class L10NImporter(ScalarImporter):
def import_data(self, key: str, value: Any, obj: Any) -> None:
- for lang, translation in value.items():
- obj.set_current_language(lang)
+ for code, _ in settings.LANGUAGES:
+ translation = value.get(code, value.get('en'))
+ obj.set_current_language(code)
setattr(obj, key, translation)
obj.save()
diff --git a/nextcloudappstore/core/user/forms.py b/nextcloudappstore/core/user/forms.py
--- a/nextcloudappstore/core/user/forms.py
+++ b/nextcloudappstore/core/user/forms.py
@@ -43,18 +43,10 @@
def clean_email(self):
value = self.cleaned_data["email"]
- errors = {
- "this_account": _("This e-mail address is already associated"
- " with this account."),
- "different_account": _("This e-mail address is already associated"
- " with another account."),
- }
users = filter_users_by_email(value)
- on_this_account = [u for u in users if u.pk == self.instance.pk]
- on_diff_account = [u for u in users if u.pk != self.instance.pk]
-
- if on_this_account:
- raise forms.ValidationError(errors["this_account"])
- if on_diff_account:
- raise forms.ValidationError(errors["different_account"])
+ if [u for u in users if u.pk != self.instance.pk]:
+ msg = _(
+ "This e-mail address is already associated with another "
+ "account.")
+ raise forms.ValidationError(msg)
return value
| {"golden_diff": "diff --git a/nextcloudappstore/core/api/v1/release/importer.py b/nextcloudappstore/core/api/v1/release/importer.py\n--- a/nextcloudappstore/core/api/v1/release/importer.py\n+++ b/nextcloudappstore/core/api/v1/release/importer.py\n@@ -1,4 +1,6 @@\n from typing import Dict, Any, Set, Tuple # type: ignore\n+\n+from django.conf import settings\n from semantic_version import Version # type: ignore\n from django.utils import timezone\n from nextcloudappstore.core.versioning import to_spec, to_raw_spec\n@@ -149,8 +151,9 @@\n \n class L10NImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n- for lang, translation in value.items():\n- obj.set_current_language(lang)\n+ for code, _ in settings.LANGUAGES:\n+ translation = value.get(code, value.get('en'))\n+ obj.set_current_language(code)\n setattr(obj, key, translation)\n obj.save()\n \ndiff --git a/nextcloudappstore/core/user/forms.py b/nextcloudappstore/core/user/forms.py\n--- a/nextcloudappstore/core/user/forms.py\n+++ b/nextcloudappstore/core/user/forms.py\n@@ -43,18 +43,10 @@\n \n def clean_email(self):\n value = self.cleaned_data[\"email\"]\n- errors = {\n- \"this_account\": _(\"This e-mail address is already associated\"\n- \" with this account.\"),\n- \"different_account\": _(\"This e-mail address is already associated\"\n- \" with another account.\"),\n- }\n users = filter_users_by_email(value)\n- on_this_account = [u for u in users if u.pk == self.instance.pk]\n- on_diff_account = [u for u in users if u.pk != self.instance.pk]\n-\n- if on_this_account:\n- raise forms.ValidationError(errors[\"this_account\"])\n- if on_diff_account:\n- raise forms.ValidationError(errors[\"different_account\"])\n+ if [u for u in users if u.pk != self.instance.pk]:\n+ msg = _(\n+ \"This e-mail address is already associated with another \"\n+ \"account.\")\n+ raise forms.ValidationError(msg)\n return value\n", "issue": "Modify own name error\nThere is an error when saving name on the user profile. 
It said the mail address is already registered instead of saving\n\n", "before_files": [{"content": "from typing import Dict, Any, Set, Tuple # type: ignore\nfrom semantic_version import Version # type: ignore\nfrom django.utils import timezone\nfrom nextcloudappstore.core.versioning import to_spec, to_raw_spec\nfrom nextcloudappstore.core.models import App, Screenshot, Category, \\\n AppRelease, ShellCommand, License, Database, DatabaseDependency, \\\n PhpExtensionDependency, PhpExtension, AppAuthor\n\n\ndef none_to_empty_string(value: str) -> str:\n if value is None:\n return ''\n else:\n return value\n\n\nclass Importer:\n def __init__(self, importers: Dict[str, 'Importer'],\n ignored_fields: Set[str]) -> None:\n self.importers = importers\n self.ignored_fields = ignored_fields\n\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n obj = self._get_object(key, value, obj)\n value, obj = self._before_import(key, value, obj)\n for key, val in value.items():\n if key not in self.ignored_fields:\n self.importers[key].import_data(key, val, obj)\n obj.save()\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n raise NotImplementedError\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n raise NotImplementedError\n\n\nclass ScalarImporter(Importer):\n def __init__(self) -> None:\n super().__init__({}, set())\n\n\nclass PhpExtensionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for ext in value:\n version_spec = to_spec(ext['php_extension']['min_version'],\n ext['php_extension']['max_version'])\n raw_version_spec = to_raw_spec(\n ext['php_extension']['raw_min_version'],\n ext['php_extension']['raw_max_version'])\n extension, created = PhpExtension.objects.get_or_create(\n id=ext['php_extension']['id'])\n PhpExtensionDependency.objects.create(\n version_spec=version_spec,\n raw_version_spec=raw_version_spec,\n app_release=obj, php_extension=extension,\n )\n\n\nclass DatabaseImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for db in value:\n version_spec = to_spec(db['database']['min_version'],\n db['database']['max_version'])\n raw_version_spec = to_raw_spec(db['database']['raw_min_version'],\n db['database']['raw_max_version'])\n # all dbs should be known already\n database = Database.objects.get(id=db['database']['id'])\n DatabaseDependency.objects.create(\n version_spec=version_spec,\n raw_version_spec=raw_version_spec,\n app_release=obj, database=database,\n )\n\n\nclass LicenseImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_models(data: Dict) -> License:\n id = data['license']['id']\n model, created = License.objects.get_or_create(id=id)\n return model\n\n obj.licenses.set(list(map(map_models, value)))\n\n\nclass ShellCommandImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_commands(data: Dict) -> ShellCommand:\n name = data['shell_command']['name']\n command, created = ShellCommand.objects.get_or_create(name=name)\n return command\n\n obj.shell_commands.set(list(map(map_commands, value)))\n\n\nclass AuthorImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_authors(data: Dict) -> AppAuthor:\n author = data['author']\n return AppAuthor.objects.create(\n name=author['name'],\n mail=none_to_empty_string(author['mail']),\n homepage=none_to_empty_string(author['homepage'])\n )\n\n 
obj.authors.set(list(map(map_authors, value)))\n\n\nclass IntegerAttributeImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass StringAttributeImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, none_to_empty_string(value))\n\n\nclass MinVersionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass MaxVersionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass ScreenshotsImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n obj.screenshots.set(list(map(\n lambda img: Screenshot.objects.create(\n url=img['screenshot']['url'], app=obj,\n ordering=img['screenshot']['ordering']\n ), value\n )))\n\n\nclass CategoryImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_categories(cat: Dict) -> Category:\n id = cat['category']['id']\n category, created = Category.objects.get_or_create(id=id)\n return category\n\n obj.categories.set(list(map(map_categories, value)))\n\n\nclass L10NImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for lang, translation in value.items():\n obj.set_current_language(lang)\n setattr(obj, key, translation)\n obj.save()\n\n\nclass AppReleaseImporter(Importer):\n def __init__(self, php_extension_importer: PhpExtensionImporter,\n database_importer: DatabaseImporter,\n license_importer: LicenseImporter,\n shell_command_importer: ShellCommandImporter,\n string_attribute_importer: StringAttributeImporter,\n integer_attribute_importer: IntegerAttributeImporter) -> None:\n super().__init__({\n 'php_extensions': php_extension_importer,\n 'databases': database_importer,\n 'licenses': license_importer,\n 'php_version_spec': string_attribute_importer,\n 'platform_version_spec': string_attribute_importer,\n 'raw_php_version_spec': string_attribute_importer,\n 'raw_platform_version_spec': string_attribute_importer,\n 'min_int_size': integer_attribute_importer,\n 'shell_commands': shell_command_importer,\n 'signature': string_attribute_importer,\n 'download': string_attribute_importer,\n }, {\n 'version',\n 'raw_version',\n 'php_min_version',\n 'php_max_version',\n 'raw_php_min_version',\n 'raw_php_max_version',\n 'platform_min_version',\n 'platform_max_version',\n 'raw_platform_min_version',\n 'raw_platform_max_version',\n })\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n # combine versions into specs\n value['platform_version_spec'] = to_spec(\n value['platform_min_version'], value['platform_max_version'])\n value['php_version_spec'] = to_spec(value['php_min_version'],\n value['php_max_version'])\n value['raw_platform_version_spec'] = to_raw_spec(\n value['raw_platform_min_version'],\n value['raw_platform_max_version'])\n value['raw_php_version_spec'] = to_raw_spec(\n value['raw_php_min_version'],\n value['raw_php_max_version'])\n obj.licenses.clear()\n obj.shell_commands.clear()\n obj.licenses.clear()\n obj.php_extensions.clear()\n obj.databases.clear()\n\n if value['version'].endswith('-nightly'):\n AppRelease.objects.filter(\n app__id=obj.id, version__endswith='-nightly').delete()\n\n return value, obj\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n release, created = AppRelease.objects.get_or_create(\n 
version=value['version'], app=obj\n )\n return release\n\n\nclass AppImporter(Importer):\n def __init__(self, release_importer: AppReleaseImporter,\n screenshots_importer: ScreenshotsImporter,\n attribute_importer: StringAttributeImporter,\n l10n_importer: L10NImporter,\n category_importer: CategoryImporter,\n author_importer: AuthorImporter) -> None:\n super().__init__({\n 'release': release_importer,\n 'screenshots': screenshots_importer,\n 'user_docs': attribute_importer,\n 'admin_docs': attribute_importer,\n 'website': attribute_importer,\n 'discussion': attribute_importer,\n 'developer_docs': attribute_importer,\n 'issue_tracker': attribute_importer,\n 'certificate': attribute_importer,\n 'name': l10n_importer,\n 'summary': l10n_importer,\n 'description': l10n_importer,\n 'categories': category_importer,\n 'authors': author_importer\n }, {'id'})\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n # only update app if newest or equal to newest release\n app, created = App.objects.get_or_create(pk=value['id'])\n return app\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n obj.last_release = timezone.now()\n # only new releases update an app's data\n if not self._is_latest_version(value):\n value = {'id': value['id'], 'release': value['release']}\n else:\n # clear all relations\n obj.screenshots.all().delete()\n obj.authors.all().delete()\n obj.categories.clear()\n for translation in obj.translations.all():\n translation.delete()\n return value, obj\n\n def _is_latest_version(self, value: Any) -> bool:\n releases = AppRelease.objects.filter(app__id=value['id'])\n uploaded_version = Version(value['release']['version'])\n for release in releases:\n if uploaded_version < Version(release.version):\n return False\n return True\n", "path": "nextcloudappstore/core/api/v1/release/importer.py"}, {"content": "from allauth.account.models import EmailAddress\nfrom allauth.account.utils import filter_users_by_email\nfrom django import forms\nfrom captcha.fields import ReCaptchaField\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.forms import EmailField, CharField\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass SignupFormRecaptcha(forms.Form):\n \"\"\"integrate a recaptcha field.\"\"\"\n recaptcha = ReCaptchaField()\n first_name = CharField(max_length=30, label=_('First name'))\n last_name = CharField(max_length=30, label=_('Last name'))\n\n def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n\nclass DeleteAccountForm(forms.Form):\n email = EmailField(required=True, label=_('Your e-mail address'))\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n if self.user and self.user.email == email:\n return email\n else:\n raise forms.ValidationError(_(\n 'The given e-mail address does not match your e-mail address'))\n\n\nclass AccountForm(forms.ModelForm):\n class Meta:\n model = get_user_model()\n fields = ('first_name', 'last_name', 'email')\n\n def clean_email(self):\n value = self.cleaned_data[\"email\"]\n errors = {\n \"this_account\": _(\"This e-mail address is already associated\"\n \" with this account.\"),\n \"different_account\": _(\"This e-mail address is already associated\"\n \" with another account.\"),\n }\n users = filter_users_by_email(value)\n 
on_this_account = [u for u in users if u.pk == self.instance.pk]\n on_diff_account = [u for u in users if u.pk != self.instance.pk]\n\n if on_this_account:\n raise forms.ValidationError(errors[\"this_account\"])\n if on_diff_account:\n raise forms.ValidationError(errors[\"different_account\"])\n return value\n", "path": "nextcloudappstore/core/user/forms.py"}], "after_files": [{"content": "from typing import Dict, Any, Set, Tuple # type: ignore\n\nfrom django.conf import settings\nfrom semantic_version import Version # type: ignore\nfrom django.utils import timezone\nfrom nextcloudappstore.core.versioning import to_spec, to_raw_spec\nfrom nextcloudappstore.core.models import App, Screenshot, Category, \\\n AppRelease, ShellCommand, License, Database, DatabaseDependency, \\\n PhpExtensionDependency, PhpExtension, AppAuthor\n\n\ndef none_to_empty_string(value: str) -> str:\n if value is None:\n return ''\n else:\n return value\n\n\nclass Importer:\n def __init__(self, importers: Dict[str, 'Importer'],\n ignored_fields: Set[str]) -> None:\n self.importers = importers\n self.ignored_fields = ignored_fields\n\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n obj = self._get_object(key, value, obj)\n value, obj = self._before_import(key, value, obj)\n for key, val in value.items():\n if key not in self.ignored_fields:\n self.importers[key].import_data(key, val, obj)\n obj.save()\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n raise NotImplementedError\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n raise NotImplementedError\n\n\nclass ScalarImporter(Importer):\n def __init__(self) -> None:\n super().__init__({}, set())\n\n\nclass PhpExtensionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for ext in value:\n version_spec = to_spec(ext['php_extension']['min_version'],\n ext['php_extension']['max_version'])\n raw_version_spec = to_raw_spec(\n ext['php_extension']['raw_min_version'],\n ext['php_extension']['raw_max_version'])\n extension, created = PhpExtension.objects.get_or_create(\n id=ext['php_extension']['id'])\n PhpExtensionDependency.objects.create(\n version_spec=version_spec,\n raw_version_spec=raw_version_spec,\n app_release=obj, php_extension=extension,\n )\n\n\nclass DatabaseImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for db in value:\n version_spec = to_spec(db['database']['min_version'],\n db['database']['max_version'])\n raw_version_spec = to_raw_spec(db['database']['raw_min_version'],\n db['database']['raw_max_version'])\n # all dbs should be known already\n database = Database.objects.get(id=db['database']['id'])\n DatabaseDependency.objects.create(\n version_spec=version_spec,\n raw_version_spec=raw_version_spec,\n app_release=obj, database=database,\n )\n\n\nclass LicenseImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_models(data: Dict) -> License:\n id = data['license']['id']\n model, created = License.objects.get_or_create(id=id)\n return model\n\n obj.licenses.set(list(map(map_models, value)))\n\n\nclass ShellCommandImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_commands(data: Dict) -> ShellCommand:\n name = data['shell_command']['name']\n command, created = ShellCommand.objects.get_or_create(name=name)\n return command\n\n obj.shell_commands.set(list(map(map_commands, value)))\n\n\nclass 
AuthorImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_authors(data: Dict) -> AppAuthor:\n author = data['author']\n return AppAuthor.objects.create(\n name=author['name'],\n mail=none_to_empty_string(author['mail']),\n homepage=none_to_empty_string(author['homepage'])\n )\n\n obj.authors.set(list(map(map_authors, value)))\n\n\nclass IntegerAttributeImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass StringAttributeImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, none_to_empty_string(value))\n\n\nclass MinVersionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass MaxVersionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass ScreenshotsImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n obj.screenshots.set(list(map(\n lambda img: Screenshot.objects.create(\n url=img['screenshot']['url'], app=obj,\n ordering=img['screenshot']['ordering']\n ), value\n )))\n\n\nclass CategoryImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_categories(cat: Dict) -> Category:\n id = cat['category']['id']\n category, created = Category.objects.get_or_create(id=id)\n return category\n\n obj.categories.set(list(map(map_categories, value)))\n\n\nclass L10NImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for code, _ in settings.LANGUAGES:\n translation = value.get(code, value.get('en'))\n obj.set_current_language(code)\n setattr(obj, key, translation)\n obj.save()\n\n\nclass AppReleaseImporter(Importer):\n def __init__(self, php_extension_importer: PhpExtensionImporter,\n database_importer: DatabaseImporter,\n license_importer: LicenseImporter,\n shell_command_importer: ShellCommandImporter,\n string_attribute_importer: StringAttributeImporter,\n integer_attribute_importer: IntegerAttributeImporter) -> None:\n super().__init__({\n 'php_extensions': php_extension_importer,\n 'databases': database_importer,\n 'licenses': license_importer,\n 'php_version_spec': string_attribute_importer,\n 'platform_version_spec': string_attribute_importer,\n 'raw_php_version_spec': string_attribute_importer,\n 'raw_platform_version_spec': string_attribute_importer,\n 'min_int_size': integer_attribute_importer,\n 'shell_commands': shell_command_importer,\n 'signature': string_attribute_importer,\n 'download': string_attribute_importer,\n }, {\n 'version',\n 'raw_version',\n 'php_min_version',\n 'php_max_version',\n 'raw_php_min_version',\n 'raw_php_max_version',\n 'platform_min_version',\n 'platform_max_version',\n 'raw_platform_min_version',\n 'raw_platform_max_version',\n })\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n # combine versions into specs\n value['platform_version_spec'] = to_spec(\n value['platform_min_version'], value['platform_max_version'])\n value['php_version_spec'] = to_spec(value['php_min_version'],\n value['php_max_version'])\n value['raw_platform_version_spec'] = to_raw_spec(\n value['raw_platform_min_version'],\n value['raw_platform_max_version'])\n value['raw_php_version_spec'] = to_raw_spec(\n value['raw_php_min_version'],\n value['raw_php_max_version'])\n obj.licenses.clear()\n 
obj.shell_commands.clear()\n obj.licenses.clear()\n obj.php_extensions.clear()\n obj.databases.clear()\n\n if value['version'].endswith('-nightly'):\n AppRelease.objects.filter(\n app__id=obj.id, version__endswith='-nightly').delete()\n\n return value, obj\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n release, created = AppRelease.objects.get_or_create(\n version=value['version'], app=obj\n )\n return release\n\n\nclass AppImporter(Importer):\n def __init__(self, release_importer: AppReleaseImporter,\n screenshots_importer: ScreenshotsImporter,\n attribute_importer: StringAttributeImporter,\n l10n_importer: L10NImporter,\n category_importer: CategoryImporter,\n author_importer: AuthorImporter) -> None:\n super().__init__({\n 'release': release_importer,\n 'screenshots': screenshots_importer,\n 'user_docs': attribute_importer,\n 'admin_docs': attribute_importer,\n 'website': attribute_importer,\n 'discussion': attribute_importer,\n 'developer_docs': attribute_importer,\n 'issue_tracker': attribute_importer,\n 'certificate': attribute_importer,\n 'name': l10n_importer,\n 'summary': l10n_importer,\n 'description': l10n_importer,\n 'categories': category_importer,\n 'authors': author_importer\n }, {'id'})\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n # only update app if newest or equal to newest release\n app, created = App.objects.get_or_create(pk=value['id'])\n return app\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n obj.last_release = timezone.now()\n # only new releases update an app's data\n if not self._is_latest_version(value):\n value = {'id': value['id'], 'release': value['release']}\n else:\n # clear all relations\n obj.screenshots.all().delete()\n obj.authors.all().delete()\n obj.categories.clear()\n for translation in obj.translations.all():\n translation.delete()\n return value, obj\n\n def _is_latest_version(self, value: Any) -> bool:\n releases = AppRelease.objects.filter(app__id=value['id'])\n uploaded_version = Version(value['release']['version'])\n for release in releases:\n if uploaded_version < Version(release.version):\n return False\n return True\n", "path": "nextcloudappstore/core/api/v1/release/importer.py"}, {"content": "from allauth.account.models import EmailAddress\nfrom allauth.account.utils import filter_users_by_email\nfrom django import forms\nfrom captcha.fields import ReCaptchaField\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.forms import EmailField, CharField\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass SignupFormRecaptcha(forms.Form):\n \"\"\"integrate a recaptcha field.\"\"\"\n recaptcha = ReCaptchaField()\n first_name = CharField(max_length=30, label=_('First name'))\n last_name = CharField(max_length=30, label=_('Last name'))\n\n def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n\nclass DeleteAccountForm(forms.Form):\n email = EmailField(required=True, label=_('Your e-mail address'))\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n if self.user and self.user.email == email:\n return email\n else:\n raise forms.ValidationError(_(\n 'The given e-mail address does not match your e-mail address'))\n\n\nclass AccountForm(forms.ModelForm):\n class Meta:\n model = 
get_user_model()\n fields = ('first_name', 'last_name', 'email')\n\n def clean_email(self):\n value = self.cleaned_data[\"email\"]\n users = filter_users_by_email(value)\n if [u for u in users if u.pk != self.instance.pk]:\n msg = _(\n \"This e-mail address is already associated with another \"\n \"account.\")\n raise forms.ValidationError(msg)\n return value\n", "path": "nextcloudappstore/core/user/forms.py"}]} | 3,840 | 508 |
gh_patches_debug_9486 | rasdani/github-patches | git_diff | cal-itp__benefits-1092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Page titles contain extra spaces, characters
Our page titles (the string that shows up e.g. in the browser tab) are a little funky. They contain a bunch of extra spaces and weird formatting. For example, the `dev` site has this in the HTML source:
```html
<title>
Transit Benefits: Introduction |
Cal-ITP
</title>
```
In the browser this isn't really a problem; extra spaces are truncated in the tabs:

However in certain situations, especially where links are expanded for previews (like in Social Media sites, comms apps like Slack, etc.), this extra spacing and weird formatting is more apparent:

## To Reproduce
Steps to reproduce the behavior:
1. Copy a link from the public facing Benefits app, e.g. https://benefits.calitp.org
2. Paste this link into an app that can expand links for previews, like Slack
3. See the weird title formatting in the expanded preview
## Expected behavior
* There shouldn't be any extra spaces in the titles
* We should confirm this is the pattern we want (I'm thinking...probably not?) `Transit Benefits: <Page Title> | Cal-ITP`
* Alternate suggestion: `<Page Title> | Cal-ITP Benefits`
--- END ISSUE ---
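A quick sketch of how the prefixed title string arises; it mirrors the `Page.__init__` logic in `benefits/core/viewmodels.py` below, with the translated prefix hard-coded for illustration:

```python
# Illustration only: the real code uses gettext for _("core.pages.index.prefix").
prefix = "Transit Benefits"

def page_title(title=None):
    if title is None:
        return prefix
    return f"{prefix}: {title}"

print(page_title("Introduction"))  # "Transit Benefits: Introduction"
# The base template then wraps this value and " | Cal-ITP" in indented block
# markup, which is presumably where the extra whitespace in the rendered
# <title> comes from.
```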
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/viewmodels.py`
Content:
```
1 """
2 The core application: view model definitions for the root of the webapp.
3 """
4 from django.utils.translation import pgettext, gettext_lazy as _
5 from django.urls import reverse
6
7 from benefits.core import models
8
9 from . import session
10
11
12 class Button:
13 """
14 Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):
15 * classes: str, str[]
16 * id: str
17 * fallback_text: str
18 * label: str
19 * text: str
20 * url: str
21 * target: str
22 * rel: str
23 """
24
25 def __init__(self, **kwargs):
26 classes = kwargs.get("classes", [])
27 if isinstance(classes, str):
28 classes = classes.split()
29
30 self.classes = ["btn", "btn-lg"]
31 self.classes.extend(classes)
32 self.id = kwargs.get("id")
33 self.fallback_text = kwargs.get("fallback_text")
34 self.label = kwargs.get("label")
35 self.text = kwargs.get("text", "Button")
36 self.url = kwargs.get("url")
37 self.target = kwargs.get("target")
38 self.rel = kwargs.get("rel")
39
40 @staticmethod
41 def agency_contact_links(agency):
42 """Create link buttons for agency contact information."""
43 return [
44 Button.link(classes="agency", label=agency.long_name, text=agency.phone, url=f"tel:{agency.phone}"),
45 Button.link(
46 classes="agency", text=agency.info_url, url=agency.info_url, target="_blank", rel="noopener noreferrer"
47 ),
48 ]
49
50 @staticmethod
51 def home(request, text=None):
52 """Create a button back to this session's origin."""
53 if text is None:
54 text = _("core.buttons.return_home")
55
56 return Button.primary(text=text, url=session.origin(request))
57
58 @staticmethod
59 def link(**kwargs):
60 classes = kwargs.pop("classes", [])
61 if isinstance(classes, str):
62 classes = classes.split(" ")
63 classes.insert(0, "btn-link")
64 return Button(classes=classes, **kwargs)
65
66 @staticmethod
67 def primary(**kwargs):
68 classes = kwargs.pop("classes", [])
69 if isinstance(classes, str):
70 classes = classes.split(" ")
71 classes.insert(0, "btn-primary")
72 return Button(classes=classes, **kwargs)
73
74 @staticmethod
75 def outline_primary(**kwargs):
76 classes = kwargs.pop("classes", [])
77 if isinstance(classes, str):
78 classes = classes.split(" ")
79 classes.insert(0, "btn-outline-primary")
80 return Button(classes=classes, **kwargs)
81
82 @staticmethod
83 def login(**kwargs):
84 """Create a login.gov button, with a login.gov logo and fallback text"""
85 btn = Button.primary(fallback_text="Login.gov", id="login", **kwargs)
86 return btn
87
88 @staticmethod
89 def logout(**kwargs):
90 """Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text"""
91 btn = Button.primary(fallback_text="Login.gov", id="login", url=reverse("oauth:logout"), text="", **kwargs)
92 return btn
93
94 @staticmethod
95 def previous_page(url):
96 return Button(text=_("core.buttons.previous_page"), url=url)
97
98
99 class Icon:
100 """Represents an icon."""
101
102 def __init__(self, icon, alt):
103 self.src = f"img/icon/{icon}.svg"
104 self.alt = alt
105
106
107 class MediaItem:
108 """
109 Represents a media item in a list of items:
110 * icon: core.viewmodels.Icon
111 * details: str, str[]
112 * heading: str
113 * bullets: str, str[]
114 """
115
116 def __init__(self, icon: Icon, details, heading=None, bullets=None):
117 self.icon = icon
118 if isinstance(details, str):
119 self.details = [details]
120 elif isinstance(details, list):
121 self.details = details
122 else:
123 self.details = [str(details)]
124 self.heading = heading
125 if isinstance(bullets, str):
126 self.bullets = [bullets]
127 elif isinstance(bullets, list):
128 self.bullets = bullets
129
130
131 class Page:
132 """
133 Represents a page of content:
134 * title: str
135 * icon: core.viewmodels.Icon
136 * headline: str
137 * paragraphs: str[]
138 * form: django.forms.Form
139 * forms: django.forms.Form[]
140 * button: core.viewmodels.Button
141 * buttons: core.viewmodels.Button[]
142 * classes: str[]
143 """
144
145 def __init__(self, **kwargs):
146 self.title = kwargs.get("title")
147 if self.title is None:
148 self.title = _("core.pages.index.prefix")
149 else:
150 self.title = f"{_('core.pages.index.prefix')}: {self.title}"
151
152 self.icon = kwargs.get("icon")
153 self.headline = kwargs.get("headline")
154 self.paragraphs = kwargs.get("paragraphs", [])
155 self.steps = kwargs.get("steps")
156
157 self.forms = kwargs.get("forms", [])
158 if not isinstance(self.forms, list):
159 self.forms = [self.forms]
160 if "form" in kwargs:
161 self.forms.append(kwargs.get("form"))
162
163 self.buttons = kwargs.get("buttons", [])
164 if not isinstance(self.buttons, list):
165 self.buttons = [self.buttons]
166 if "button" in kwargs:
167 self.buttons.append(kwargs.get("button"))
168
169 self.classes = kwargs.get("classes", [])
170 if not isinstance(self.classes, list):
171 self.classes = self.classes.split(" ")
172
173 def context_dict(self):
174 """Return a context dict for a Page."""
175 return {"page": self}
176
177
178 class ErrorPage(Page):
179 """
180 Represents an error page:
181 * title: str
182 * icon: core.viewmodels.Icon
183 * headline: str
184 * paragraphs: str[]
185 * button: core.viewmodels.Button
186 """
187
188 def __init__(self, **kwargs):
189 super().__init__(
190 title=kwargs.get("title", _("core.pages.error.title")),
191 icon=kwargs.get("icon", Icon("sadbus", pgettext("image alt text", "core.icons.sadbus"))),
192 headline=kwargs.get("headline", _("core.pages.error.title")),
193 paragraphs=kwargs.get("paragraphs", [_("core.pages.server_error.headline")]),
194 button=kwargs.get("button"),
195 )
196
197 @staticmethod
198 def user_error(
199 title=_("core.pages.user_error.title"),
200 headline=_("core.pages.user_error.headline"),
201 paragraphs=[_("core.pages.user_error.p[0]")],
202 **kwargs,
203 ):
204 """Create a new core.viewmodels.ErrorPage instance with defaults for a user error."""
205 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
206
207 @staticmethod
208 def server_error(
209 title=_("core.pages.server_error.title"),
210 headline=_("core.pages.server_error.title"),
211 paragraphs=[_("core.pages.server_error.p[0]")],
212 **kwargs,
213 ):
214 """Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error."""
215 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
216
217 @staticmethod
218 def not_found(
219 title=_("core.pages.not_found.title"),
220 headline=_("core.pages.not_found.headline"),
221 paragraphs=[_("core.pages.not_found.p[0]")],
222 **kwargs,
223 ):
224 """Create a new core.viewmodels.ErrorPage with defaults for a 404."""
225 path = kwargs.pop("path", None)
226 if path and title:
227 title = f"{title}: {path}"
228 elif path and not title:
229 title = path
230 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
231
232
233 class PaymentProcessor:
234 """
235 Represents a core.models.PaymentProcessor:
236 * model: core.models.PaymentProcessor
237 * access_token_url: str
238 * element_id: str
239 * color: str
240 * [name: str]
241 * [loading_text: str]
242 """
243
244 def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_("core.buttons.wait")):
245 if isinstance(model, models.PaymentProcessor):
246 self.access_token_url = access_token_url
247 self.element_id = element_id
248 self.color = color
249 self.name = name or model.name
250 self.loading_text = loading_text
251 self.card_tokenize_url = model.card_tokenize_url
252 self.card_tokenize_func = model.card_tokenize_func
253 self.card_tokenize_env = model.card_tokenize_env
254
255 def context_dict(self):
256 """Return a context dict for a PaymentProcessor."""
257 return {"payment_processor": self}
258
259
260 class TransitAgency:
261 """
262 Represents a core.models.TransitAgency:
263 * model: core.models.TransitAgency
264 """
265
266 def __init__(self, model):
267 if isinstance(model, models.TransitAgency):
268 self.slug = model.slug
269 self.short_name = model.short_name
270 self.long_name = model.long_name
271 self.agency_id = model.agency_id
272 self.merchant_id = model.merchant_id
273 self.info_url = model.info_url
274 self.phone = model.phone
275
276 def context_dict(self):
277 """Return a context dict for a TransitAgency."""
278 return {"agency": self}
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py
--- a/benefits/core/viewmodels.py
+++ b/benefits/core/viewmodels.py
@@ -143,12 +143,9 @@
"""
def __init__(self, **kwargs):
- self.title = kwargs.get("title")
- if self.title is None:
- self.title = _("core.pages.index.prefix")
- else:
- self.title = f"{_('core.pages.index.prefix')}: {self.title}"
-
+ title = kwargs.get("title")
+ if title is not None:
+ self.title = title
self.icon = kwargs.get("icon")
self.headline = kwargs.get("headline")
self.paragraphs = kwargs.get("paragraphs", [])
| {"golden_diff": "diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py\n--- a/benefits/core/viewmodels.py\n+++ b/benefits/core/viewmodels.py\n@@ -143,12 +143,9 @@\n \"\"\"\n \n def __init__(self, **kwargs):\n- self.title = kwargs.get(\"title\")\n- if self.title is None:\n- self.title = _(\"core.pages.index.prefix\")\n- else:\n- self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n-\n+ title = kwargs.get(\"title\")\n+ if title is not None:\n+ self.title = title\n self.icon = kwargs.get(\"icon\")\n self.headline = kwargs.get(\"headline\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n", "issue": "Page titles contain extra spaces, characters\nOur page titles (the string that shows up e.g. in the browser tab) are a little funky. They contain a bunch of extra spaces and weird formatting. For example, the `dev` site has this in the HTML source:\r\n\r\n```html\r\n<title>\r\n \r\n Transit Benefits: Introduction |\r\n \r\n Cal-ITP\r\n </title>\r\n```\r\n\r\nIn the browser this isn't really a problem; extra spaces are truncated in the tabs:\r\n\r\n\r\n\r\nHowever in certain situations, especially where links are expanded for previews (like in Social Media sites, comms apps like Slack, etc.), this extra spacing and weird formatting is more apparent:\r\n\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Copy a link from the public facing Benefits app, e.g. https://benefits.calitp.org\r\n2. Paste this link into an app that can expands links for previews, like Slack\r\n3. See the weird title formatting in the expanded preview \r\n\r\n## Expected behavior\r\n\r\n* There shouldn't be any extra spaces in the titles\r\n* We should confirm this is the pattern we want (I'm thinking...probably not?) `Transit Benefits: <Page Title> | Cal-ITP`\r\n * Alternate suggestion: `<Page Title> | Cal-ITP Benefits`\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . 
import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n @staticmethod\n def previous_page(url):\n return Button(text=_(\"core.buttons.previous_page\"), url=url)\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass MediaItem:\n \"\"\"\n Represents a media item in a list of items:\n * icon: core.viewmodels.Icon\n * details: str, str[]\n * heading: str\n * bullets: str, str[]\n \"\"\"\n\n def __init__(self, icon: Icon, details, heading=None, bullets=None):\n self.icon = icon\n if isinstance(details, str):\n self.details = [details]\n elif isinstance(details, list):\n self.details = details\n else:\n self.details = [str(details)]\n self.heading = heading\n if isinstance(bullets, str):\n self.bullets = [bullets]\n elif isinstance(bullets, list):\n self.bullets = bullets\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * form: 
django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.icon = kwargs.get(\"icon\")\n self.headline = kwargs.get(\"headline\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n headline=kwargs.get(\"headline\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.headline\")]),\n button=kwargs.get(\"button\"),\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n headline=_(\"core.pages.user_error.headline\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n headline=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n headline=_(\"core.pages.not_found.headline\"),\n paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n 
self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and 
fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n @staticmethod\n def previous_page(url):\n return Button(text=_(\"core.buttons.previous_page\"), url=url)\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass MediaItem:\n \"\"\"\n Represents a media item in a list of items:\n * icon: core.viewmodels.Icon\n * details: str, str[]\n * heading: str\n * bullets: str, str[]\n \"\"\"\n\n def __init__(self, icon: Icon, details, heading=None, bullets=None):\n self.icon = icon\n if isinstance(details, str):\n self.details = [details]\n elif isinstance(details, list):\n self.details = details\n else:\n self.details = [str(details)]\n self.heading = heading\n if isinstance(bullets, str):\n self.bullets = [bullets]\n elif isinstance(bullets, list):\n self.bullets = bullets\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n title = kwargs.get(\"title\")\n if title is not None:\n self.title = title\n self.icon = kwargs.get(\"icon\")\n self.headline = kwargs.get(\"headline\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n headline=kwargs.get(\"headline\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.headline\")]),\n button=kwargs.get(\"button\"),\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n headline=_(\"core.pages.user_error.headline\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n headline=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def 
not_found(\n title=_(\"core.pages.not_found.title\"),\n headline=_(\"core.pages.not_found.headline\"),\n paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}]} | 3,392 | 176 |
gh_patches_debug_17289 | rasdani/github-patches | git_diff | psychopy__psychopy-2031 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PIL.Image fails in combineImageEmblem
When launching 3.0.0.b on Windows
```
Traceback (most recent call last):
File "C:\a\Python\Scripts\psychopyApp.py", line 96, in <module>
start_app()
File "C:\a\Python\Scripts\psychopyApp.py", line 28, in start_app
app = PsychoPyApp(0, showSplash=showSplash)
File "C:\a\Python\lib\site-packages\psychopy\app\_psychopyApp.py", line 181, in __init__
self.onInit(testMode=testMode, **kwargs)
File "C:\a\Python\lib\site-packages\psychopy\app\_psychopyApp.py", line 307, in onInit
self.showCoder(fileList=scripts)
File "C:\a\Python\lib\site-packages\psychopy\app\_psychopyApp.py", line 499, in showCoder
files=fileList, app=self)
File "C:\a\Python\lib\site-packages\psychopy\app\coder\coder.py", line 1426, in __init__
self.makeToolbar() # must be before the paneManager for some reason
File "C:\a\Python\lib\site-packages\psychopy\app\coder\coder.py", line 2000, in makeToolbar
pavButtons.addPavloviaTools(buttons=['pavloviaSync', 'pavloviaSearch', 'pavloviaUser', ])
File "C:\a\Python\lib\site-packages\psychopy\app\pavlovia_ui\toolbar.py", line 33, in addPavloviaTools
emblem=join(rc, emblem), pos='bottom_right'))
File "C:\a\Python\lib\site-packages\psychopy\app\icons.py", line 69, in combineImageEmblem
main.paste(emblem, [x, y], mask=emblem)
File "C:\a\Python\lib\site-packages\PIL\Image.py", line 1339, in paste
box = box + (box[0]+size[0], box[1]+size[1])
TypeError: can only concatenate list (not "tuple") to list
```
This is fixed by changing the brackets at psychopy\app\icons.py:69 from
`main.paste(emblem, [x, y], mask=emblem)`
to
`main.paste(emblem, (x, y), mask=emblem)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/icons.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # Part of the PsychoPy library
5 # Copyright (C) 2018 Jonathan Peirce
6 # Distributed under the terms of the GNU General Public License (GPL).
7
8 """utility classes for the Builder
9 """
10
11 from __future__ import absolute_import, division, print_function
12 from os.path import join, abspath, dirname
13
14 from pkg_resources import parse_version
15 from PIL import Image
16 import wx
17
18 from psychopy import experiment, prefs
19 from psychopy.experiment import components
20
21 resourcesPath = prefs.paths['resources']
22
23 def pilToBitmap(pil, scaleFactor=1.0):
24 if parse_version(wx.__version__) < parse_version('4.0.0a1'):
25 image = wx.EmptyImage(pil.size[0], pil.size[1])
26 else:
27 image = wx.Image(pil.size[0], pil.size[1])
28
29 # set the RGB values
30 if hasattr(pil, 'tobytes'):
31 image.SetData(pil.convert("RGB").tobytes())
32 image.SetAlphaBuffer(pil.convert("RGBA").tobytes()[3::4])
33 else:
34 image.SetData(pil.convert("RGB").tostring())
35 image.SetAlphaData(pil.convert("RGBA").tostring()[3::4])
36
37 image.Rescale(image.Width * scaleFactor, image.Height * scaleFactor)
38 return image.ConvertToBitmap() # wx.Image and wx.Bitmap are different
39
40
41 def combineImageEmblem(main, emblem, pos='top_left'):
42 """
43
44 Parameters
45 ----------
46 main: filename
47 emblem: filename
48 pos: str ('bottom_left' etc)
49 size: int (default=16)
50
51 Returns
52 -------
53 A wx.Bitmap of the combined image ready for use in wxButton
54 """
55 # load images if they aren't already loaded
56 main = Image.open(main).convert('RGBA') # might be grey or indexed colors
57 emblem = Image.open(emblem).convert('RGBA')
58 if 'bottom' in pos:
59 y = main.size[1] - emblem.size[1]
60 elif 'top' in pos:
61 y = 0
62 if 'right' in pos:
63 x = main.size[0] - emblem.size[0]
64 elif 'left' in pos:
65 x = 0
66 elif 'center' in pos:
67 x = int(main.size[0]/2-emblem.size[1]/2)
68
69 main.paste(emblem, [x, y], mask=emblem)
70 return pilToBitmap(main)
71
72 _allIcons = None
73
74
75 def getAllIcons(folderList=(), forceReload=False):
76 """load the icons for all the components
77 """
78 global _allIcons
79 if forceReload or _allIcons is None:
80 compons = experiment.getAllComponents(folderList)
81 _allIcons = {}
82 for thisName, thisCompon in compons.items():
83 if thisName in components.iconFiles:
84 _allIcons[thisName] = getIcons(components.iconFiles[thisName])
85 else:
86 _allIcons[thisName] = getIcons(None)
87 return _allIcons
88 else:
89 return _allIcons
90
91
92 def getIcons(filename=None):
93 """Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.
94 The latter has a plus sign added over the top.
95
96 png files work best, but anything that wx.Image can import should be fine
97 """
98 icons = {}
99 if filename is None:
100 filename = join(resourcesPath, 'base.png')
101
102 # get the low-res version first
103 im = Image.open(filename)
104 icons['24'] = pilToBitmap(im, scaleFactor=0.5)
105 icons['24add'] = pilToBitmap(im, scaleFactor=0.5)
106 # try to find a 128x128 version
107 filename128 = filename[:-4]+'128.png'
108 if False: # TURN OFF FOR NOW os.path.isfile(filename128):
109 im = Image.open(filename128)
110 else:
111 im = Image.open(filename)
112 icons['48'] = pilToBitmap(im)
113 # add the plus sign
114 add = Image.open(join(resourcesPath, 'add.png'))
115 im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
116 # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
117 # im.size[0], im.size[1]], mask=add)
118 icons['48add'] = pilToBitmap(im)
119
120 return icons
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/icons.py b/psychopy/app/icons.py
--- a/psychopy/app/icons.py
+++ b/psychopy/app/icons.py
@@ -66,7 +66,7 @@
elif 'center' in pos:
x = int(main.size[0]/2-emblem.size[1]/2)
- main.paste(emblem, [x, y], mask=emblem)
+ main.paste(emblem, (x, y), mask=emblem)
return pilToBitmap(main)
_allIcons = None
@@ -112,7 +112,7 @@
icons['48'] = pilToBitmap(im)
# add the plus sign
add = Image.open(join(resourcesPath, 'add.png'))
- im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
+ im.paste(add, (0, 0, add.size[0], add.size[1]), mask=add)
# im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
# im.size[0], im.size[1]], mask=add)
icons['48add'] = pilToBitmap(im)
| {"golden_diff": "diff --git a/psychopy/app/icons.py b/psychopy/app/icons.py\n--- a/psychopy/app/icons.py\n+++ b/psychopy/app/icons.py\n@@ -66,7 +66,7 @@\n elif 'center' in pos:\n x = int(main.size[0]/2-emblem.size[1]/2)\n \n- main.paste(emblem, [x, y], mask=emblem)\n+ main.paste(emblem, (x, y), mask=emblem)\n return pilToBitmap(main)\n \n _allIcons = None\n@@ -112,7 +112,7 @@\n icons['48'] = pilToBitmap(im)\n # add the plus sign\n add = Image.open(join(resourcesPath, 'add.png'))\n- im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)\n+ im.paste(add, (0, 0, add.size[0], add.size[1]), mask=add)\n # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],\n # im.size[0], im.size[1]], mask=add)\n icons['48add'] = pilToBitmap(im)\n", "issue": "PIL.Image fail in combineImageEmblem\nWhen launching 3.0.0.b in Windows\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\a\\Python\\Scripts\\psychopyApp.py\", line 96, in <module>\r\n start_app()\r\n File \"C:\\a\\Python\\Scripts\\psychopyApp.py\", line 28, in start_app\r\n app = PsychoPyApp(0, showSplash=showSplash)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\_psychopyApp.py\", line 181, in __init__\r\n self.onInit(testMode=testMode, **kwargs)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\_psychopyApp.py\", line 307, in onInit\r\n self.showCoder(fileList=scripts)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\_psychopyApp.py\", line 499, in showCoder\r\n files=fileList, app=self)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\coder\\coder.py\", line 1426, in __init__\r\n self.makeToolbar() # must be before the paneManager for some reason\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\coder\\coder.py\", line 2000, in makeToolbar\r\n pavButtons.addPavloviaTools(buttons=['pavloviaSync', 'pavloviaSearch', 'pavloviaUser', ])\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\pavlovia_ui\\toolbar.py\", line 33, in addPavloviaT\r\nools\r\n emblem=join(rc, emblem), pos='bottom_right'))\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\icons.py\", line 69, in combineImageEmblem\r\n main.paste(emblem, [x, y], mask=emblem)\r\n File \"C:\\a\\Python\\lib\\site-packages\\PIL\\Image.py\", line 1339, in paste\r\n box = box + (box[0]+size[0], box[1]+size[1])\r\nTypeError: can only concatenate list (not \"tuple\") to list\r\n```\r\n\r\n\r\nIs fixed by change of brackets at psychopy\\app\\icons.py:69 :\r\n`\r\nmain.paste(emblem, [x, y], mask=emblem)`\r\n\r\nto \r\n`\r\nmain.paste(emblem, (x, y), mask=emblem)`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2018 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n\"\"\"utility classes for the Builder\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom os.path import join, abspath, dirname\n\nfrom pkg_resources import parse_version\nfrom PIL import Image\nimport wx\n\nfrom psychopy import experiment, prefs\nfrom psychopy.experiment import components\n\nresourcesPath = prefs.paths['resources']\n\ndef pilToBitmap(pil, scaleFactor=1.0):\n if parse_version(wx.__version__) < parse_version('4.0.0a1'):\n image = wx.EmptyImage(pil.size[0], pil.size[1])\n else:\n image = wx.Image(pil.size[0], pil.size[1])\n\n # set the RGB values\n if hasattr(pil, 'tobytes'):\n image.SetData(pil.convert(\"RGB\").tobytes())\n 
image.SetAlphaBuffer(pil.convert(\"RGBA\").tobytes()[3::4])\n else:\n image.SetData(pil.convert(\"RGB\").tostring())\n image.SetAlphaData(pil.convert(\"RGBA\").tostring()[3::4])\n\n image.Rescale(image.Width * scaleFactor, image.Height * scaleFactor)\n return image.ConvertToBitmap() # wx.Image and wx.Bitmap are different\n\n\ndef combineImageEmblem(main, emblem, pos='top_left'):\n \"\"\"\n\n Parameters\n ----------\n main: filename\n emblem: filename\n pos: str ('bottom_left' etc)\n size: int (default=16)\n\n Returns\n -------\n A wx.Bitmap of the combined image ready for use in wxButton\n \"\"\"\n # load images if they aren't already loaded\n main = Image.open(main).convert('RGBA') # might be grey or indexed colors\n emblem = Image.open(emblem).convert('RGBA')\n if 'bottom' in pos:\n y = main.size[1] - emblem.size[1]\n elif 'top' in pos:\n y = 0\n if 'right' in pos:\n x = main.size[0] - emblem.size[0]\n elif 'left' in pos:\n x = 0\n elif 'center' in pos:\n x = int(main.size[0]/2-emblem.size[1]/2)\n\n main.paste(emblem, [x, y], mask=emblem)\n return pilToBitmap(main)\n\n_allIcons = None\n\n\ndef getAllIcons(folderList=(), forceReload=False):\n \"\"\"load the icons for all the components\n \"\"\"\n global _allIcons\n if forceReload or _allIcons is None:\n compons = experiment.getAllComponents(folderList)\n _allIcons = {}\n for thisName, thisCompon in compons.items():\n if thisName in components.iconFiles:\n _allIcons[thisName] = getIcons(components.iconFiles[thisName])\n else:\n _allIcons[thisName] = getIcons(None)\n return _allIcons\n else:\n return _allIcons\n\n\ndef getIcons(filename=None):\n \"\"\"Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.\n The latter has a plus sign added over the top.\n\n png files work best, but anything that wx.Image can import should be fine\n \"\"\"\n icons = {}\n if filename is None:\n filename = join(resourcesPath, 'base.png')\n\n # get the low-res version first\n im = Image.open(filename)\n icons['24'] = pilToBitmap(im, scaleFactor=0.5)\n icons['24add'] = pilToBitmap(im, scaleFactor=0.5)\n # try to find a 128x128 version\n filename128 = filename[:-4]+'128.png'\n if False: # TURN OFF FOR NOW os.path.isfile(filename128):\n im = Image.open(filename128)\n else:\n im = Image.open(filename)\n icons['48'] = pilToBitmap(im)\n # add the plus sign\n add = Image.open(join(resourcesPath, 'add.png'))\n im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)\n # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],\n # im.size[0], im.size[1]], mask=add)\n icons['48add'] = pilToBitmap(im)\n\n return icons", "path": "psychopy/app/icons.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2018 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n\"\"\"utility classes for the Builder\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom os.path import join, abspath, dirname\n\nfrom pkg_resources import parse_version\nfrom PIL import Image\nimport wx\n\nfrom psychopy import experiment, prefs\nfrom psychopy.experiment import components\n\nresourcesPath = prefs.paths['resources']\n\ndef pilToBitmap(pil, scaleFactor=1.0):\n if parse_version(wx.__version__) < parse_version('4.0.0a1'):\n image = wx.EmptyImage(pil.size[0], pil.size[1])\n else:\n image = wx.Image(pil.size[0], pil.size[1])\n\n # set the RGB values\n if hasattr(pil, 'tobytes'):\n image.SetData(pil.convert(\"RGB\").tobytes())\n 
image.SetAlphaBuffer(pil.convert(\"RGBA\").tobytes()[3::4])\n else:\n image.SetData(pil.convert(\"RGB\").tostring())\n image.SetAlphaData(pil.convert(\"RGBA\").tostring()[3::4])\n\n image.Rescale(image.Width * scaleFactor, image.Height * scaleFactor)\n return image.ConvertToBitmap() # wx.Image and wx.Bitmap are different\n\n\ndef combineImageEmblem(main, emblem, pos='top_left'):\n \"\"\"\n\n Parameters\n ----------\n main: filename\n emblem: filename\n pos: str ('bottom_left' etc)\n size: int (default=16)\n\n Returns\n -------\n A wx.Bitmap of the combined image ready for use in wxButton\n \"\"\"\n # load images if they aren't already loaded\n main = Image.open(main).convert('RGBA') # might be grey or indexed colors\n emblem = Image.open(emblem).convert('RGBA')\n if 'bottom' in pos:\n y = main.size[1] - emblem.size[1]\n elif 'top' in pos:\n y = 0\n if 'right' in pos:\n x = main.size[0] - emblem.size[0]\n elif 'left' in pos:\n x = 0\n elif 'center' in pos:\n x = int(main.size[0]/2-emblem.size[1]/2)\n\n main.paste(emblem, (x, y), mask=emblem)\n return pilToBitmap(main)\n\n_allIcons = None\n\n\ndef getAllIcons(folderList=(), forceReload=False):\n \"\"\"load the icons for all the components\n \"\"\"\n global _allIcons\n if forceReload or _allIcons is None:\n compons = experiment.getAllComponents(folderList)\n _allIcons = {}\n for thisName, thisCompon in compons.items():\n if thisName in components.iconFiles:\n _allIcons[thisName] = getIcons(components.iconFiles[thisName])\n else:\n _allIcons[thisName] = getIcons(None)\n return _allIcons\n else:\n return _allIcons\n\n\ndef getIcons(filename=None):\n \"\"\"Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.\n The latter has a plus sign added over the top.\n\n png files work best, but anything that wx.Image can import should be fine\n \"\"\"\n icons = {}\n if filename is None:\n filename = join(resourcesPath, 'base.png')\n\n # get the low-res version first\n im = Image.open(filename)\n icons['24'] = pilToBitmap(im, scaleFactor=0.5)\n icons['24add'] = pilToBitmap(im, scaleFactor=0.5)\n # try to find a 128x128 version\n filename128 = filename[:-4]+'128.png'\n if False: # TURN OFF FOR NOW os.path.isfile(filename128):\n im = Image.open(filename128)\n else:\n im = Image.open(filename)\n icons['48'] = pilToBitmap(im)\n # add the plus sign\n add = Image.open(join(resourcesPath, 'add.png'))\n im.paste(add, (0, 0, add.size[0], add.size[1]), mask=add)\n # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],\n # im.size[0], im.size[1]], mask=add)\n icons['48add'] = pilToBitmap(im)\n\n return icons", "path": "psychopy/app/icons.py"}]} | 2,110 | 280 |
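The psychopy patch above works because, in the affected Pillow versions, `PIL.Image.paste` builds the full paste box by concatenating the `box` argument with a tuple, so `box` itself must be a tuple. A minimal sketch of the corrected call pattern, using small in-memory images instead of the real PsychoPy icon files (the sizes and colours here are made up for illustration):

```python
from PIL import Image

# Stand-ins for the icon files that combineImageEmblem() normally loads from disk.
main = Image.new("RGBA", (48, 48), (200, 200, 200, 255))
emblem = Image.new("RGBA", (16, 16), (255, 0, 0, 255))

# 'bottom_right' placement, as in the toolbar button that triggered the traceback.
x = main.size[0] - emblem.size[0]
y = main.size[1] - emblem.size[1]

# Passing a tuple avoids the "can only concatenate list (not \"tuple\") to list"
# TypeError raised inside PIL.Image.paste when box is given as a list.
main.paste(emblem, (x, y), mask=emblem)
```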
gh_patches_debug_3527 | rasdani/github-patches | git_diff | qtile__qtile-738 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lib*.so references in pangocffi.py
When upgrading from 0.9.1 to 0.10.1, I needed to modify the following references for my system (Ubuntu Vivid) in libqtile/pangocffi.py
gobject = ffi.dlopen('libgobject-2.0.so')
pango = ffi.dlopen('libpango-1.0.so')
pangocairo = ffi.dlopen('libpangocairo-1.0.so')
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/pangocffi.py`
Content:
```
1 # Copyright (c) 2014-2015 Sean Vig
2 # Copyright (c) 2014 roger
3 # Copyright (c) 2014 Tycho Andersen
4 # Copyright (c) 2015 Craig Barnes
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # SOFTWARE.
23
24 # This module is kind of a hack; you've been warned :-). Some upstream work
25 # needs to happen in order to avoid doing this, though.
26 #
27 # The problem is that we want to use pango to draw stuff. We need to create a
28 # cairo surface, in particular an XCB surface. Since we're using xcffib as the
29 # XCB binding and there is no portable way to go from cffi's PyObject* cdata
30 # wrappers to the wrapped type [1], we can't add support to pycairo for XCB
31 # surfaces via xcffib.
32 #
33 # A similar problem exists one layer of indirection down with cairocffi --
34 # python's pangocairo is almost all C, and only works by including pycairo's
35 # headers and accessing members of structs only available in C, and not in
36 # python. Since cairocffi is pure python and also cffi based, we cannot extract
37 # the raw pointer to pass to the existing pangocairo bindings.
38 #
39 # The solution here is to implement a tiny pangocffi for the small set of pango
40 # functions we call. We're doing it directly here because we can, but it would
41 # not be difficult to use more upstream libraries (e.g. cairocffi and some
42 # pangocairocffi when it exists). This also allows us to drop pygtk entirely,
43 # since we are doing our own pango binding.
44 #
45 # [1]: https://groups.google.com/forum/#!topic/python-cffi/SPND0rRmazA
46 #
47 # This is not intended to be a complete cffi-based pango binding.
48
49 import six
50
51 # PyPy < 2.6 compatibility
52 try:
53 from libqtile._ffi_pango import ffi
54 except ImportError:
55 from libqtile.ffi_build import pango_ffi as ffi
56
57 gobject = ffi.dlopen('libgobject-2.0.so')
58 pango = ffi.dlopen('libpango-1.0.so')
59 pangocairo = ffi.dlopen('libpangocairo-1.0.so')
60
61
62 def CairoContext(cairo_t):
63 def create_layout():
64 return PangoLayout(cairo_t._pointer)
65 cairo_t.create_layout = create_layout
66
67 def show_layout(layout):
68 pangocairo.pango_cairo_show_layout(cairo_t._pointer, layout._pointer)
69 cairo_t.show_layout = show_layout
70
71 return cairo_t
72
73 ALIGN_CENTER = pango.PANGO_ALIGN_CENTER
74 ELLIPSIZE_END = pango.PANGO_ELLIPSIZE_END
75 units_from_double = pango.pango_units_from_double
76
77
78 def _const_char_to_py_str(cc):
79 return ''.join(ffi.buffer(cc, len(cc)))
80
81
82 class PangoLayout(object):
83 def __init__(self, cairo_t):
84 self._cairo_t = cairo_t
85 self._pointer = pangocairo.pango_cairo_create_layout(cairo_t)
86
87 def free(p):
88 p = ffi.cast("gpointer", p)
89 gobject.g_object_unref(p)
90 self._pointer = ffi.gc(self._pointer, free)
91
92 def finalize(self):
93 self._desc = None
94 self._pointer = None
95 self._cairo_t = None
96
97 def set_font_description(self, desc):
98 # save a pointer so it doesn't get GC'd out from under us
99 self._desc = desc
100 pango.pango_layout_set_font_description(self._pointer, desc._pointer)
101
102 def get_font_description(self):
103 descr = pango.pango_layout_get_font_description(self._pointer)
104 return FontDescription(descr)
105
106 def set_alignment(self, alignment):
107 pango.pango_layout_set_alignment(self._pointer, alignment)
108
109 def set_attributes(self, attrs):
110 pango.pango_layout_set_attributes(self._pointer, attrs)
111
112 def set_text(self, text):
113 text = text.encode('utf-8')
114 pango.pango_layout_set_text(self._pointer, text, -1)
115
116 def get_text(self):
117 ret = pango.pango_layout_get_text(self._pointer)
118 return _const_char_to_py_str(ret)
119
120 def set_ellipsize(self, ellipzize):
121 pango.pango_layout_set_ellipsize(self._pointer, ellipzize)
122
123 def get_ellipsize(self):
124 return pango.pango_layout_get_ellipsize(self._pointer)
125
126 def get_pixel_size(self):
127 width = ffi.new("int[1]")
128 height = ffi.new("int[1]")
129
130 pango.pango_layout_get_pixel_size(self._pointer, width, height)
131
132 return width[0], height[0]
133
134 def set_width(self, width):
135 pango.pango_layout_set_width(self._pointer, width)
136
137
138 class FontDescription(object):
139 def __init__(self, pointer=None):
140 if pointer is None:
141 self._pointer = pango.pango_font_description_new()
142 self._pointer = ffi.gc(self._pointer, pango.pango_font_description_free)
143 else:
144 self._pointer = pointer
145
146 @classmethod
147 def from_string(cls, string):
148 pointer = pango.pango_font_description_from_string(string.encode())
149 pointer = ffi.gc(pointer, pango.pango_font_description_free)
150 return cls(pointer)
151
152 def set_family(self, family):
153 pango.pango_font_description_set_family(self._pointer, family.encode())
154
155 def get_family(self):
156 ret = pango.pango_font_description_get_family(self._pointer)
157 return _const_char_to_py_str(ret)
158
159 def set_absolute_size(self, size):
160 pango.pango_font_description_set_absolute_size(self._pointer, size)
161
162 def set_size(self, size):
163 pango.pango_font_description_set_size(self._pointer, size)
164
165 def get_size(self, size):
166 return pango.pango_font_description_get_size(self._pointer, size)
167
168
169 def parse_markup(value, accel_marker=0):
170 attr_list = ffi.new("PangoAttrList**")
171 text = ffi.new("char**")
172 error = ffi.new("GError**")
173 if six.PY3:
174 value = value.encode()
175
176 ret = pango.pango_parse_markup(value, -1, accel_marker, attr_list, text, ffi.NULL, error)
177
178 if ret == 0:
179 raise Exception("parse_markup() failed for %s" % value)
180
181 return attr_list[0], ffi.string(text[0]), six.unichr(accel_marker)
182
183
184 def markup_escape_text(text):
185 ret = gobject.g_markup_escape_text(text.encode(), -1)
186 if six.PY3:
187 return ffi.string(ret).decode()
188 return ffi.string(ret)
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/pangocffi.py b/libqtile/pangocffi.py
--- a/libqtile/pangocffi.py
+++ b/libqtile/pangocffi.py
@@ -54,9 +54,9 @@
except ImportError:
from libqtile.ffi_build import pango_ffi as ffi
-gobject = ffi.dlopen('libgobject-2.0.so')
-pango = ffi.dlopen('libpango-1.0.so')
-pangocairo = ffi.dlopen('libpangocairo-1.0.so')
+gobject = ffi.dlopen('libgobject-2.0.so.0')
+pango = ffi.dlopen('libpango-1.0.so.0')
+pangocairo = ffi.dlopen('libpangocairo-1.0.so.0')
def CairoContext(cairo_t):
| {"golden_diff": "diff --git a/libqtile/pangocffi.py b/libqtile/pangocffi.py\n--- a/libqtile/pangocffi.py\n+++ b/libqtile/pangocffi.py\n@@ -54,9 +54,9 @@\n except ImportError:\n from libqtile.ffi_build import pango_ffi as ffi\n \n-gobject = ffi.dlopen('libgobject-2.0.so')\n-pango = ffi.dlopen('libpango-1.0.so')\n-pangocairo = ffi.dlopen('libpangocairo-1.0.so')\n+gobject = ffi.dlopen('libgobject-2.0.so.0')\n+pango = ffi.dlopen('libpango-1.0.so.0')\n+pangocairo = ffi.dlopen('libpangocairo-1.0.so.0')\n \n \n def CairoContext(cairo_t):\n", "issue": "lib*.so references in pangocffi.py\nWhen upgrading from 0.9.1 to 0.10.1, I needed to modify the following references for my system (Ubuntu Vivid) in libqtile/pangocffi.py\n\ngobject = ffi.dlopen('libgobject-2.0.so')\npango = ffi.dlopen('libpango-1.0.so')\npangocairo = ffi.dlopen('libpangocairo-1.0.so')\n\n", "before_files": [{"content": "# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2015 Craig Barnes\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# This module is kind of a hack; you've been warned :-). Some upstream work\n# needs to happen in order to avoid doing this, though.\n#\n# The problem is that we want to use pango to draw stuff. We need to create a\n# cairo surface, in particular an XCB surface. Since we're using xcffib as the\n# XCB binding and there is no portable way to go from cffi's PyObject* cdata\n# wrappers to the wrapped type [1], we can't add support to pycairo for XCB\n# surfaces via xcffib.\n#\n# A similar problem exists one layer of indirection down with cairocffi --\n# python's pangocairo is almost all C, and only works by including pycairo's\n# headers and accessing members of structs only available in C, and not in\n# python. Since cairocffi is pure python and also cffi based, we cannot extract\n# the raw pointer to pass to the existing pangocairo bindings.\n#\n# The solution here is to implement a tiny pangocffi for the small set of pango\n# functions we call. We're doing it directly here because we can, but it would\n# not be difficult to use more upstream libraries (e.g. cairocffi and some\n# pangocairocffi when it exists). 
This also allows us to drop pygtk entirely,\n# since we are doing our own pango binding.\n#\n# [1]: https://groups.google.com/forum/#!topic/python-cffi/SPND0rRmazA\n#\n# This is not intended to be a complete cffi-based pango binding.\n\nimport six\n\n# PyPy < 2.6 compatibility\ntry:\n from libqtile._ffi_pango import ffi\nexcept ImportError:\n from libqtile.ffi_build import pango_ffi as ffi\n\ngobject = ffi.dlopen('libgobject-2.0.so')\npango = ffi.dlopen('libpango-1.0.so')\npangocairo = ffi.dlopen('libpangocairo-1.0.so')\n\n\ndef CairoContext(cairo_t):\n def create_layout():\n return PangoLayout(cairo_t._pointer)\n cairo_t.create_layout = create_layout\n\n def show_layout(layout):\n pangocairo.pango_cairo_show_layout(cairo_t._pointer, layout._pointer)\n cairo_t.show_layout = show_layout\n\n return cairo_t\n\nALIGN_CENTER = pango.PANGO_ALIGN_CENTER\nELLIPSIZE_END = pango.PANGO_ELLIPSIZE_END\nunits_from_double = pango.pango_units_from_double\n\n\ndef _const_char_to_py_str(cc):\n return ''.join(ffi.buffer(cc, len(cc)))\n\n\nclass PangoLayout(object):\n def __init__(self, cairo_t):\n self._cairo_t = cairo_t\n self._pointer = pangocairo.pango_cairo_create_layout(cairo_t)\n\n def free(p):\n p = ffi.cast(\"gpointer\", p)\n gobject.g_object_unref(p)\n self._pointer = ffi.gc(self._pointer, free)\n\n def finalize(self):\n self._desc = None\n self._pointer = None\n self._cairo_t = None\n\n def set_font_description(self, desc):\n # save a pointer so it doesn't get GC'd out from under us\n self._desc = desc\n pango.pango_layout_set_font_description(self._pointer, desc._pointer)\n\n def get_font_description(self):\n descr = pango.pango_layout_get_font_description(self._pointer)\n return FontDescription(descr)\n\n def set_alignment(self, alignment):\n pango.pango_layout_set_alignment(self._pointer, alignment)\n\n def set_attributes(self, attrs):\n pango.pango_layout_set_attributes(self._pointer, attrs)\n\n def set_text(self, text):\n text = text.encode('utf-8')\n pango.pango_layout_set_text(self._pointer, text, -1)\n\n def get_text(self):\n ret = pango.pango_layout_get_text(self._pointer)\n return _const_char_to_py_str(ret)\n\n def set_ellipsize(self, ellipzize):\n pango.pango_layout_set_ellipsize(self._pointer, ellipzize)\n\n def get_ellipsize(self):\n return pango.pango_layout_get_ellipsize(self._pointer)\n\n def get_pixel_size(self):\n width = ffi.new(\"int[1]\")\n height = ffi.new(\"int[1]\")\n\n pango.pango_layout_get_pixel_size(self._pointer, width, height)\n\n return width[0], height[0]\n\n def set_width(self, width):\n pango.pango_layout_set_width(self._pointer, width)\n\n\nclass FontDescription(object):\n def __init__(self, pointer=None):\n if pointer is None:\n self._pointer = pango.pango_font_description_new()\n self._pointer = ffi.gc(self._pointer, pango.pango_font_description_free)\n else:\n self._pointer = pointer\n\n @classmethod\n def from_string(cls, string):\n pointer = pango.pango_font_description_from_string(string.encode())\n pointer = ffi.gc(pointer, pango.pango_font_description_free)\n return cls(pointer)\n\n def set_family(self, family):\n pango.pango_font_description_set_family(self._pointer, family.encode())\n\n def get_family(self):\n ret = pango.pango_font_description_get_family(self._pointer)\n return _const_char_to_py_str(ret)\n\n def set_absolute_size(self, size):\n pango.pango_font_description_set_absolute_size(self._pointer, size)\n\n def set_size(self, size):\n pango.pango_font_description_set_size(self._pointer, size)\n\n def get_size(self, size):\n return 
pango.pango_font_description_get_size(self._pointer, size)\n\n\ndef parse_markup(value, accel_marker=0):\n attr_list = ffi.new(\"PangoAttrList**\")\n text = ffi.new(\"char**\")\n error = ffi.new(\"GError**\")\n if six.PY3:\n value = value.encode()\n\n ret = pango.pango_parse_markup(value, -1, accel_marker, attr_list, text, ffi.NULL, error)\n\n if ret == 0:\n raise Exception(\"parse_markup() failed for %s\" % value)\n\n return attr_list[0], ffi.string(text[0]), six.unichr(accel_marker)\n\n\ndef markup_escape_text(text):\n ret = gobject.g_markup_escape_text(text.encode(), -1)\n if six.PY3:\n return ffi.string(ret).decode()\n return ffi.string(ret)\n", "path": "libqtile/pangocffi.py"}], "after_files": [{"content": "# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2015 Craig Barnes\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# This module is kind of a hack; you've been warned :-). Some upstream work\n# needs to happen in order to avoid doing this, though.\n#\n# The problem is that we want to use pango to draw stuff. We need to create a\n# cairo surface, in particular an XCB surface. Since we're using xcffib as the\n# XCB binding and there is no portable way to go from cffi's PyObject* cdata\n# wrappers to the wrapped type [1], we can't add support to pycairo for XCB\n# surfaces via xcffib.\n#\n# A similar problem exists one layer of indirection down with cairocffi --\n# python's pangocairo is almost all C, and only works by including pycairo's\n# headers and accessing members of structs only available in C, and not in\n# python. Since cairocffi is pure python and also cffi based, we cannot extract\n# the raw pointer to pass to the existing pangocairo bindings.\n#\n# The solution here is to implement a tiny pangocffi for the small set of pango\n# functions we call. We're doing it directly here because we can, but it would\n# not be difficult to use more upstream libraries (e.g. cairocffi and some\n# pangocairocffi when it exists). 
This also allows us to drop pygtk entirely,\n# since we are doing our own pango binding.\n#\n# [1]: https://groups.google.com/forum/#!topic/python-cffi/SPND0rRmazA\n#\n# This is not intended to be a complete cffi-based pango binding.\n\nimport six\n\n# PyPy < 2.6 compatibility\ntry:\n from libqtile._ffi_pango import ffi\nexcept ImportError:\n from libqtile.ffi_build import pango_ffi as ffi\n\ngobject = ffi.dlopen('libgobject-2.0.so.0')\npango = ffi.dlopen('libpango-1.0.so.0')\npangocairo = ffi.dlopen('libpangocairo-1.0.so.0')\n\n\ndef CairoContext(cairo_t):\n def create_layout():\n return PangoLayout(cairo_t._pointer)\n cairo_t.create_layout = create_layout\n\n def show_layout(layout):\n pangocairo.pango_cairo_show_layout(cairo_t._pointer, layout._pointer)\n cairo_t.show_layout = show_layout\n\n return cairo_t\n\nALIGN_CENTER = pango.PANGO_ALIGN_CENTER\nELLIPSIZE_END = pango.PANGO_ELLIPSIZE_END\nunits_from_double = pango.pango_units_from_double\n\n\ndef _const_char_to_py_str(cc):\n return ''.join(ffi.buffer(cc, len(cc)))\n\n\nclass PangoLayout(object):\n def __init__(self, cairo_t):\n self._cairo_t = cairo_t\n self._pointer = pangocairo.pango_cairo_create_layout(cairo_t)\n\n def free(p):\n p = ffi.cast(\"gpointer\", p)\n gobject.g_object_unref(p)\n self._pointer = ffi.gc(self._pointer, free)\n\n def finalize(self):\n self._desc = None\n self._pointer = None\n self._cairo_t = None\n\n def set_font_description(self, desc):\n # save a pointer so it doesn't get GC'd out from under us\n self._desc = desc\n pango.pango_layout_set_font_description(self._pointer, desc._pointer)\n\n def get_font_description(self):\n descr = pango.pango_layout_get_font_description(self._pointer)\n return FontDescription(descr)\n\n def set_alignment(self, alignment):\n pango.pango_layout_set_alignment(self._pointer, alignment)\n\n def set_attributes(self, attrs):\n pango.pango_layout_set_attributes(self._pointer, attrs)\n\n def set_text(self, text):\n text = text.encode('utf-8')\n pango.pango_layout_set_text(self._pointer, text, -1)\n\n def get_text(self):\n ret = pango.pango_layout_get_text(self._pointer)\n return _const_char_to_py_str(ret)\n\n def set_ellipsize(self, ellipzize):\n pango.pango_layout_set_ellipsize(self._pointer, ellipzize)\n\n def get_ellipsize(self):\n return pango.pango_layout_get_ellipsize(self._pointer)\n\n def get_pixel_size(self):\n width = ffi.new(\"int[1]\")\n height = ffi.new(\"int[1]\")\n\n pango.pango_layout_get_pixel_size(self._pointer, width, height)\n\n return width[0], height[0]\n\n def set_width(self, width):\n pango.pango_layout_set_width(self._pointer, width)\n\n\nclass FontDescription(object):\n def __init__(self, pointer=None):\n if pointer is None:\n self._pointer = pango.pango_font_description_new()\n self._pointer = ffi.gc(self._pointer, pango.pango_font_description_free)\n else:\n self._pointer = pointer\n\n @classmethod\n def from_string(cls, string):\n pointer = pango.pango_font_description_from_string(string.encode())\n pointer = ffi.gc(pointer, pango.pango_font_description_free)\n return cls(pointer)\n\n def set_family(self, family):\n pango.pango_font_description_set_family(self._pointer, family.encode())\n\n def get_family(self):\n ret = pango.pango_font_description_get_family(self._pointer)\n return _const_char_to_py_str(ret)\n\n def set_absolute_size(self, size):\n pango.pango_font_description_set_absolute_size(self._pointer, size)\n\n def set_size(self, size):\n pango.pango_font_description_set_size(self._pointer, size)\n\n def get_size(self, size):\n return 
pango.pango_font_description_get_size(self._pointer, size)\n\n\ndef parse_markup(value, accel_marker=0):\n attr_list = ffi.new(\"PangoAttrList**\")\n text = ffi.new(\"char**\")\n error = ffi.new(\"GError**\")\n if six.PY3:\n value = value.encode()\n\n ret = pango.pango_parse_markup(value, -1, accel_marker, attr_list, text, ffi.NULL, error)\n\n if ret == 0:\n raise Exception(\"parse_markup() failed for %s\" % value)\n\n return attr_list[0], ffi.string(text[0]), six.unichr(accel_marker)\n\n\ndef markup_escape_text(text):\n ret = gobject.g_markup_escape_text(text.encode(), -1)\n if six.PY3:\n return ffi.string(ret).decode()\n return ffi.string(ret)\n", "path": "libqtile/pangocffi.py"}]} | 2,552 | 194 |
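The qtile fix above replaces bare library names with versioned sonames such as `libgobject-2.0.so.0`; the unversioned `.so` symlinks are usually shipped only by a distribution's development packages, while the `.so.0` files belong to the runtime packages. A minimal cffi sketch of the same dlopen-by-soname pattern, using libm purely as a stand-in for the pango/gobject libraries (Linux-only illustration):

```python
import cffi

ffi = cffi.FFI()
# Declare just the one symbol we want to call through the handle.
ffi.cdef("double sqrt(double x);")

# The runtime package ships libm.so.6; the plain libm.so symlink may be
# missing, exactly like libpango-1.0.so vs libpango-1.0.so.0 on Ubuntu.
libm = ffi.dlopen("libm.so.6")
print(libm.sqrt(2.0))  # -> 1.4142135623730951
```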
gh_patches_debug_20653 | rasdani/github-patches | git_diff | ipython__ipython-6338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement check_pid for .NET
This is a reminder - I added an `IPython.utils.process.check_pid` function to check if a process exists with a given PID. However, we have a `_process_cli` module containing .NET implementations of these things, and I didn't write the check_pid implementation for .NET.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/utils/process.py`
Content:
```
1 # encoding: utf-8
2 """
3 Utilities for working with external processes.
4 """
5
6 #-----------------------------------------------------------------------------
7 # Copyright (C) 2008-2011 The IPython Development Team
8 #
9 # Distributed under the terms of the BSD License. The full license is in
10 # the file COPYING, distributed as part of this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17
18 # Stdlib
19 import os
20 import sys
21
22 # Our own
23 if sys.platform == 'win32':
24 from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid
25 elif sys.platform == 'cli':
26 from ._process_cli import _find_cmd, system, getoutput, arg_split
27 else:
28 from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid
29
30 from ._process_common import getoutputerror, get_output_error_code, process_handler
31 from . import py3compat
32
33 #-----------------------------------------------------------------------------
34 # Code
35 #-----------------------------------------------------------------------------
36
37
38 class FindCmdError(Exception):
39 pass
40
41
42 def find_cmd(cmd):
43 """Find absolute path to executable cmd in a cross platform manner.
44
45 This function tries to determine the full path to a command line program
46 using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
47 time it will use the version that is first on the users `PATH`.
48
49 Warning, don't use this to find IPython command line programs as there
50 is a risk you will find the wrong one. Instead find those using the
51 following code and looking for the application itself::
52
53 from IPython.utils.path import get_ipython_module_path
54 from IPython.utils.process import pycmd2argv
55 argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))
56
57 Parameters
58 ----------
59 cmd : str
60 The command line program to look for.
61 """
62 try:
63 path = _find_cmd(cmd).rstrip()
64 except OSError:
65 raise FindCmdError('command could not be found: %s' % cmd)
66 # which returns empty if not found
67 if path == '':
68 raise FindCmdError('command could not be found: %s' % cmd)
69 return os.path.abspath(path)
70
71
72 def is_cmd_found(cmd):
73 """Check whether executable `cmd` exists or not and return a bool."""
74 try:
75 find_cmd(cmd)
76 return True
77 except FindCmdError:
78 return False
79
80
81 def pycmd2argv(cmd):
82 r"""Take the path of a python command and return a list (argv-style).
83
84 This only works on Python based command line programs and will find the
85 location of the ``python`` executable using ``sys.executable`` to make
86 sure the right version is used.
87
88 For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
89 .com or .bat, and [, cmd] otherwise.
90
91 Parameters
92 ----------
93 cmd : string
94 The path of the command.
95
96 Returns
97 -------
98 argv-style list.
99 """
100 ext = os.path.splitext(cmd)[1]
101 if ext in ['.exe', '.com', '.bat']:
102 return [cmd]
103 else:
104 return [sys.executable, cmd]
105
106
107 def abbrev_cwd():
108 """ Return abbreviated version of cwd, e.g. d:mydir """
109 cwd = py3compat.getcwd().replace('\\','/')
110 drivepart = ''
111 tail = cwd
112 if sys.platform == 'win32':
113 if len(cwd) < 4:
114 return cwd
115 drivepart,tail = os.path.splitdrive(cwd)
116
117
118 parts = tail.split('/')
119 if len(parts) > 2:
120 tail = '/'.join(parts[-2:])
121
122 return (drivepart + (
123 cwd == '/' and '/' or tail))
124
```
Path: `IPython/utils/_process_cli.py`
Content:
```
1 """cli-specific implementation of process utilities.
2
3 cli - Common Language Infrastructure for IronPython. Code
4 can run on any operating system. Check os.name for os-
5 specific settings.
6
7 This file is only meant to be imported by process.py, not by end-users.
8
9 This file is largely untested. To become a full drop-in process
10 interface for IronPython will probably require you to help fill
11 in the details.
12 """
13
14 # Import cli libraries:
15 import clr
16 import System
17
18 # Import Python libraries:
19 import os
20
21 # Import IPython libraries:
22 from IPython.utils import py3compat
23 from ._process_common import arg_split
24
25 def _find_cmd(cmd):
26 """Find the full path to a command using which."""
27 paths = System.Environment.GetEnvironmentVariable("PATH").Split(os.pathsep)
28 for path in paths:
29 filename = os.path.join(path, cmd)
30 if System.IO.File.Exists(filename):
31 return py3compat.bytes_to_str(filename)
32 raise OSError("command %r not found" % cmd)
33
34 def system(cmd):
35 """
36 system(cmd) should work in a cli environment on Mac OSX, Linux,
37 and Windows
38 """
39 psi = System.Diagnostics.ProcessStartInfo(cmd)
40 psi.RedirectStandardOutput = True
41 psi.RedirectStandardError = True
42 psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
43 psi.UseShellExecute = False
44 # Start up process:
45 reg = System.Diagnostics.Process.Start(psi)
46
47 def getoutput(cmd):
48 """
49 getoutput(cmd) should work in a cli environment on Mac OSX, Linux,
50 and Windows
51 """
52 psi = System.Diagnostics.ProcessStartInfo(cmd)
53 psi.RedirectStandardOutput = True
54 psi.RedirectStandardError = True
55 psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
56 psi.UseShellExecute = False
57 # Start up process:
58 reg = System.Diagnostics.Process.Start(psi)
59 myOutput = reg.StandardOutput
60 output = myOutput.ReadToEnd()
61 myError = reg.StandardError
62 error = myError.ReadToEnd()
63 return output
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/utils/_process_cli.py b/IPython/utils/_process_cli.py
--- a/IPython/utils/_process_cli.py
+++ b/IPython/utils/_process_cli.py
@@ -61,3 +61,18 @@
myError = reg.StandardError
error = myError.ReadToEnd()
return output
+
+def check_pid(pid):
+ """
+ Check if a process with the given PID (pid) exists
+ """
+ try:
+ System.Diagnostics.Process.GetProcessById(pid)
+ # process with given pid is running
+ return True
+ except System.InvalidOperationException:
+ # process wasn't started by this object (but is running)
+ return True
+ except System.ArgumentException:
+ # process with given pid isn't running
+ return False
diff --git a/IPython/utils/process.py b/IPython/utils/process.py
--- a/IPython/utils/process.py
+++ b/IPython/utils/process.py
@@ -23,7 +23,7 @@
if sys.platform == 'win32':
from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid
elif sys.platform == 'cli':
- from ._process_cli import _find_cmd, system, getoutput, arg_split
+ from ._process_cli import _find_cmd, system, getoutput, arg_split, check_pid
else:
from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid
| {"golden_diff": "diff --git a/IPython/utils/_process_cli.py b/IPython/utils/_process_cli.py\n--- a/IPython/utils/_process_cli.py\n+++ b/IPython/utils/_process_cli.py\n@@ -61,3 +61,18 @@\n myError = reg.StandardError\n error = myError.ReadToEnd()\n return output\n+\n+def check_pid(pid):\n+ \"\"\"\n+ Check if a process with the given PID (pid) exists\n+ \"\"\"\n+ try:\n+ System.Diagnostics.Process.GetProcessById(pid)\n+ # process with given pid is running\n+ return True\n+ except System.InvalidOperationException:\n+ # process wasn't started by this object (but is running)\n+ return True\n+ except System.ArgumentException:\n+ # process with given pid isn't running\n+ return False \ndiff --git a/IPython/utils/process.py b/IPython/utils/process.py\n--- a/IPython/utils/process.py\n+++ b/IPython/utils/process.py\n@@ -23,7 +23,7 @@\n if sys.platform == 'win32':\n from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid\n elif sys.platform == 'cli':\n- from ._process_cli import _find_cmd, system, getoutput, arg_split\n+ from ._process_cli import _find_cmd, system, getoutput, arg_split, check_pid\n else:\n from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid\n", "issue": "Implement check_pid for .NET\nThis is a reminder - I added an `IPython.utils.process.check_pid` function to check if a process exists with a given PID. However, we have a `_process_cli` module containing .NET implementations of these things, and I didn't write the check_pid implementation for .NET.\n\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nUtilities for working with external processes.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport os\nimport sys\n\n# Our own\nif sys.platform == 'win32':\n from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid\nelif sys.platform == 'cli':\n from ._process_cli import _find_cmd, system, getoutput, arg_split\nelse:\n from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid\n\nfrom ._process_common import getoutputerror, get_output_error_code, process_handler\nfrom . import py3compat\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n\nclass FindCmdError(Exception):\n pass\n\n\ndef find_cmd(cmd):\n \"\"\"Find absolute path to executable cmd in a cross platform manner.\n\n This function tries to determine the full path to a command line program\n using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the\n time it will use the version that is first on the users `PATH`.\n\n Warning, don't use this to find IPython command line programs as there\n is a risk you will find the wrong one. 
Instead find those using the\n following code and looking for the application itself::\n\n from IPython.utils.path import get_ipython_module_path\n from IPython.utils.process import pycmd2argv\n argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))\n\n Parameters\n ----------\n cmd : str\n The command line program to look for.\n \"\"\"\n try:\n path = _find_cmd(cmd).rstrip()\n except OSError:\n raise FindCmdError('command could not be found: %s' % cmd)\n # which returns empty if not found\n if path == '':\n raise FindCmdError('command could not be found: %s' % cmd)\n return os.path.abspath(path)\n\n\ndef is_cmd_found(cmd):\n \"\"\"Check whether executable `cmd` exists or not and return a bool.\"\"\"\n try:\n find_cmd(cmd)\n return True\n except FindCmdError:\n return False\n\n\ndef pycmd2argv(cmd):\n r\"\"\"Take the path of a python command and return a list (argv-style).\n\n This only works on Python based command line programs and will find the\n location of the ``python`` executable using ``sys.executable`` to make\n sure the right version is used.\n\n For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,\n .com or .bat, and [, cmd] otherwise.\n\n Parameters\n ----------\n cmd : string\n The path of the command.\n\n Returns\n -------\n argv-style list.\n \"\"\"\n ext = os.path.splitext(cmd)[1]\n if ext in ['.exe', '.com', '.bat']:\n return [cmd]\n else:\n return [sys.executable, cmd]\n\n\ndef abbrev_cwd():\n \"\"\" Return abbreviated version of cwd, e.g. d:mydir \"\"\"\n cwd = py3compat.getcwd().replace('\\\\','/')\n drivepart = ''\n tail = cwd\n if sys.platform == 'win32':\n if len(cwd) < 4:\n return cwd\n drivepart,tail = os.path.splitdrive(cwd)\n\n\n parts = tail.split('/')\n if len(parts) > 2:\n tail = '/'.join(parts[-2:])\n\n return (drivepart + (\n cwd == '/' and '/' or tail))\n", "path": "IPython/utils/process.py"}, {"content": "\"\"\"cli-specific implementation of process utilities.\n\ncli - Common Language Infrastructure for IronPython. Code\n can run on any operating system. Check os.name for os-\n specific settings.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\nThis file is largely untested. To become a full drop-in process\ninterface for IronPython will probably require you to help fill\nin the details. 
\n\"\"\"\n\n# Import cli libraries:\nimport clr\nimport System\n\n# Import Python libraries:\nimport os\n\n# Import IPython libraries:\nfrom IPython.utils import py3compat\nfrom ._process_common import arg_split\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n paths = System.Environment.GetEnvironmentVariable(\"PATH\").Split(os.pathsep)\n for path in paths:\n filename = os.path.join(path, cmd)\n if System.IO.File.Exists(filename):\n return py3compat.bytes_to_str(filename)\n raise OSError(\"command %r not found\" % cmd)\n\ndef system(cmd):\n \"\"\"\n system(cmd) should work in a cli environment on Mac OSX, Linux,\n and Windows\n \"\"\"\n psi = System.Diagnostics.ProcessStartInfo(cmd)\n psi.RedirectStandardOutput = True\n psi.RedirectStandardError = True\n psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal\n psi.UseShellExecute = False\n # Start up process:\n reg = System.Diagnostics.Process.Start(psi)\n\ndef getoutput(cmd):\n \"\"\"\n getoutput(cmd) should work in a cli environment on Mac OSX, Linux,\n and Windows\n \"\"\"\n psi = System.Diagnostics.ProcessStartInfo(cmd)\n psi.RedirectStandardOutput = True\n psi.RedirectStandardError = True\n psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal\n psi.UseShellExecute = False\n # Start up process:\n reg = System.Diagnostics.Process.Start(psi)\n myOutput = reg.StandardOutput\n output = myOutput.ReadToEnd()\n myError = reg.StandardError\n error = myError.ReadToEnd()\n return output\n", "path": "IPython/utils/_process_cli.py"}], "after_files": [{"content": "# encoding: utf-8\n\"\"\"\nUtilities for working with external processes.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport os\nimport sys\n\n# Our own\nif sys.platform == 'win32':\n from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid\nelif sys.platform == 'cli':\n from ._process_cli import _find_cmd, system, getoutput, arg_split, check_pid\nelse:\n from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid\n\nfrom ._process_common import getoutputerror, get_output_error_code, process_handler\nfrom . import py3compat\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n\nclass FindCmdError(Exception):\n pass\n\n\ndef find_cmd(cmd):\n \"\"\"Find absolute path to executable cmd in a cross platform manner.\n\n This function tries to determine the full path to a command line program\n using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the\n time it will use the version that is first on the users `PATH`.\n\n Warning, don't use this to find IPython command line programs as there\n is a risk you will find the wrong one. 
Instead find those using the\n following code and looking for the application itself::\n\n from IPython.utils.path import get_ipython_module_path\n from IPython.utils.process import pycmd2argv\n argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))\n\n Parameters\n ----------\n cmd : str\n The command line program to look for.\n \"\"\"\n try:\n path = _find_cmd(cmd).rstrip()\n except OSError:\n raise FindCmdError('command could not be found: %s' % cmd)\n # which returns empty if not found\n if path == '':\n raise FindCmdError('command could not be found: %s' % cmd)\n return os.path.abspath(path)\n\n\ndef is_cmd_found(cmd):\n \"\"\"Check whether executable `cmd` exists or not and return a bool.\"\"\"\n try:\n find_cmd(cmd)\n return True\n except FindCmdError:\n return False\n\n\ndef pycmd2argv(cmd):\n r\"\"\"Take the path of a python command and return a list (argv-style).\n\n This only works on Python based command line programs and will find the\n location of the ``python`` executable using ``sys.executable`` to make\n sure the right version is used.\n\n For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,\n .com or .bat, and [, cmd] otherwise.\n\n Parameters\n ----------\n cmd : string\n The path of the command.\n\n Returns\n -------\n argv-style list.\n \"\"\"\n ext = os.path.splitext(cmd)[1]\n if ext in ['.exe', '.com', '.bat']:\n return [cmd]\n else:\n return [sys.executable, cmd]\n\n\ndef abbrev_cwd():\n \"\"\" Return abbreviated version of cwd, e.g. d:mydir \"\"\"\n cwd = py3compat.getcwd().replace('\\\\','/')\n drivepart = ''\n tail = cwd\n if sys.platform == 'win32':\n if len(cwd) < 4:\n return cwd\n drivepart,tail = os.path.splitdrive(cwd)\n\n\n parts = tail.split('/')\n if len(parts) > 2:\n tail = '/'.join(parts[-2:])\n\n return (drivepart + (\n cwd == '/' and '/' or tail))\n", "path": "IPython/utils/process.py"}, {"content": "\"\"\"cli-specific implementation of process utilities.\n\ncli - Common Language Infrastructure for IronPython. Code\n can run on any operating system. Check os.name for os-\n specific settings.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\nThis file is largely untested. To become a full drop-in process\ninterface for IronPython will probably require you to help fill\nin the details. 
\n\"\"\"\n\n# Import cli libraries:\nimport clr\nimport System\n\n# Import Python libraries:\nimport os\n\n# Import IPython libraries:\nfrom IPython.utils import py3compat\nfrom ._process_common import arg_split\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n paths = System.Environment.GetEnvironmentVariable(\"PATH\").Split(os.pathsep)\n for path in paths:\n filename = os.path.join(path, cmd)\n if System.IO.File.Exists(filename):\n return py3compat.bytes_to_str(filename)\n raise OSError(\"command %r not found\" % cmd)\n\ndef system(cmd):\n \"\"\"\n system(cmd) should work in a cli environment on Mac OSX, Linux,\n and Windows\n \"\"\"\n psi = System.Diagnostics.ProcessStartInfo(cmd)\n psi.RedirectStandardOutput = True\n psi.RedirectStandardError = True\n psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal\n psi.UseShellExecute = False\n # Start up process:\n reg = System.Diagnostics.Process.Start(psi)\n\ndef getoutput(cmd):\n \"\"\"\n getoutput(cmd) should work in a cli environment on Mac OSX, Linux,\n and Windows\n \"\"\"\n psi = System.Diagnostics.ProcessStartInfo(cmd)\n psi.RedirectStandardOutput = True\n psi.RedirectStandardError = True\n psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal\n psi.UseShellExecute = False\n # Start up process:\n reg = System.Diagnostics.Process.Start(psi)\n myOutput = reg.StandardOutput\n output = myOutput.ReadToEnd()\n myError = reg.StandardError\n error = myError.ReadToEnd()\n return output\n\ndef check_pid(pid):\n \"\"\"\n Check if a process with the given PID (pid) exists\n \"\"\"\n try:\n System.Diagnostics.Process.GetProcessById(pid)\n # process with given pid is running\n return True\n except System.InvalidOperationException:\n # process wasn't started by this object (but is running)\n return True\n except System.ArgumentException:\n # process with given pid isn't running\n return False \n", "path": "IPython/utils/_process_cli.py"}]} | 2,016 | 327 |
gh_patches_debug_14859 | rasdani/github-patches | git_diff | beetbox__beets-1061 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow plugins to define the same flexible fields if they are of the same type
It should be possible for plugins to define the same flexible fields if they are of the same type. At the moment lastfmimport and mpdstats define 'play_count' which leads to:
`beets.plugins.PluginConflictException: Plugin mpdstats defines flexible field play_count which has already been defined.`
--- END ISSUE ---
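As a rough illustration of the behaviour the issue asks for, the sketch below merges per-plugin field declarations and rejects only genuine type conflicts, so two plugins may both declare `play_count` as long as the declared types agree. This is a hypothetical, self-contained example: `merge_field_types`, `PluginConflictError`, and the use of plain Python types such as `int` are assumptions made for illustration and are not actual beets APIs.

```python
# Hypothetical sketch, not beets code: tolerate identical re-declarations of a
# flexible field and raise only when the declared types actually differ.

class PluginConflictError(Exception):
    """Raised when two plugins declare the same field with different types."""


def merge_field_types(declarations):
    """Merge {field: type} mappings contributed by several plugins.

    ``declarations`` is an iterable of (plugin_name, {field: type}) pairs.
    """
    merged = {}
    for plugin_name, fields in declarations:
        for field, field_type in fields.items():
            if field in merged and merged[field] != field_type:
                raise PluginConflictError(
                    'Plugin {0} redefines field {1} with another type'.format(
                        plugin_name, field
                    )
                )
            merged[field] = field_type
    return merged


# Both plugins declare play_count with the same type, so the merge succeeds.
merged = merge_field_types([
    ('lastfmimport', {'play_count': int}),
    ('mpdstats', {'play_count': int, 'last_played': float}),
])
assert merged['play_count'] is int
```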
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beets/plugins.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2013, Adrian Sampson.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Support for beets plugins."""
16
17 import logging
18 import traceback
19 from collections import defaultdict
20 import inspect
21
22 import beets
23 from beets import mediafile
24
25 PLUGIN_NAMESPACE = 'beetsplug'
26
27 # Plugins using the Last.fm API can share the same API key.
28 LASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'
29
30 # Global logger.
31 log = logging.getLogger('beets')
32
33
34 class PluginConflictException(Exception):
35 """Indicates that the services provided by one plugin conflict with
36 those of another.
37
38 For example two plugins may define different types for flexible fields.
39 """
40
41
42 # Managing the plugins themselves.
43
44 class BeetsPlugin(object):
45 """The base class for all beets plugins. Plugins provide
46 functionality by defining a subclass of BeetsPlugin and overriding
47 the abstract methods defined here.
48 """
49 def __init__(self, name=None):
50 """Perform one-time plugin setup.
51 """
52 self.import_stages = []
53 self.name = name or self.__module__.split('.')[-1]
54 self.config = beets.config[self.name]
55 if not self.template_funcs:
56 self.template_funcs = {}
57 if not self.template_fields:
58 self.template_fields = {}
59 if not self.album_template_fields:
60 self.album_template_fields = {}
61
62 def commands(self):
63 """Should return a list of beets.ui.Subcommand objects for
64 commands that should be added to beets' CLI.
65 """
66 return ()
67
68 def queries(self):
69 """Should return a dict mapping prefixes to Query subclasses.
70 """
71 return {}
72
73 def track_distance(self, item, info):
74 """Should return a Distance object to be added to the
75 distance for every track comparison.
76 """
77 return beets.autotag.hooks.Distance()
78
79 def album_distance(self, items, album_info, mapping):
80 """Should return a Distance object to be added to the
81 distance for every album-level comparison.
82 """
83 return beets.autotag.hooks.Distance()
84
85 def candidates(self, items, artist, album, va_likely):
86 """Should return a sequence of AlbumInfo objects that match the
87 album whose items are provided.
88 """
89 return ()
90
91 def item_candidates(self, item, artist, title):
92 """Should return a sequence of TrackInfo objects that match the
93 item provided.
94 """
95 return ()
96
97 def album_for_id(self, album_id):
98 """Return an AlbumInfo object or None if no matching release was
99 found.
100 """
101 return None
102
103 def track_for_id(self, track_id):
104 """Return a TrackInfo object or None if no matching release was
105 found.
106 """
107 return None
108
109 def add_media_field(self, name, descriptor):
110 """Add a field that is synchronized between media files and items.
111
112 When a media field is added ``item.write()`` will set the name
113 property of the item's MediaFile to ``item[name]`` and save the
114 changes. Similarly ``item.read()`` will set ``item[name]`` to
115 the value of the name property of the media file.
116
117 ``descriptor`` must be an instance of ``mediafile.MediaField``.
118 """
119         # Defer import to prevent circular dependency
120 from beets import library
121 mediafile.MediaFile.add_field(name, descriptor)
122 library.Item._media_fields.add(name)
123
124 listeners = None
125
126 @classmethod
127 def register_listener(cls, event, func):
128 """Add a function as a listener for the specified event. (An
129 imperative alternative to the @listen decorator.)
130 """
131 if cls.listeners is None:
132 cls.listeners = defaultdict(list)
133 cls.listeners[event].append(func)
134
135 @classmethod
136 def listen(cls, event):
137 """Decorator that adds a function as an event handler for the
138 specified event (as a string). The parameters passed to function
139 will vary depending on what event occurred.
140
141 The function should respond to named parameters.
142 function(**kwargs) will trap all arguments in a dictionary.
143 Example:
144
145 >>> @MyPlugin.listen("imported")
146 >>> def importListener(**kwargs):
147 ... pass
148 """
149 def helper(func):
150 if cls.listeners is None:
151 cls.listeners = defaultdict(list)
152 cls.listeners[event].append(func)
153 return func
154 return helper
155
156 template_funcs = None
157 template_fields = None
158 album_template_fields = None
159
160 @classmethod
161 def template_func(cls, name):
162 """Decorator that registers a path template function. The
163 function will be invoked as ``%name{}`` from path format
164 strings.
165 """
166 def helper(func):
167 if cls.template_funcs is None:
168 cls.template_funcs = {}
169 cls.template_funcs[name] = func
170 return func
171 return helper
172
173 @classmethod
174 def template_field(cls, name):
175 """Decorator that registers a path template field computation.
176 The value will be referenced as ``$name`` from path format
177 strings. The function must accept a single parameter, the Item
178 being formatted.
179 """
180 def helper(func):
181 if cls.template_fields is None:
182 cls.template_fields = {}
183 cls.template_fields[name] = func
184 return func
185 return helper
186
187
188 _classes = set()
189
190
191 def load_plugins(names=()):
192 """Imports the modules for a sequence of plugin names. Each name
193 must be the name of a Python module under the "beetsplug" namespace
194 package in sys.path; the module indicated should contain the
195 BeetsPlugin subclasses desired.
196 """
197 for name in names:
198 modname = '%s.%s' % (PLUGIN_NAMESPACE, name)
199 try:
200 try:
201 namespace = __import__(modname, None, None)
202 except ImportError as exc:
203 # Again, this is hacky:
204 if exc.args[0].endswith(' ' + name):
205 log.warn(u'** plugin {0} not found'.format(name))
206 else:
207 raise
208 else:
209 for obj in getattr(namespace, name).__dict__.values():
210 if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \
211 and obj != BeetsPlugin and obj not in _classes:
212 _classes.add(obj)
213
214 except:
215 log.warn(u'** error loading plugin {0}'.format(name))
216 log.warn(traceback.format_exc())
217
218
219 _instances = {}
220
221
222 def find_plugins():
223 """Returns a list of BeetsPlugin subclass instances from all
224 currently loaded beets plugins. Loads the default plugin set
225 first.
226 """
227 load_plugins()
228 plugins = []
229 for cls in _classes:
230 # Only instantiate each plugin class once.
231 if cls not in _instances:
232 _instances[cls] = cls()
233 plugins.append(_instances[cls])
234 return plugins
235
236
237 # Communication with plugins.
238
239 def commands():
240 """Returns a list of Subcommand objects from all loaded plugins.
241 """
242 out = []
243 for plugin in find_plugins():
244 out += plugin.commands()
245 return out
246
247
248 def queries():
249 """Returns a dict mapping prefix strings to Query subclasses all loaded
250 plugins.
251 """
252 out = {}
253 for plugin in find_plugins():
254 out.update(plugin.queries())
255 return out
256
257
258 def types(model_cls):
259 # Gives us `item_types` and `album_types`
260 attr_name = '{0}_types'.format(model_cls.__name__.lower())
261 types = {}
262 for plugin in find_plugins():
263 plugin_types = getattr(plugin, attr_name, {})
264 for field in plugin_types:
265 if field in types:
266 raise PluginConflictException(
267 u'Plugin {0} defines flexible field {1} '
268 'which has already been defined.'.format(
269 plugin.name, field
270 )
271 )
272 types.update(plugin_types)
273 return types
274
275
276 def track_distance(item, info):
277 """Gets the track distance calculated by all loaded plugins.
278 Returns a Distance object.
279 """
280 from beets.autotag.hooks import Distance
281 dist = Distance()
282 for plugin in find_plugins():
283 dist.update(plugin.track_distance(item, info))
284 return dist
285
286
287 def album_distance(items, album_info, mapping):
288 """Returns the album distance calculated by plugins."""
289 from beets.autotag.hooks import Distance
290 dist = Distance()
291 for plugin in find_plugins():
292 dist.update(plugin.album_distance(items, album_info, mapping))
293 return dist
294
295
296 def candidates(items, artist, album, va_likely):
297 """Gets MusicBrainz candidates for an album from each plugin.
298 """
299 out = []
300 for plugin in find_plugins():
301 out.extend(plugin.candidates(items, artist, album, va_likely))
302 return out
303
304
305 def item_candidates(item, artist, title):
306 """Gets MusicBrainz candidates for an item from the plugins.
307 """
308 out = []
309 for plugin in find_plugins():
310 out.extend(plugin.item_candidates(item, artist, title))
311 return out
312
313
314 def album_for_id(album_id):
315 """Get AlbumInfo objects for a given ID string.
316 """
317 out = []
318 for plugin in find_plugins():
319 res = plugin.album_for_id(album_id)
320 if res:
321 out.append(res)
322 return out
323
324
325 def track_for_id(track_id):
326 """Get TrackInfo objects for a given ID string.
327 """
328 out = []
329 for plugin in find_plugins():
330 res = plugin.track_for_id(track_id)
331 if res:
332 out.append(res)
333 return out
334
335
336 def template_funcs():
337 """Get all the template functions declared by plugins as a
338 dictionary.
339 """
340 funcs = {}
341 for plugin in find_plugins():
342 if plugin.template_funcs:
343 funcs.update(plugin.template_funcs)
344 return funcs
345
346
347 def import_stages():
348 """Get a list of import stage functions defined by plugins."""
349 stages = []
350 for plugin in find_plugins():
351 if hasattr(plugin, 'import_stages'):
352 stages += plugin.import_stages
353 return stages
354
355
356 # New-style (lazy) plugin-provided fields.
357
358 def item_field_getters():
359 """Get a dictionary mapping field names to unary functions that
360 compute the field's value.
361 """
362 funcs = {}
363 for plugin in find_plugins():
364 if plugin.template_fields:
365 funcs.update(plugin.template_fields)
366 return funcs
367
368
369 def album_field_getters():
370 """As above, for album fields.
371 """
372 funcs = {}
373 for plugin in find_plugins():
374 if plugin.album_template_fields:
375 funcs.update(plugin.album_template_fields)
376 return funcs
377
378
379 # Event dispatch.
380
381 def event_handlers():
382 """Find all event handlers from plugins as a dictionary mapping
383 event names to sequences of callables.
384 """
385 all_handlers = defaultdict(list)
386 for plugin in find_plugins():
387 if plugin.listeners:
388 for event, handlers in plugin.listeners.items():
389 all_handlers[event] += handlers
390 return all_handlers
391
392
393 def send(event, **arguments):
394 """Sends an event to all assigned event listeners. Event is the
395 name of the event to send, all other named arguments go to the
396 event handler(s).
397
398 Returns a list of return values from the handlers.
399 """
400 log.debug(u'Sending event: {0}'.format(event))
401 for handler in event_handlers()[event]:
402 # Don't break legacy plugins if we want to pass more arguments
403 argspec = inspect.getargspec(handler).args
404 args = dict((k, v) for k, v in arguments.items() if k in argspec)
405 handler(**args)
406
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beets/plugins.py b/beets/plugins.py
--- a/beets/plugins.py
+++ b/beets/plugins.py
@@ -262,12 +262,11 @@
for plugin in find_plugins():
plugin_types = getattr(plugin, attr_name, {})
for field in plugin_types:
- if field in types:
+ if field in types and plugin_types[field] != types[field]:
raise PluginConflictException(
u'Plugin {0} defines flexible field {1} '
- 'which has already been defined.'.format(
- plugin.name, field
- )
+ 'which has already been defined with '
+ 'another type.'.format(plugin.name, field)
)
types.update(plugin_types)
return types
| {"golden_diff": "diff --git a/beets/plugins.py b/beets/plugins.py\n--- a/beets/plugins.py\n+++ b/beets/plugins.py\n@@ -262,12 +262,11 @@\n for plugin in find_plugins():\n plugin_types = getattr(plugin, attr_name, {})\n for field in plugin_types:\n- if field in types:\n+ if field in types and plugin_types[field] != types[field]:\n raise PluginConflictException(\n u'Plugin {0} defines flexible field {1} '\n- 'which has already been defined.'.format(\n- plugin.name, field\n- )\n+ 'which has already been defined with '\n+ 'another type.'.format(plugin.name, field)\n )\n types.update(plugin_types)\n return types\n", "issue": "Allow plugins to define the same flexible fields if they are of the same type\nIt should be possible for plugins to define the same flexible fields if they are of the same type. At the moment lastfmimport and mpdstats define 'play_count' which leads to:\n\n`beets.plugins.PluginConflictException: Plugin mpdstats defines flexible field play_count which has already been defined.`\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Support for beets plugins.\"\"\"\n\nimport logging\nimport traceback\nfrom collections import defaultdict\nimport inspect\n\nimport beets\nfrom beets import mediafile\n\nPLUGIN_NAMESPACE = 'beetsplug'\n\n# Plugins using the Last.fm API can share the same API key.\nLASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'\n\n# Global logger.\nlog = logging.getLogger('beets')\n\n\nclass PluginConflictException(Exception):\n \"\"\"Indicates that the services provided by one plugin conflict with\n those of another.\n\n For example two plugins may define different types for flexible fields.\n \"\"\"\n\n\n# Managing the plugins themselves.\n\nclass BeetsPlugin(object):\n \"\"\"The base class for all beets plugins. 
Plugins provide\n functionality by defining a subclass of BeetsPlugin and overriding\n the abstract methods defined here.\n \"\"\"\n def __init__(self, name=None):\n \"\"\"Perform one-time plugin setup.\n \"\"\"\n self.import_stages = []\n self.name = name or self.__module__.split('.')[-1]\n self.config = beets.config[self.name]\n if not self.template_funcs:\n self.template_funcs = {}\n if not self.template_fields:\n self.template_fields = {}\n if not self.album_template_fields:\n self.album_template_fields = {}\n\n def commands(self):\n \"\"\"Should return a list of beets.ui.Subcommand objects for\n commands that should be added to beets' CLI.\n \"\"\"\n return ()\n\n def queries(self):\n \"\"\"Should return a dict mapping prefixes to Query subclasses.\n \"\"\"\n return {}\n\n def track_distance(self, item, info):\n \"\"\"Should return a Distance object to be added to the\n distance for every track comparison.\n \"\"\"\n return beets.autotag.hooks.Distance()\n\n def album_distance(self, items, album_info, mapping):\n \"\"\"Should return a Distance object to be added to the\n distance for every album-level comparison.\n \"\"\"\n return beets.autotag.hooks.Distance()\n\n def candidates(self, items, artist, album, va_likely):\n \"\"\"Should return a sequence of AlbumInfo objects that match the\n album whose items are provided.\n \"\"\"\n return ()\n\n def item_candidates(self, item, artist, title):\n \"\"\"Should return a sequence of TrackInfo objects that match the\n item provided.\n \"\"\"\n return ()\n\n def album_for_id(self, album_id):\n \"\"\"Return an AlbumInfo object or None if no matching release was\n found.\n \"\"\"\n return None\n\n def track_for_id(self, track_id):\n \"\"\"Return a TrackInfo object or None if no matching release was\n found.\n \"\"\"\n return None\n\n def add_media_field(self, name, descriptor):\n \"\"\"Add a field that is synchronized between media files and items.\n\n When a media field is added ``item.write()`` will set the name\n property of the item's MediaFile to ``item[name]`` and save the\n changes. Similarly ``item.read()`` will set ``item[name]`` to\n the value of the name property of the media file.\n\n ``descriptor`` must be an instance of ``mediafile.MediaField``.\n \"\"\"\n # Defer impor to prevent circular dependency\n from beets import library\n mediafile.MediaFile.add_field(name, descriptor)\n library.Item._media_fields.add(name)\n\n listeners = None\n\n @classmethod\n def register_listener(cls, event, func):\n \"\"\"Add a function as a listener for the specified event. (An\n imperative alternative to the @listen decorator.)\n \"\"\"\n if cls.listeners is None:\n cls.listeners = defaultdict(list)\n cls.listeners[event].append(func)\n\n @classmethod\n def listen(cls, event):\n \"\"\"Decorator that adds a function as an event handler for the\n specified event (as a string). The parameters passed to function\n will vary depending on what event occurred.\n\n The function should respond to named parameters.\n function(**kwargs) will trap all arguments in a dictionary.\n Example:\n\n >>> @MyPlugin.listen(\"imported\")\n >>> def importListener(**kwargs):\n ... pass\n \"\"\"\n def helper(func):\n if cls.listeners is None:\n cls.listeners = defaultdict(list)\n cls.listeners[event].append(func)\n return func\n return helper\n\n template_funcs = None\n template_fields = None\n album_template_fields = None\n\n @classmethod\n def template_func(cls, name):\n \"\"\"Decorator that registers a path template function. 
The\n function will be invoked as ``%name{}`` from path format\n strings.\n \"\"\"\n def helper(func):\n if cls.template_funcs is None:\n cls.template_funcs = {}\n cls.template_funcs[name] = func\n return func\n return helper\n\n @classmethod\n def template_field(cls, name):\n \"\"\"Decorator that registers a path template field computation.\n The value will be referenced as ``$name`` from path format\n strings. The function must accept a single parameter, the Item\n being formatted.\n \"\"\"\n def helper(func):\n if cls.template_fields is None:\n cls.template_fields = {}\n cls.template_fields[name] = func\n return func\n return helper\n\n\n_classes = set()\n\n\ndef load_plugins(names=()):\n \"\"\"Imports the modules for a sequence of plugin names. Each name\n must be the name of a Python module under the \"beetsplug\" namespace\n package in sys.path; the module indicated should contain the\n BeetsPlugin subclasses desired.\n \"\"\"\n for name in names:\n modname = '%s.%s' % (PLUGIN_NAMESPACE, name)\n try:\n try:\n namespace = __import__(modname, None, None)\n except ImportError as exc:\n # Again, this is hacky:\n if exc.args[0].endswith(' ' + name):\n log.warn(u'** plugin {0} not found'.format(name))\n else:\n raise\n else:\n for obj in getattr(namespace, name).__dict__.values():\n if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \\\n and obj != BeetsPlugin and obj not in _classes:\n _classes.add(obj)\n\n except:\n log.warn(u'** error loading plugin {0}'.format(name))\n log.warn(traceback.format_exc())\n\n\n_instances = {}\n\n\ndef find_plugins():\n \"\"\"Returns a list of BeetsPlugin subclass instances from all\n currently loaded beets plugins. Loads the default plugin set\n first.\n \"\"\"\n load_plugins()\n plugins = []\n for cls in _classes:\n # Only instantiate each plugin class once.\n if cls not in _instances:\n _instances[cls] = cls()\n plugins.append(_instances[cls])\n return plugins\n\n\n# Communication with plugins.\n\ndef commands():\n \"\"\"Returns a list of Subcommand objects from all loaded plugins.\n \"\"\"\n out = []\n for plugin in find_plugins():\n out += plugin.commands()\n return out\n\n\ndef queries():\n \"\"\"Returns a dict mapping prefix strings to Query subclasses all loaded\n plugins.\n \"\"\"\n out = {}\n for plugin in find_plugins():\n out.update(plugin.queries())\n return out\n\n\ndef types(model_cls):\n # Gives us `item_types` and `album_types`\n attr_name = '{0}_types'.format(model_cls.__name__.lower())\n types = {}\n for plugin in find_plugins():\n plugin_types = getattr(plugin, attr_name, {})\n for field in plugin_types:\n if field in types:\n raise PluginConflictException(\n u'Plugin {0} defines flexible field {1} '\n 'which has already been defined.'.format(\n plugin.name, field\n )\n )\n types.update(plugin_types)\n return types\n\n\ndef track_distance(item, info):\n \"\"\"Gets the track distance calculated by all loaded plugins.\n Returns a Distance object.\n \"\"\"\n from beets.autotag.hooks import Distance\n dist = Distance()\n for plugin in find_plugins():\n dist.update(plugin.track_distance(item, info))\n return dist\n\n\ndef album_distance(items, album_info, mapping):\n \"\"\"Returns the album distance calculated by plugins.\"\"\"\n from beets.autotag.hooks import Distance\n dist = Distance()\n for plugin in find_plugins():\n dist.update(plugin.album_distance(items, album_info, mapping))\n return dist\n\n\ndef candidates(items, artist, album, va_likely):\n \"\"\"Gets MusicBrainz candidates for an album from each plugin.\n \"\"\"\n out = 
[]\n for plugin in find_plugins():\n out.extend(plugin.candidates(items, artist, album, va_likely))\n return out\n\n\ndef item_candidates(item, artist, title):\n \"\"\"Gets MusicBrainz candidates for an item from the plugins.\n \"\"\"\n out = []\n for plugin in find_plugins():\n out.extend(plugin.item_candidates(item, artist, title))\n return out\n\n\ndef album_for_id(album_id):\n \"\"\"Get AlbumInfo objects for a given ID string.\n \"\"\"\n out = []\n for plugin in find_plugins():\n res = plugin.album_for_id(album_id)\n if res:\n out.append(res)\n return out\n\n\ndef track_for_id(track_id):\n \"\"\"Get TrackInfo objects for a given ID string.\n \"\"\"\n out = []\n for plugin in find_plugins():\n res = plugin.track_for_id(track_id)\n if res:\n out.append(res)\n return out\n\n\ndef template_funcs():\n \"\"\"Get all the template functions declared by plugins as a\n dictionary.\n \"\"\"\n funcs = {}\n for plugin in find_plugins():\n if plugin.template_funcs:\n funcs.update(plugin.template_funcs)\n return funcs\n\n\ndef import_stages():\n \"\"\"Get a list of import stage functions defined by plugins.\"\"\"\n stages = []\n for plugin in find_plugins():\n if hasattr(plugin, 'import_stages'):\n stages += plugin.import_stages\n return stages\n\n\n# New-style (lazy) plugin-provided fields.\n\ndef item_field_getters():\n \"\"\"Get a dictionary mapping field names to unary functions that\n compute the field's value.\n \"\"\"\n funcs = {}\n for plugin in find_plugins():\n if plugin.template_fields:\n funcs.update(plugin.template_fields)\n return funcs\n\n\ndef album_field_getters():\n \"\"\"As above, for album fields.\n \"\"\"\n funcs = {}\n for plugin in find_plugins():\n if plugin.album_template_fields:\n funcs.update(plugin.album_template_fields)\n return funcs\n\n\n# Event dispatch.\n\ndef event_handlers():\n \"\"\"Find all event handlers from plugins as a dictionary mapping\n event names to sequences of callables.\n \"\"\"\n all_handlers = defaultdict(list)\n for plugin in find_plugins():\n if plugin.listeners:\n for event, handlers in plugin.listeners.items():\n all_handlers[event] += handlers\n return all_handlers\n\n\ndef send(event, **arguments):\n \"\"\"Sends an event to all assigned event listeners. 
Event is the\n name of the event to send, all other named arguments go to the\n event handler(s).\n\n Returns a list of return values from the handlers.\n \"\"\"\n log.debug(u'Sending event: {0}'.format(event))\n for handler in event_handlers()[event]:\n # Don't break legacy plugins if we want to pass more arguments\n argspec = inspect.getargspec(handler).args\n args = dict((k, v) for k, v in arguments.items() if k in argspec)\n handler(**args)\n", "path": "beets/plugins.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Support for beets plugins.\"\"\"\n\nimport logging\nimport traceback\nfrom collections import defaultdict\nimport inspect\n\nimport beets\nfrom beets import mediafile\n\nPLUGIN_NAMESPACE = 'beetsplug'\n\n# Plugins using the Last.fm API can share the same API key.\nLASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'\n\n# Global logger.\nlog = logging.getLogger('beets')\n\n\nclass PluginConflictException(Exception):\n \"\"\"Indicates that the services provided by one plugin conflict with\n those of another.\n\n For example two plugins may define different types for flexible fields.\n \"\"\"\n\n\n# Managing the plugins themselves.\n\nclass BeetsPlugin(object):\n \"\"\"The base class for all beets plugins. 
Plugins provide\n functionality by defining a subclass of BeetsPlugin and overriding\n the abstract methods defined here.\n \"\"\"\n def __init__(self, name=None):\n \"\"\"Perform one-time plugin setup.\n \"\"\"\n self.import_stages = []\n self.name = name or self.__module__.split('.')[-1]\n self.config = beets.config[self.name]\n if not self.template_funcs:\n self.template_funcs = {}\n if not self.template_fields:\n self.template_fields = {}\n if not self.album_template_fields:\n self.album_template_fields = {}\n\n def commands(self):\n \"\"\"Should return a list of beets.ui.Subcommand objects for\n commands that should be added to beets' CLI.\n \"\"\"\n return ()\n\n def queries(self):\n \"\"\"Should return a dict mapping prefixes to Query subclasses.\n \"\"\"\n return {}\n\n def track_distance(self, item, info):\n \"\"\"Should return a Distance object to be added to the\n distance for every track comparison.\n \"\"\"\n return beets.autotag.hooks.Distance()\n\n def album_distance(self, items, album_info, mapping):\n \"\"\"Should return a Distance object to be added to the\n distance for every album-level comparison.\n \"\"\"\n return beets.autotag.hooks.Distance()\n\n def candidates(self, items, artist, album, va_likely):\n \"\"\"Should return a sequence of AlbumInfo objects that match the\n album whose items are provided.\n \"\"\"\n return ()\n\n def item_candidates(self, item, artist, title):\n \"\"\"Should return a sequence of TrackInfo objects that match the\n item provided.\n \"\"\"\n return ()\n\n def album_for_id(self, album_id):\n \"\"\"Return an AlbumInfo object or None if no matching release was\n found.\n \"\"\"\n return None\n\n def track_for_id(self, track_id):\n \"\"\"Return a TrackInfo object or None if no matching release was\n found.\n \"\"\"\n return None\n\n def add_media_field(self, name, descriptor):\n \"\"\"Add a field that is synchronized between media files and items.\n\n When a media field is added ``item.write()`` will set the name\n property of the item's MediaFile to ``item[name]`` and save the\n changes. Similarly ``item.read()`` will set ``item[name]`` to\n the value of the name property of the media file.\n\n ``descriptor`` must be an instance of ``mediafile.MediaField``.\n \"\"\"\n # Defer impor to prevent circular dependency\n from beets import library\n mediafile.MediaFile.add_field(name, descriptor)\n library.Item._media_fields.add(name)\n\n listeners = None\n\n @classmethod\n def register_listener(cls, event, func):\n \"\"\"Add a function as a listener for the specified event. (An\n imperative alternative to the @listen decorator.)\n \"\"\"\n if cls.listeners is None:\n cls.listeners = defaultdict(list)\n cls.listeners[event].append(func)\n\n @classmethod\n def listen(cls, event):\n \"\"\"Decorator that adds a function as an event handler for the\n specified event (as a string). The parameters passed to function\n will vary depending on what event occurred.\n\n The function should respond to named parameters.\n function(**kwargs) will trap all arguments in a dictionary.\n Example:\n\n >>> @MyPlugin.listen(\"imported\")\n >>> def importListener(**kwargs):\n ... pass\n \"\"\"\n def helper(func):\n if cls.listeners is None:\n cls.listeners = defaultdict(list)\n cls.listeners[event].append(func)\n return func\n return helper\n\n template_funcs = None\n template_fields = None\n album_template_fields = None\n\n @classmethod\n def template_func(cls, name):\n \"\"\"Decorator that registers a path template function. 
The\n function will be invoked as ``%name{}`` from path format\n strings.\n \"\"\"\n def helper(func):\n if cls.template_funcs is None:\n cls.template_funcs = {}\n cls.template_funcs[name] = func\n return func\n return helper\n\n @classmethod\n def template_field(cls, name):\n \"\"\"Decorator that registers a path template field computation.\n The value will be referenced as ``$name`` from path format\n strings. The function must accept a single parameter, the Item\n being formatted.\n \"\"\"\n def helper(func):\n if cls.template_fields is None:\n cls.template_fields = {}\n cls.template_fields[name] = func\n return func\n return helper\n\n\n_classes = set()\n\n\ndef load_plugins(names=()):\n \"\"\"Imports the modules for a sequence of plugin names. Each name\n must be the name of a Python module under the \"beetsplug\" namespace\n package in sys.path; the module indicated should contain the\n BeetsPlugin subclasses desired.\n \"\"\"\n for name in names:\n modname = '%s.%s' % (PLUGIN_NAMESPACE, name)\n try:\n try:\n namespace = __import__(modname, None, None)\n except ImportError as exc:\n # Again, this is hacky:\n if exc.args[0].endswith(' ' + name):\n log.warn(u'** plugin {0} not found'.format(name))\n else:\n raise\n else:\n for obj in getattr(namespace, name).__dict__.values():\n if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \\\n and obj != BeetsPlugin and obj not in _classes:\n _classes.add(obj)\n\n except:\n log.warn(u'** error loading plugin {0}'.format(name))\n log.warn(traceback.format_exc())\n\n\n_instances = {}\n\n\ndef find_plugins():\n \"\"\"Returns a list of BeetsPlugin subclass instances from all\n currently loaded beets plugins. Loads the default plugin set\n first.\n \"\"\"\n load_plugins()\n plugins = []\n for cls in _classes:\n # Only instantiate each plugin class once.\n if cls not in _instances:\n _instances[cls] = cls()\n plugins.append(_instances[cls])\n return plugins\n\n\n# Communication with plugins.\n\ndef commands():\n \"\"\"Returns a list of Subcommand objects from all loaded plugins.\n \"\"\"\n out = []\n for plugin in find_plugins():\n out += plugin.commands()\n return out\n\n\ndef queries():\n \"\"\"Returns a dict mapping prefix strings to Query subclasses all loaded\n plugins.\n \"\"\"\n out = {}\n for plugin in find_plugins():\n out.update(plugin.queries())\n return out\n\n\ndef types(model_cls):\n # Gives us `item_types` and `album_types`\n attr_name = '{0}_types'.format(model_cls.__name__.lower())\n types = {}\n for plugin in find_plugins():\n plugin_types = getattr(plugin, attr_name, {})\n for field in plugin_types:\n if field in types and plugin_types[field] != types[field]:\n raise PluginConflictException(\n u'Plugin {0} defines flexible field {1} '\n 'which has already been defined with '\n 'another type.'.format(plugin.name, field)\n )\n types.update(plugin_types)\n return types\n\n\ndef track_distance(item, info):\n \"\"\"Gets the track distance calculated by all loaded plugins.\n Returns a Distance object.\n \"\"\"\n from beets.autotag.hooks import Distance\n dist = Distance()\n for plugin in find_plugins():\n dist.update(plugin.track_distance(item, info))\n return dist\n\n\ndef album_distance(items, album_info, mapping):\n \"\"\"Returns the album distance calculated by plugins.\"\"\"\n from beets.autotag.hooks import Distance\n dist = Distance()\n for plugin in find_plugins():\n dist.update(plugin.album_distance(items, album_info, mapping))\n return dist\n\n\ndef candidates(items, artist, album, va_likely):\n \"\"\"Gets MusicBrainz 
candidates for an album from each plugin.\n \"\"\"\n out = []\n for plugin in find_plugins():\n out.extend(plugin.candidates(items, artist, album, va_likely))\n return out\n\n\ndef item_candidates(item, artist, title):\n \"\"\"Gets MusicBrainz candidates for an item from the plugins.\n \"\"\"\n out = []\n for plugin in find_plugins():\n out.extend(plugin.item_candidates(item, artist, title))\n return out\n\n\ndef album_for_id(album_id):\n \"\"\"Get AlbumInfo objects for a given ID string.\n \"\"\"\n out = []\n for plugin in find_plugins():\n res = plugin.album_for_id(album_id)\n if res:\n out.append(res)\n return out\n\n\ndef track_for_id(track_id):\n \"\"\"Get TrackInfo objects for a given ID string.\n \"\"\"\n out = []\n for plugin in find_plugins():\n res = plugin.track_for_id(track_id)\n if res:\n out.append(res)\n return out\n\n\ndef template_funcs():\n \"\"\"Get all the template functions declared by plugins as a\n dictionary.\n \"\"\"\n funcs = {}\n for plugin in find_plugins():\n if plugin.template_funcs:\n funcs.update(plugin.template_funcs)\n return funcs\n\n\ndef import_stages():\n \"\"\"Get a list of import stage functions defined by plugins.\"\"\"\n stages = []\n for plugin in find_plugins():\n if hasattr(plugin, 'import_stages'):\n stages += plugin.import_stages\n return stages\n\n\n# New-style (lazy) plugin-provided fields.\n\ndef item_field_getters():\n \"\"\"Get a dictionary mapping field names to unary functions that\n compute the field's value.\n \"\"\"\n funcs = {}\n for plugin in find_plugins():\n if plugin.template_fields:\n funcs.update(plugin.template_fields)\n return funcs\n\n\ndef album_field_getters():\n \"\"\"As above, for album fields.\n \"\"\"\n funcs = {}\n for plugin in find_plugins():\n if plugin.album_template_fields:\n funcs.update(plugin.album_template_fields)\n return funcs\n\n\n# Event dispatch.\n\ndef event_handlers():\n \"\"\"Find all event handlers from plugins as a dictionary mapping\n event names to sequences of callables.\n \"\"\"\n all_handlers = defaultdict(list)\n for plugin in find_plugins():\n if plugin.listeners:\n for event, handlers in plugin.listeners.items():\n all_handlers[event] += handlers\n return all_handlers\n\n\ndef send(event, **arguments):\n \"\"\"Sends an event to all assigned event listeners. Event is the\n name of the event to send, all other named arguments go to the\n event handler(s).\n\n Returns a list of return values from the handlers.\n \"\"\"\n log.debug(u'Sending event: {0}'.format(event))\n for handler in event_handlers()[event]:\n # Don't break legacy plugins if we want to pass more arguments\n argspec = inspect.getargspec(handler).args\n args = dict((k, v) for k, v in arguments.items() if k in argspec)\n handler(**args)\n", "path": "beets/plugins.py"}]} | 4,092 | 166 |
gh_patches_debug_39622 | rasdani/github-patches | git_diff | openai__gym-2158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Base Env class should be abstract
Hi, thank you for this lib,
The Env class should be abstract but it is not. Instead its methods have a `raise NotImplementedError` body.
There are advantages to making this abstract: for example PyLance can properly analyze gym code.
I think Env should subclass `ABC` and methods should be annotated as `@abstractmethod`.
--- END ISSUE ---
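For context, the pattern the reporter suggests looks roughly like the self-contained sketch below. The class names `AbstractEnv`, `ConstantEnv`, and `BrokenEnv` are hypothetical and are not gym APIs; the sketch only demonstrates how subclassing `ABC` and marking methods with `@abstractmethod` lets Python (and static analyzers such as Pylance) flag environments that fail to implement the required methods.

```python
# Hypothetical sketch, not the gym implementation: the ABC/abstractmethod
# pattern the issue suggests for the base Env class.
from abc import ABC, abstractmethod


class AbstractEnv(ABC):
    @abstractmethod
    def step(self, action):
        """Run one timestep and return (observation, reward, done, info)."""

    @abstractmethod
    def reset(self):
        """Reset the environment and return the initial observation."""


class ConstantEnv(AbstractEnv):
    def step(self, action):
        return 0, 0.0, True, {}

    def reset(self):
        return 0


ConstantEnv()  # fine: every abstract method is implemented


class BrokenEnv(AbstractEnv):
    def step(self, action):
        return 0, 0.0, True, {}


try:
    BrokenEnv()  # raises TypeError because reset() is not implemented
except TypeError as exc:
    print(exc)
```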
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/core.py`
Content:
```
1 import gym
2 from gym import error
3 from gym.utils import closer
4
5 env_closer = closer.Closer()
6
7
8 class Env(object):
9 """The main OpenAI Gym class. It encapsulates an environment with
10 arbitrary behind-the-scenes dynamics. An environment can be
11 partially or fully observed.
12
13 The main API methods that users of this class need to know are:
14
15 step
16 reset
17 render
18 close
19 seed
20
21 And set the following attributes:
22
23 action_space: The Space object corresponding to valid actions
24 observation_space: The Space object corresponding to valid observations
25 reward_range: A tuple corresponding to the min and max possible rewards
26
27 Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.
28
29 The methods are accessed publicly as "step", "reset", etc...
30 """
31
32 # Set this in SOME subclasses
33 metadata = {"render.modes": []}
34 reward_range = (-float("inf"), float("inf"))
35 spec = None
36
37 # Set these in ALL subclasses
38 action_space = None
39 observation_space = None
40
41 def step(self, action):
42 """Run one timestep of the environment's dynamics. When end of
43 episode is reached, you are responsible for calling `reset()`
44 to reset this environment's state.
45
46 Accepts an action and returns a tuple (observation, reward, done, info).
47
48 Args:
49 action (object): an action provided by the agent
50
51 Returns:
52 observation (object): agent's observation of the current environment
53 reward (float) : amount of reward returned after previous action
54 done (bool): whether the episode has ended, in which case further step() calls will return undefined results
55 info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
56 """
57 raise NotImplementedError
58
59 def reset(self):
60 """Resets the environment to an initial state and returns an initial
61 observation.
62
63 Note that this function should not reset the environment's random
64 number generator(s); random variables in the environment's state should
65 be sampled independently between multiple calls to `reset()`. In other
66 words, each call of `reset()` should yield an environment suitable for
67 a new episode, independent of previous episodes.
68
69 Returns:
70 observation (object): the initial observation.
71 """
72 raise NotImplementedError
73
74 def render(self, mode="human"):
75 """Renders the environment.
76
77 The set of supported modes varies per environment. (And some
78 environments do not support rendering at all.) By convention,
79 if mode is:
80
81 - human: render to the current display or terminal and
82 return nothing. Usually for human consumption.
83 - rgb_array: Return an numpy.ndarray with shape (x, y, 3),
84 representing RGB values for an x-by-y pixel image, suitable
85 for turning into a video.
86 - ansi: Return a string (str) or StringIO.StringIO containing a
87 terminal-style text representation. The text can include newlines
88 and ANSI escape sequences (e.g. for colors).
89
90 Note:
91 Make sure that your class's metadata 'render.modes' key includes
92 the list of supported modes. It's recommended to call super()
93 in implementations to use the functionality of this method.
94
95 Args:
96 mode (str): the mode to render with
97
98 Example:
99
100 class MyEnv(Env):
101 metadata = {'render.modes': ['human', 'rgb_array']}
102
103 def render(self, mode='human'):
104 if mode == 'rgb_array':
105 return np.array(...) # return RGB frame suitable for video
106 elif mode == 'human':
107 ... # pop up a window and render
108 else:
109 super(MyEnv, self).render(mode=mode) # just raise an exception
110 """
111 raise NotImplementedError
112
113 def close(self):
114 """Override close in your subclass to perform any necessary cleanup.
115
116 Environments will automatically close() themselves when
117 garbage collected or when the program exits.
118 """
119 pass
120
121 def seed(self, seed=None):
122 """Sets the seed for this env's random number generator(s).
123
124 Note:
125 Some environments use multiple pseudorandom number generators.
126 We want to capture all such seeds used in order to ensure that
127 there aren't accidental correlations between multiple generators.
128
129 Returns:
130 list<bigint>: Returns the list of seeds used in this env's random
131 number generators. The first value in the list should be the
132 "main" seed, or the value which a reproducer should pass to
133 'seed'. Often, the main seed equals the provided 'seed', but
134 this won't be true if seed=None, for example.
135 """
136 return
137
138 @property
139 def unwrapped(self):
140 """Completely unwrap this env.
141
142 Returns:
143 gym.Env: The base non-wrapped gym.Env instance
144 """
145 return self
146
147 def __str__(self):
148 if self.spec is None:
149 return "<{} instance>".format(type(self).__name__)
150 else:
151 return "<{}<{}>>".format(type(self).__name__, self.spec.id)
152
153 def __enter__(self):
154 """Support with-statement for the environment."""
155 return self
156
157 def __exit__(self, *args):
158 """Support with-statement for the environment."""
159 self.close()
160 # propagate exception
161 return False
162
163
164 class GoalEnv(Env):
165 """A goal-based environment. It functions just as any regular OpenAI Gym environment but it
166 imposes a required structure on the observation_space. More concretely, the observation
167 space is required to contain at least three elements, namely `observation`, `desired_goal`, and
168 `achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.
169 `achieved_goal` is the goal that it currently achieved instead. `observation` contains the
170 actual observations of the environment as per usual.
171 """
172
173 def reset(self):
174 # Enforce that each GoalEnv uses a Goal-compatible observation space.
175 if not isinstance(self.observation_space, gym.spaces.Dict):
176 raise error.Error(
177 "GoalEnv requires an observation space of type gym.spaces.Dict"
178 )
179 for key in ["observation", "achieved_goal", "desired_goal"]:
180 if key not in self.observation_space.spaces:
181 raise error.Error(
182 'GoalEnv requires the "{}" key to be part of the observation dictionary.'.format(
183 key
184 )
185 )
186
187 def compute_reward(self, achieved_goal, desired_goal, info):
188 """Compute the step reward. This externalizes the reward function and makes
189 it dependent on a desired goal and the one that was achieved. If you wish to include
190 additional rewards that are independent of the goal, you can include the necessary values
191 to derive it in 'info' and compute it accordingly.
192
193 Args:
194 achieved_goal (object): the goal that was achieved during execution
195 desired_goal (object): the desired goal that we asked the agent to attempt to achieve
196 info (dict): an info dictionary with additional information
197
198 Returns:
199 float: The reward that corresponds to the provided achieved goal w.r.t. to the desired
200 goal. Note that the following should always hold true:
201
202 ob, reward, done, info = env.step()
203 assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)
204 """
205 raise NotImplementedError
206
207
208 class Wrapper(Env):
209 """Wraps the environment to allow a modular transformation.
210
211 This class is the base class for all wrappers. The subclass could override
212 some methods to change the behavior of the original environment without touching the
213 original code.
214
215 .. note::
216
217 Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
218
219 """
220
221 def __init__(self, env):
222 self.env = env
223 self.action_space = self.env.action_space
224 self.observation_space = self.env.observation_space
225 self.reward_range = self.env.reward_range
226 self.metadata = self.env.metadata
227
228 def __getattr__(self, name):
229 if name.startswith("_"):
230 raise AttributeError(
231 "attempted to get missing private attribute '{}'".format(name)
232 )
233 return getattr(self.env, name)
234
235 @property
236 def spec(self):
237 return self.env.spec
238
239 @classmethod
240 def class_name(cls):
241 return cls.__name__
242
243 def step(self, action):
244 return self.env.step(action)
245
246 def reset(self, **kwargs):
247 return self.env.reset(**kwargs)
248
249 def render(self, mode="human", **kwargs):
250 return self.env.render(mode, **kwargs)
251
252 def close(self):
253 return self.env.close()
254
255 def seed(self, seed=None):
256 return self.env.seed(seed)
257
258 def compute_reward(self, achieved_goal, desired_goal, info):
259 return self.env.compute_reward(achieved_goal, desired_goal, info)
260
261 def __str__(self):
262 return "<{}{}>".format(type(self).__name__, self.env)
263
264 def __repr__(self):
265 return str(self)
266
267 @property
268 def unwrapped(self):
269 return self.env.unwrapped
270
271
272 class ObservationWrapper(Wrapper):
273 def reset(self, **kwargs):
274 observation = self.env.reset(**kwargs)
275 return self.observation(observation)
276
277 def step(self, action):
278 observation, reward, done, info = self.env.step(action)
279 return self.observation(observation), reward, done, info
280
281 def observation(self, observation):
282 raise NotImplementedError
283
284
285 class RewardWrapper(Wrapper):
286 def reset(self, **kwargs):
287 return self.env.reset(**kwargs)
288
289 def step(self, action):
290 observation, reward, done, info = self.env.step(action)
291 return observation, self.reward(reward), done, info
292
293 def reward(self, reward):
294 raise NotImplementedError
295
296
297 class ActionWrapper(Wrapper):
298 def reset(self, **kwargs):
299 return self.env.reset(**kwargs)
300
301 def step(self, action):
302 return self.env.step(self.action(action))
303
304 def action(self, action):
305 raise NotImplementedError
306
307 def reverse_action(self, action):
308 raise NotImplementedError
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/core.py b/gym/core.py
--- a/gym/core.py
+++ b/gym/core.py
@@ -1,3 +1,5 @@
+from abc import abstractmethod
+
import gym
from gym import error
from gym.utils import closer
@@ -38,6 +40,7 @@
action_space = None
observation_space = None
+ @abstractmethod
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
@@ -56,6 +59,7 @@
"""
raise NotImplementedError
+ @abstractmethod
def reset(self):
"""Resets the environment to an initial state and returns an initial
observation.
@@ -71,7 +75,8 @@
"""
raise NotImplementedError
- def render(self, mode="human"):
+ @abstractmethod
+ def render(self, mode='human'):
"""Renders the environment.
The set of supported modes varies per environment. (And some
@@ -184,6 +189,7 @@
)
)
+ @abstractmethod
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute the step reward. This externalizes the reward function and makes
it dependent on a desired goal and the one that was achieved. If you wish to include
@@ -278,6 +284,7 @@
observation, reward, done, info = self.env.step(action)
return self.observation(observation), reward, done, info
+ @abstractmethod
def observation(self, observation):
raise NotImplementedError
@@ -290,6 +297,7 @@
observation, reward, done, info = self.env.step(action)
return observation, self.reward(reward), done, info
+ @abstractmethod
def reward(self, reward):
raise NotImplementedError
@@ -301,8 +309,10 @@
def step(self, action):
return self.env.step(self.action(action))
+ @abstractmethod
def action(self, action):
raise NotImplementedError
+ @abstractmethod
def reverse_action(self, action):
raise NotImplementedError
| {"golden_diff": "diff --git a/gym/core.py b/gym/core.py\n--- a/gym/core.py\n+++ b/gym/core.py\n@@ -1,3 +1,5 @@\n+from abc import abstractmethod\n+\n import gym\n from gym import error\n from gym.utils import closer\n@@ -38,6 +40,7 @@\n action_space = None\n observation_space = None\n \n+ @abstractmethod\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. When end of\n episode is reached, you are responsible for calling `reset()`\n@@ -56,6 +59,7 @@\n \"\"\"\n raise NotImplementedError\n \n+ @abstractmethod\n def reset(self):\n \"\"\"Resets the environment to an initial state and returns an initial\n observation.\n@@ -71,7 +75,8 @@\n \"\"\"\n raise NotImplementedError\n \n- def render(self, mode=\"human\"):\n+ @abstractmethod\n+ def render(self, mode='human'):\n \"\"\"Renders the environment.\n \n The set of supported modes varies per environment. (And some\n@@ -184,6 +189,7 @@\n )\n )\n \n+ @abstractmethod\n def compute_reward(self, achieved_goal, desired_goal, info):\n \"\"\"Compute the step reward. This externalizes the reward function and makes\n it dependent on a desired goal and the one that was achieved. If you wish to include\n@@ -278,6 +284,7 @@\n observation, reward, done, info = self.env.step(action)\n return self.observation(observation), reward, done, info\n \n+ @abstractmethod\n def observation(self, observation):\n raise NotImplementedError\n \n@@ -290,6 +297,7 @@\n observation, reward, done, info = self.env.step(action)\n return observation, self.reward(reward), done, info\n \n+ @abstractmethod\n def reward(self, reward):\n raise NotImplementedError\n \n@@ -301,8 +309,10 @@\n def step(self, action):\n return self.env.step(self.action(action))\n \n+ @abstractmethod\n def action(self, action):\n raise NotImplementedError\n \n+ @abstractmethod\n def reverse_action(self, action):\n raise NotImplementedError\n", "issue": "Base Env class should be abstract\nHi, thank you for this lib,\r\nThe Env class should be abstract but it is not. Instead its methods have a `raise NotImplementedError` body.\r\nThere are advantages to making this abstract: for example PyLance can properly analyze gym code.\r\n\r\nI think Env should subclass `ABC` and methods should be annotated as `@abstractmethod`.\n", "before_files": [{"content": "import gym\nfrom gym import error\nfrom gym.utils import closer\n\nenv_closer = closer.Closer()\n\n\nclass Env(object):\n \"\"\"The main OpenAI Gym class. It encapsulates an environment with\n arbitrary behind-the-scenes dynamics. An environment can be\n partially or fully observed.\n\n The main API methods that users of this class need to know are:\n\n step\n reset\n render\n close\n seed\n\n And set the following attributes:\n\n action_space: The Space object corresponding to valid actions\n observation_space: The Space object corresponding to valid observations\n reward_range: A tuple corresponding to the min and max possible rewards\n\n Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.\n\n The methods are accessed publicly as \"step\", \"reset\", etc...\n \"\"\"\n\n # Set this in SOME subclasses\n metadata = {\"render.modes\": []}\n reward_range = (-float(\"inf\"), float(\"inf\"))\n spec = None\n\n # Set these in ALL subclasses\n action_space = None\n observation_space = None\n\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. 
When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n\n Args:\n action (object): an action provided by the agent\n\n Returns:\n observation (object): agent's observation of the current environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n \"\"\"\n raise NotImplementedError\n\n def reset(self):\n \"\"\"Resets the environment to an initial state and returns an initial\n observation.\n\n Note that this function should not reset the environment's random\n number generator(s); random variables in the environment's state should\n be sampled independently between multiple calls to `reset()`. In other\n words, each call of `reset()` should yield an environment suitable for\n a new episode, independent of previous episodes.\n\n Returns:\n observation (object): the initial observation.\n \"\"\"\n raise NotImplementedError\n\n def render(self, mode=\"human\"):\n \"\"\"Renders the environment.\n\n The set of supported modes varies per environment. (And some\n environments do not support rendering at all.) By convention,\n if mode is:\n\n - human: render to the current display or terminal and\n return nothing. Usually for human consumption.\n - rgb_array: Return an numpy.ndarray with shape (x, y, 3),\n representing RGB values for an x-by-y pixel image, suitable\n for turning into a video.\n - ansi: Return a string (str) or StringIO.StringIO containing a\n terminal-style text representation. The text can include newlines\n and ANSI escape sequences (e.g. for colors).\n\n Note:\n Make sure that your class's metadata 'render.modes' key includes\n the list of supported modes. It's recommended to call super()\n in implementations to use the functionality of this method.\n\n Args:\n mode (str): the mode to render with\n\n Example:\n\n class MyEnv(Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def render(self, mode='human'):\n if mode == 'rgb_array':\n return np.array(...) # return RGB frame suitable for video\n elif mode == 'human':\n ... # pop up a window and render\n else:\n super(MyEnv, self).render(mode=mode) # just raise an exception\n \"\"\"\n raise NotImplementedError\n\n def close(self):\n \"\"\"Override close in your subclass to perform any necessary cleanup.\n\n Environments will automatically close() themselves when\n garbage collected or when the program exits.\n \"\"\"\n pass\n\n def seed(self, seed=None):\n \"\"\"Sets the seed for this env's random number generator(s).\n\n Note:\n Some environments use multiple pseudorandom number generators.\n We want to capture all such seeds used in order to ensure that\n there aren't accidental correlations between multiple generators.\n\n Returns:\n list<bigint>: Returns the list of seeds used in this env's random\n number generators. The first value in the list should be the\n \"main\" seed, or the value which a reproducer should pass to\n 'seed'. 
Often, the main seed equals the provided 'seed', but\n this won't be true if seed=None, for example.\n \"\"\"\n return\n\n @property\n def unwrapped(self):\n \"\"\"Completely unwrap this env.\n\n Returns:\n gym.Env: The base non-wrapped gym.Env instance\n \"\"\"\n return self\n\n def __str__(self):\n if self.spec is None:\n return \"<{} instance>\".format(type(self).__name__)\n else:\n return \"<{}<{}>>\".format(type(self).__name__, self.spec.id)\n\n def __enter__(self):\n \"\"\"Support with-statement for the environment.\"\"\"\n return self\n\n def __exit__(self, *args):\n \"\"\"Support with-statement for the environment.\"\"\"\n self.close()\n # propagate exception\n return False\n\n\nclass GoalEnv(Env):\n \"\"\"A goal-based environment. It functions just as any regular OpenAI Gym environment but it\n imposes a required structure on the observation_space. More concretely, the observation\n space is required to contain at least three elements, namely `observation`, `desired_goal`, and\n `achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.\n `achieved_goal` is the goal that it currently achieved instead. `observation` contains the\n actual observations of the environment as per usual.\n \"\"\"\n\n def reset(self):\n # Enforce that each GoalEnv uses a Goal-compatible observation space.\n if not isinstance(self.observation_space, gym.spaces.Dict):\n raise error.Error(\n \"GoalEnv requires an observation space of type gym.spaces.Dict\"\n )\n for key in [\"observation\", \"achieved_goal\", \"desired_goal\"]:\n if key not in self.observation_space.spaces:\n raise error.Error(\n 'GoalEnv requires the \"{}\" key to be part of the observation dictionary.'.format(\n key\n )\n )\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n \"\"\"Compute the step reward. This externalizes the reward function and makes\n it dependent on a desired goal and the one that was achieved. If you wish to include\n additional rewards that are independent of the goal, you can include the necessary values\n to derive it in 'info' and compute it accordingly.\n\n Args:\n achieved_goal (object): the goal that was achieved during execution\n desired_goal (object): the desired goal that we asked the agent to attempt to achieve\n info (dict): an info dictionary with additional information\n\n Returns:\n float: The reward that corresponds to the provided achieved goal w.r.t. to the desired\n goal. Note that the following should always hold true:\n\n ob, reward, done, info = env.step()\n assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)\n \"\"\"\n raise NotImplementedError\n\n\nclass Wrapper(Env):\n \"\"\"Wraps the environment to allow a modular transformation.\n\n This class is the base class for all wrappers. The subclass could override\n some methods to change the behavior of the original environment without touching the\n original code.\n\n .. 
note::\n\n Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.\n\n \"\"\"\n\n def __init__(self, env):\n self.env = env\n self.action_space = self.env.action_space\n self.observation_space = self.env.observation_space\n self.reward_range = self.env.reward_range\n self.metadata = self.env.metadata\n\n def __getattr__(self, name):\n if name.startswith(\"_\"):\n raise AttributeError(\n \"attempted to get missing private attribute '{}'\".format(name)\n )\n return getattr(self.env, name)\n\n @property\n def spec(self):\n return self.env.spec\n\n @classmethod\n def class_name(cls):\n return cls.__name__\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def render(self, mode=\"human\", **kwargs):\n return self.env.render(mode, **kwargs)\n\n def close(self):\n return self.env.close()\n\n def seed(self, seed=None):\n return self.env.seed(seed)\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return self.env.compute_reward(achieved_goal, desired_goal, info)\n\n def __str__(self):\n return \"<{}{}>\".format(type(self).__name__, self.env)\n\n def __repr__(self):\n return str(self)\n\n @property\n def unwrapped(self):\n return self.env.unwrapped\n\n\nclass ObservationWrapper(Wrapper):\n def reset(self, **kwargs):\n observation = self.env.reset(**kwargs)\n return self.observation(observation)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n return self.observation(observation), reward, done, info\n\n def observation(self, observation):\n raise NotImplementedError\n\n\nclass RewardWrapper(Wrapper):\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n return observation, self.reward(reward), done, info\n\n def reward(self, reward):\n raise NotImplementedError\n\n\nclass ActionWrapper(Wrapper):\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n return self.env.step(self.action(action))\n\n def action(self, action):\n raise NotImplementedError\n\n def reverse_action(self, action):\n raise NotImplementedError\n", "path": "gym/core.py"}], "after_files": [{"content": "from abc import abstractmethod\n\nimport gym\nfrom gym import error\nfrom gym.utils import closer\n\nenv_closer = closer.Closer()\n\n\nclass Env(object):\n \"\"\"The main OpenAI Gym class. It encapsulates an environment with\n arbitrary behind-the-scenes dynamics. An environment can be\n partially or fully observed.\n\n The main API methods that users of this class need to know are:\n\n step\n reset\n render\n close\n seed\n\n And set the following attributes:\n\n action_space: The Space object corresponding to valid actions\n observation_space: The Space object corresponding to valid observations\n reward_range: A tuple corresponding to the min and max possible rewards\n\n Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.\n\n The methods are accessed publicly as \"step\", \"reset\", etc...\n \"\"\"\n\n # Set this in SOME subclasses\n metadata = {\"render.modes\": []}\n reward_range = (-float(\"inf\"), float(\"inf\"))\n spec = None\n\n # Set these in ALL subclasses\n action_space = None\n observation_space = None\n\n @abstractmethod\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. 
When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n\n Args:\n action (object): an action provided by the agent\n\n Returns:\n observation (object): agent's observation of the current environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self):\n \"\"\"Resets the environment to an initial state and returns an initial\n observation.\n\n Note that this function should not reset the environment's random\n number generator(s); random variables in the environment's state should\n be sampled independently between multiple calls to `reset()`. In other\n words, each call of `reset()` should yield an environment suitable for\n a new episode, independent of previous episodes.\n\n Returns:\n observation (object): the initial observation.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def render(self, mode='human'):\n \"\"\"Renders the environment.\n\n The set of supported modes varies per environment. (And some\n environments do not support rendering at all.) By convention,\n if mode is:\n\n - human: render to the current display or terminal and\n return nothing. Usually for human consumption.\n - rgb_array: Return an numpy.ndarray with shape (x, y, 3),\n representing RGB values for an x-by-y pixel image, suitable\n for turning into a video.\n - ansi: Return a string (str) or StringIO.StringIO containing a\n terminal-style text representation. The text can include newlines\n and ANSI escape sequences (e.g. for colors).\n\n Note:\n Make sure that your class's metadata 'render.modes' key includes\n the list of supported modes. It's recommended to call super()\n in implementations to use the functionality of this method.\n\n Args:\n mode (str): the mode to render with\n\n Example:\n\n class MyEnv(Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def render(self, mode='human'):\n if mode == 'rgb_array':\n return np.array(...) # return RGB frame suitable for video\n elif mode == 'human':\n ... # pop up a window and render\n else:\n super(MyEnv, self).render(mode=mode) # just raise an exception\n \"\"\"\n raise NotImplementedError\n\n def close(self):\n \"\"\"Override close in your subclass to perform any necessary cleanup.\n\n Environments will automatically close() themselves when\n garbage collected or when the program exits.\n \"\"\"\n pass\n\n def seed(self, seed=None):\n \"\"\"Sets the seed for this env's random number generator(s).\n\n Note:\n Some environments use multiple pseudorandom number generators.\n We want to capture all such seeds used in order to ensure that\n there aren't accidental correlations between multiple generators.\n\n Returns:\n list<bigint>: Returns the list of seeds used in this env's random\n number generators. The first value in the list should be the\n \"main\" seed, or the value which a reproducer should pass to\n 'seed'. 
Often, the main seed equals the provided 'seed', but\n this won't be true if seed=None, for example.\n \"\"\"\n return\n\n @property\n def unwrapped(self):\n \"\"\"Completely unwrap this env.\n\n Returns:\n gym.Env: The base non-wrapped gym.Env instance\n \"\"\"\n return self\n\n def __str__(self):\n if self.spec is None:\n return \"<{} instance>\".format(type(self).__name__)\n else:\n return \"<{}<{}>>\".format(type(self).__name__, self.spec.id)\n\n def __enter__(self):\n \"\"\"Support with-statement for the environment.\"\"\"\n return self\n\n def __exit__(self, *args):\n \"\"\"Support with-statement for the environment.\"\"\"\n self.close()\n # propagate exception\n return False\n\n\nclass GoalEnv(Env):\n \"\"\"A goal-based environment. It functions just as any regular OpenAI Gym environment but it\n imposes a required structure on the observation_space. More concretely, the observation\n space is required to contain at least three elements, namely `observation`, `desired_goal`, and\n `achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.\n `achieved_goal` is the goal that it currently achieved instead. `observation` contains the\n actual observations of the environment as per usual.\n \"\"\"\n\n def reset(self):\n # Enforce that each GoalEnv uses a Goal-compatible observation space.\n if not isinstance(self.observation_space, gym.spaces.Dict):\n raise error.Error(\n \"GoalEnv requires an observation space of type gym.spaces.Dict\"\n )\n for key in [\"observation\", \"achieved_goal\", \"desired_goal\"]:\n if key not in self.observation_space.spaces:\n raise error.Error(\n 'GoalEnv requires the \"{}\" key to be part of the observation dictionary.'.format(\n key\n )\n )\n\n @abstractmethod\n def compute_reward(self, achieved_goal, desired_goal, info):\n \"\"\"Compute the step reward. This externalizes the reward function and makes\n it dependent on a desired goal and the one that was achieved. If you wish to include\n additional rewards that are independent of the goal, you can include the necessary values\n to derive it in 'info' and compute it accordingly.\n\n Args:\n achieved_goal (object): the goal that was achieved during execution\n desired_goal (object): the desired goal that we asked the agent to attempt to achieve\n info (dict): an info dictionary with additional information\n\n Returns:\n float: The reward that corresponds to the provided achieved goal w.r.t. to the desired\n goal. Note that the following should always hold true:\n\n ob, reward, done, info = env.step()\n assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)\n \"\"\"\n raise NotImplementedError\n\n\nclass Wrapper(Env):\n \"\"\"Wraps the environment to allow a modular transformation.\n\n This class is the base class for all wrappers. The subclass could override\n some methods to change the behavior of the original environment without touching the\n original code.\n\n .. 
note::\n\n Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.\n\n \"\"\"\n\n def __init__(self, env):\n self.env = env\n self.action_space = self.env.action_space\n self.observation_space = self.env.observation_space\n self.reward_range = self.env.reward_range\n self.metadata = self.env.metadata\n\n def __getattr__(self, name):\n if name.startswith(\"_\"):\n raise AttributeError(\n \"attempted to get missing private attribute '{}'\".format(name)\n )\n return getattr(self.env, name)\n\n @property\n def spec(self):\n return self.env.spec\n\n @classmethod\n def class_name(cls):\n return cls.__name__\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def render(self, mode=\"human\", **kwargs):\n return self.env.render(mode, **kwargs)\n\n def close(self):\n return self.env.close()\n\n def seed(self, seed=None):\n return self.env.seed(seed)\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return self.env.compute_reward(achieved_goal, desired_goal, info)\n\n def __str__(self):\n return \"<{}{}>\".format(type(self).__name__, self.env)\n\n def __repr__(self):\n return str(self)\n\n @property\n def unwrapped(self):\n return self.env.unwrapped\n\n\nclass ObservationWrapper(Wrapper):\n def reset(self, **kwargs):\n observation = self.env.reset(**kwargs)\n return self.observation(observation)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n return self.observation(observation), reward, done, info\n\n @abstractmethod\n def observation(self, observation):\n raise NotImplementedError\n\n\nclass RewardWrapper(Wrapper):\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n return observation, self.reward(reward), done, info\n\n @abstractmethod\n def reward(self, reward):\n raise NotImplementedError\n\n\nclass ActionWrapper(Wrapper):\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n return self.env.step(self.action(action))\n\n @abstractmethod\n def action(self, action):\n raise NotImplementedError\n\n @abstractmethod\n def reverse_action(self, action):\n raise NotImplementedError\n", "path": "gym/core.py"}]} | 3,376 | 490 |
gh_patches_debug_35929 | rasdani/github-patches | git_diff | litestar-org__litestar-3295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Path parameters missing from OpenAPI schema when not included in handler signature
### Description
When defining a path parameter, but not using it in the handler signature, the path parameter is not documented in the OpenAPI schema. This is likely due to how we handle path-parameter extraction, which is based on the handler signature.
This is an issue though because, even if the parameter value is not used in the handler itself, it still must be specified and should therefore be documented.
### URL to code causing the issue
_No response_
### MCVE
```python
@get("/{param:str}")
async def handler() -> None:
...
app = Litestar([handler])
assert app.openapi_schema.paths["/{param}"].get.parameters
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.7.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3290">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/_openapi/parameters.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from litestar._openapi.schema_generation import SchemaCreator
6 from litestar._openapi.schema_generation.utils import get_formatted_examples
7 from litestar.constants import RESERVED_KWARGS
8 from litestar.enums import ParamType
9 from litestar.exceptions import ImproperlyConfiguredException
10 from litestar.openapi.spec.parameter import Parameter
11 from litestar.openapi.spec.schema import Schema
12 from litestar.params import DependencyKwarg, ParameterKwarg
13 from litestar.types import Empty
14 from litestar.typing import FieldDefinition
15
16 if TYPE_CHECKING:
17 from litestar._openapi.datastructures import OpenAPIContext
18 from litestar.handlers.base import BaseRouteHandler
19 from litestar.openapi.spec import Reference
20 from litestar.types.internal_types import PathParameterDefinition
21
22 __all__ = ("create_parameters_for_handler",)
23
24
25 class ParameterCollection:
26 """Facilitates conditional deduplication of parameters.
27
28 If multiple parameters with the same name are produced for a handler, the condition is ignored if the two
29 ``Parameter`` instances are the same (the first is retained and any duplicates are ignored). If the ``Parameter``
30 instances are not the same, an exception is raised.
31 """
32
33 def __init__(self, route_handler: BaseRouteHandler) -> None:
34 """Initialize ``ParameterCollection``.
35
36 Args:
37 route_handler: Associated route handler
38 """
39 self.route_handler = route_handler
40 self._parameters: dict[tuple[str, str], Parameter] = {}
41
42 def add(self, parameter: Parameter) -> None:
43 """Add a ``Parameter`` to the collection.
44
45 If an existing parameter with the same name and type already exists, the
46 parameter is ignored.
47
48 If an existing parameter with the same name but different type exists, raises
49 ``ImproperlyConfiguredException``.
50 """
51
52 if (parameter.name, parameter.param_in) not in self._parameters:
53 # because we are defining routes as unique per path, we have to handle here a situation when there is an optional
54 # path parameter. e.g. get(path=["/", "/{param:str}"]). When parsing the parameter for path, the route handler
55 # would still have a kwarg called param:
56 # def handler(param: str | None) -> ...
57 if parameter.param_in != ParamType.QUERY or all(
58 f"{{{parameter.name}:" not in path for path in self.route_handler.paths
59 ):
60 self._parameters[(parameter.name, parameter.param_in)] = parameter
61 return
62
63 pre_existing = self._parameters[(parameter.name, parameter.param_in)]
64 if parameter == pre_existing:
65 return
66
67 raise ImproperlyConfiguredException(
68 f"OpenAPI schema generation for handler `{self.route_handler}` detected multiple parameters named "
69 f"'{parameter.name}' with different types."
70 )
71
72 def list(self) -> list[Parameter]:
73 """Return a list of all ``Parameter``'s in the collection."""
74 return list(self._parameters.values())
75
76
77 class ParameterFactory:
78 """Factory for creating OpenAPI Parameters for a given route handler."""
79
80 def __init__(
81 self,
82 context: OpenAPIContext,
83 route_handler: BaseRouteHandler,
84 path_parameters: tuple[PathParameterDefinition, ...],
85 ) -> None:
86 """Initialize ParameterFactory.
87
88 Args:
89 context: The OpenAPI context.
90 route_handler: The route handler.
91 path_parameters: The path parameters for the route.
92 """
93 self.context = context
94 self.schema_creator = SchemaCreator.from_openapi_context(self.context, prefer_alias=True)
95 self.route_handler = route_handler
96 self.parameters = ParameterCollection(route_handler)
97 self.dependency_providers = route_handler.resolve_dependencies()
98 self.layered_parameters = route_handler.resolve_layered_parameters()
99 self.path_parameters_names = {p.name for p in path_parameters}
100
101 def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:
102 """Create an OpenAPI Parameter instance for a field definition.
103
104 Args:
105 field_definition: The field definition.
106 parameter_name: The name of the parameter.
107 """
108
109 result: Schema | Reference | None = None
110 kwarg_definition = (
111 field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None
112 )
113
114 if parameter_name in self.path_parameters_names:
115 param_in = ParamType.PATH
116 is_required = True
117 result = self.schema_creator.for_field_definition(field_definition)
118 elif kwarg_definition and kwarg_definition.header:
119 parameter_name = kwarg_definition.header
120 param_in = ParamType.HEADER
121 is_required = field_definition.is_required
122 elif kwarg_definition and kwarg_definition.cookie:
123 parameter_name = kwarg_definition.cookie
124 param_in = ParamType.COOKIE
125 is_required = field_definition.is_required
126 else:
127 is_required = field_definition.is_required
128 param_in = ParamType.QUERY
129 parameter_name = kwarg_definition.query if kwarg_definition and kwarg_definition.query else parameter_name
130
131 if not result:
132 result = self.schema_creator.for_field_definition(field_definition)
133
134 schema = result if isinstance(result, Schema) else self.context.schema_registry.from_reference(result).schema
135
136 examples_list = kwarg_definition.examples or [] if kwarg_definition else []
137 examples = get_formatted_examples(field_definition, examples_list)
138
139 return Parameter(
140 description=schema.description,
141 name=parameter_name,
142 param_in=param_in,
143 required=is_required,
144 schema=result,
145 examples=examples or None,
146 )
147
148 def get_layered_parameter(self, field_name: str, field_definition: FieldDefinition) -> Parameter:
149 """Create a parameter for a field definition that has a KwargDefinition defined on the layers.
150
151 Args:
152 field_name: The name of the field.
153 field_definition: The field definition.
154 """
155 layer_field = self.layered_parameters[field_name]
156
157 field = field_definition if field_definition.is_parameter_field else layer_field
158 default = layer_field.default if field_definition.has_default else field_definition.default
159 annotation = field_definition.annotation if field_definition is not Empty else layer_field.annotation
160
161 parameter_name = field_name
162 if isinstance(field.kwarg_definition, ParameterKwarg):
163 parameter_name = (
164 field.kwarg_definition.query
165 or field.kwarg_definition.header
166 or field.kwarg_definition.cookie
167 or field_name
168 )
169
170 field_definition = FieldDefinition.from_kwarg(
171 inner_types=field.inner_types,
172 default=default,
173 extra=field.extra,
174 annotation=annotation,
175 kwarg_definition=field.kwarg_definition,
176 name=field_name,
177 )
178 return self.create_parameter(field_definition=field_definition, parameter_name=parameter_name)
179
180 def create_parameters_for_field_definitions(self, fields: dict[str, FieldDefinition]) -> None:
181 """Add Parameter models to the handler's collection for the given field definitions.
182
183 Args:
184 fields: The field definitions.
185 """
186 unique_handler_fields = (
187 (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k not in self.layered_parameters
188 )
189 unique_layered_fields = (
190 (k, v) for k, v in self.layered_parameters.items() if k not in RESERVED_KWARGS and k not in fields
191 )
192 intersection_fields = (
193 (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k in self.layered_parameters
194 )
195
196 for field_name, field_definition in unique_handler_fields:
197 if (
198 isinstance(field_definition.kwarg_definition, DependencyKwarg)
199 and field_name not in self.dependency_providers
200 ):
201 # never document explicit dependencies
202 continue
203
204 if provider := self.dependency_providers.get(field_name):
205 self.create_parameters_for_field_definitions(fields=provider.parsed_fn_signature.parameters)
206 else:
207 self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))
208
209 for field_name, field_definition in unique_layered_fields:
210 self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))
211
212 for field_name, field_definition in intersection_fields:
213 self.parameters.add(self.get_layered_parameter(field_name=field_name, field_definition=field_definition))
214
215 def create_parameters_for_handler(self) -> list[Parameter]:
216 """Create a list of path/query/header Parameter models for the given PathHandler."""
217 handler_fields = self.route_handler.parsed_fn_signature.parameters
218 self.create_parameters_for_field_definitions(handler_fields)
219 return self.parameters.list()
220
221
222 def create_parameters_for_handler(
223 context: OpenAPIContext,
224 route_handler: BaseRouteHandler,
225 path_parameters: tuple[PathParameterDefinition, ...],
226 ) -> list[Parameter]:
227 """Create a list of path/query/header Parameter models for the given PathHandler."""
228 factory = ParameterFactory(
229 context=context,
230 route_handler=route_handler,
231 path_parameters=path_parameters,
232 )
233 return factory.create_parameters_for_handler()
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/_openapi/parameters.py b/litestar/_openapi/parameters.py
--- a/litestar/_openapi/parameters.py
+++ b/litestar/_openapi/parameters.py
@@ -96,7 +96,7 @@
self.parameters = ParameterCollection(route_handler)
self.dependency_providers = route_handler.resolve_dependencies()
self.layered_parameters = route_handler.resolve_layered_parameters()
- self.path_parameters_names = {p.name for p in path_parameters}
+ self.path_parameters: dict[str, PathParameterDefinition] = {p.name: p for p in path_parameters}
def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:
"""Create an OpenAPI Parameter instance for a field definition.
@@ -111,7 +111,7 @@
field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None
)
- if parameter_name in self.path_parameters_names:
+ if parameter_name in self.path_parameters:
param_in = ParamType.PATH
is_required = True
result = self.schema_creator.for_field_definition(field_definition)
@@ -215,6 +215,17 @@
def create_parameters_for_handler(self) -> list[Parameter]:
"""Create a list of path/query/header Parameter models for the given PathHandler."""
handler_fields = self.route_handler.parsed_fn_signature.parameters
+ # not all path parameters have to be consumed by the handler. Because even not
+ # consumed path parameters must still be specified, we create stub parameters
+ # for the unconsumed ones so a correct OpenAPI schema can be generated
+ params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()
+ handler_fields.update(
+ {
+ param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)
+ for param_name in params_not_consumed_by_handler
+ }
+ )
+
self.create_parameters_for_field_definitions(handler_fields)
return self.parameters.list()
| {"golden_diff": "diff --git a/litestar/_openapi/parameters.py b/litestar/_openapi/parameters.py\n--- a/litestar/_openapi/parameters.py\n+++ b/litestar/_openapi/parameters.py\n@@ -96,7 +96,7 @@\n self.parameters = ParameterCollection(route_handler)\n self.dependency_providers = route_handler.resolve_dependencies()\n self.layered_parameters = route_handler.resolve_layered_parameters()\n- self.path_parameters_names = {p.name for p in path_parameters}\n+ self.path_parameters: dict[str, PathParameterDefinition] = {p.name: p for p in path_parameters}\n \n def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:\n \"\"\"Create an OpenAPI Parameter instance for a field definition.\n@@ -111,7 +111,7 @@\n field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None\n )\n \n- if parameter_name in self.path_parameters_names:\n+ if parameter_name in self.path_parameters:\n param_in = ParamType.PATH\n is_required = True\n result = self.schema_creator.for_field_definition(field_definition)\n@@ -215,6 +215,17 @@\n def create_parameters_for_handler(self) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given PathHandler.\"\"\"\n handler_fields = self.route_handler.parsed_fn_signature.parameters\n+ # not all path parameters have to be consumed by the handler. Because even not\n+ # consumed path parameters must still be specified, we create stub parameters\n+ # for the unconsumed ones so a correct OpenAPI schema can be generated\n+ params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()\n+ handler_fields.update(\n+ {\n+ param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)\n+ for param_name in params_not_consumed_by_handler\n+ }\n+ )\n+\n self.create_parameters_for_field_definitions(handler_fields)\n return self.parameters.list()\n", "issue": "Bug: Path parameters missing from OpenAPI schema when not included in handler signature\n### Description\r\n\r\nWhen defining a path parameter, but not using it in the handler signature, the path parameter is not documented in the OpenAPI schema. This is likely due to how we handle their extraction, which is based on the handler. \r\n\r\nThis is an issue though because, even if the parameter value is not used in the handler itself, it is still required to specify and should therefore be documented. 
\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\n@get(\"/{param:str}\")\r\nasync def handler() -> None:\r\n ...\r\n\r\napp = Litestar([handler])\r\nassert app.openapi_schema.paths[\"/{param}\"].get.parameters\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\n2.7.1 \r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3290\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.schema_generation import SchemaCreator\nfrom litestar._openapi.schema_generation.utils import get_formatted_examples\nfrom litestar.constants import RESERVED_KWARGS\nfrom litestar.enums import ParamType\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.openapi.spec.parameter import Parameter\nfrom litestar.openapi.spec.schema import Schema\nfrom litestar.params import DependencyKwarg, ParameterKwarg\nfrom litestar.types import Empty\nfrom litestar.typing import FieldDefinition\n\nif TYPE_CHECKING:\n from litestar._openapi.datastructures import OpenAPIContext\n from litestar.handlers.base import BaseRouteHandler\n from litestar.openapi.spec import Reference\n from litestar.types.internal_types import PathParameterDefinition\n\n__all__ = (\"create_parameters_for_handler\",)\n\n\nclass ParameterCollection:\n \"\"\"Facilitates conditional deduplication of parameters.\n\n If multiple parameters with the same name are produced for a handler, the condition is ignored if the two\n ``Parameter`` instances are the same (the first is retained and any duplicates are ignored). 
If the ``Parameter``\n instances are not the same, an exception is raised.\n \"\"\"\n\n def __init__(self, route_handler: BaseRouteHandler) -> None:\n \"\"\"Initialize ``ParameterCollection``.\n\n Args:\n route_handler: Associated route handler\n \"\"\"\n self.route_handler = route_handler\n self._parameters: dict[tuple[str, str], Parameter] = {}\n\n def add(self, parameter: Parameter) -> None:\n \"\"\"Add a ``Parameter`` to the collection.\n\n If an existing parameter with the same name and type already exists, the\n parameter is ignored.\n\n If an existing parameter with the same name but different type exists, raises\n ``ImproperlyConfiguredException``.\n \"\"\"\n\n if (parameter.name, parameter.param_in) not in self._parameters:\n # because we are defining routes as unique per path, we have to handle here a situation when there is an optional\n # path parameter. e.g. get(path=[\"/\", \"/{param:str}\"]). When parsing the parameter for path, the route handler\n # would still have a kwarg called param:\n # def handler(param: str | None) -> ...\n if parameter.param_in != ParamType.QUERY or all(\n f\"{{{parameter.name}:\" not in path for path in self.route_handler.paths\n ):\n self._parameters[(parameter.name, parameter.param_in)] = parameter\n return\n\n pre_existing = self._parameters[(parameter.name, parameter.param_in)]\n if parameter == pre_existing:\n return\n\n raise ImproperlyConfiguredException(\n f\"OpenAPI schema generation for handler `{self.route_handler}` detected multiple parameters named \"\n f\"'{parameter.name}' with different types.\"\n )\n\n def list(self) -> list[Parameter]:\n \"\"\"Return a list of all ``Parameter``'s in the collection.\"\"\"\n return list(self._parameters.values())\n\n\nclass ParameterFactory:\n \"\"\"Factory for creating OpenAPI Parameters for a given route handler.\"\"\"\n\n def __init__(\n self,\n context: OpenAPIContext,\n route_handler: BaseRouteHandler,\n path_parameters: tuple[PathParameterDefinition, ...],\n ) -> None:\n \"\"\"Initialize ParameterFactory.\n\n Args:\n context: The OpenAPI context.\n route_handler: The route handler.\n path_parameters: The path parameters for the route.\n \"\"\"\n self.context = context\n self.schema_creator = SchemaCreator.from_openapi_context(self.context, prefer_alias=True)\n self.route_handler = route_handler\n self.parameters = ParameterCollection(route_handler)\n self.dependency_providers = route_handler.resolve_dependencies()\n self.layered_parameters = route_handler.resolve_layered_parameters()\n self.path_parameters_names = {p.name for p in path_parameters}\n\n def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:\n \"\"\"Create an OpenAPI Parameter instance for a field definition.\n\n Args:\n field_definition: The field definition.\n parameter_name: The name of the parameter.\n \"\"\"\n\n result: Schema | Reference | None = None\n kwarg_definition = (\n field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None\n )\n\n if parameter_name in self.path_parameters_names:\n param_in = ParamType.PATH\n is_required = True\n result = self.schema_creator.for_field_definition(field_definition)\n elif kwarg_definition and kwarg_definition.header:\n parameter_name = kwarg_definition.header\n param_in = ParamType.HEADER\n is_required = field_definition.is_required\n elif kwarg_definition and kwarg_definition.cookie:\n parameter_name = kwarg_definition.cookie\n param_in = ParamType.COOKIE\n is_required = 
field_definition.is_required\n else:\n is_required = field_definition.is_required\n param_in = ParamType.QUERY\n parameter_name = kwarg_definition.query if kwarg_definition and kwarg_definition.query else parameter_name\n\n if not result:\n result = self.schema_creator.for_field_definition(field_definition)\n\n schema = result if isinstance(result, Schema) else self.context.schema_registry.from_reference(result).schema\n\n examples_list = kwarg_definition.examples or [] if kwarg_definition else []\n examples = get_formatted_examples(field_definition, examples_list)\n\n return Parameter(\n description=schema.description,\n name=parameter_name,\n param_in=param_in,\n required=is_required,\n schema=result,\n examples=examples or None,\n )\n\n def get_layered_parameter(self, field_name: str, field_definition: FieldDefinition) -> Parameter:\n \"\"\"Create a parameter for a field definition that has a KwargDefinition defined on the layers.\n\n Args:\n field_name: The name of the field.\n field_definition: The field definition.\n \"\"\"\n layer_field = self.layered_parameters[field_name]\n\n field = field_definition if field_definition.is_parameter_field else layer_field\n default = layer_field.default if field_definition.has_default else field_definition.default\n annotation = field_definition.annotation if field_definition is not Empty else layer_field.annotation\n\n parameter_name = field_name\n if isinstance(field.kwarg_definition, ParameterKwarg):\n parameter_name = (\n field.kwarg_definition.query\n or field.kwarg_definition.header\n or field.kwarg_definition.cookie\n or field_name\n )\n\n field_definition = FieldDefinition.from_kwarg(\n inner_types=field.inner_types,\n default=default,\n extra=field.extra,\n annotation=annotation,\n kwarg_definition=field.kwarg_definition,\n name=field_name,\n )\n return self.create_parameter(field_definition=field_definition, parameter_name=parameter_name)\n\n def create_parameters_for_field_definitions(self, fields: dict[str, FieldDefinition]) -> None:\n \"\"\"Add Parameter models to the handler's collection for the given field definitions.\n\n Args:\n fields: The field definitions.\n \"\"\"\n unique_handler_fields = (\n (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k not in self.layered_parameters\n )\n unique_layered_fields = (\n (k, v) for k, v in self.layered_parameters.items() if k not in RESERVED_KWARGS and k not in fields\n )\n intersection_fields = (\n (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k in self.layered_parameters\n )\n\n for field_name, field_definition in unique_handler_fields:\n if (\n isinstance(field_definition.kwarg_definition, DependencyKwarg)\n and field_name not in self.dependency_providers\n ):\n # never document explicit dependencies\n continue\n\n if provider := self.dependency_providers.get(field_name):\n self.create_parameters_for_field_definitions(fields=provider.parsed_fn_signature.parameters)\n else:\n self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))\n\n for field_name, field_definition in unique_layered_fields:\n self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))\n\n for field_name, field_definition in intersection_fields:\n self.parameters.add(self.get_layered_parameter(field_name=field_name, field_definition=field_definition))\n\n def create_parameters_for_handler(self) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given 
PathHandler.\"\"\"\n handler_fields = self.route_handler.parsed_fn_signature.parameters\n self.create_parameters_for_field_definitions(handler_fields)\n return self.parameters.list()\n\n\ndef create_parameters_for_handler(\n context: OpenAPIContext,\n route_handler: BaseRouteHandler,\n path_parameters: tuple[PathParameterDefinition, ...],\n) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given PathHandler.\"\"\"\n factory = ParameterFactory(\n context=context,\n route_handler=route_handler,\n path_parameters=path_parameters,\n )\n return factory.create_parameters_for_handler()\n", "path": "litestar/_openapi/parameters.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.schema_generation import SchemaCreator\nfrom litestar._openapi.schema_generation.utils import get_formatted_examples\nfrom litestar.constants import RESERVED_KWARGS\nfrom litestar.enums import ParamType\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.openapi.spec.parameter import Parameter\nfrom litestar.openapi.spec.schema import Schema\nfrom litestar.params import DependencyKwarg, ParameterKwarg\nfrom litestar.types import Empty\nfrom litestar.typing import FieldDefinition\n\nif TYPE_CHECKING:\n from litestar._openapi.datastructures import OpenAPIContext\n from litestar.handlers.base import BaseRouteHandler\n from litestar.openapi.spec import Reference\n from litestar.types.internal_types import PathParameterDefinition\n\n__all__ = (\"create_parameters_for_handler\",)\n\n\nclass ParameterCollection:\n \"\"\"Facilitates conditional deduplication of parameters.\n\n If multiple parameters with the same name are produced for a handler, the condition is ignored if the two\n ``Parameter`` instances are the same (the first is retained and any duplicates are ignored). If the ``Parameter``\n instances are not the same, an exception is raised.\n \"\"\"\n\n def __init__(self, route_handler: BaseRouteHandler) -> None:\n \"\"\"Initialize ``ParameterCollection``.\n\n Args:\n route_handler: Associated route handler\n \"\"\"\n self.route_handler = route_handler\n self._parameters: dict[tuple[str, str], Parameter] = {}\n\n def add(self, parameter: Parameter) -> None:\n \"\"\"Add a ``Parameter`` to the collection.\n\n If an existing parameter with the same name and type already exists, the\n parameter is ignored.\n\n If an existing parameter with the same name but different type exists, raises\n ``ImproperlyConfiguredException``.\n \"\"\"\n\n if (parameter.name, parameter.param_in) not in self._parameters:\n # because we are defining routes as unique per path, we have to handle here a situation when there is an optional\n # path parameter. e.g. get(path=[\"/\", \"/{param:str}\"]). 
When parsing the parameter for path, the route handler\n # would still have a kwarg called param:\n # def handler(param: str | None) -> ...\n if parameter.param_in != ParamType.QUERY or all(\n f\"{{{parameter.name}:\" not in path for path in self.route_handler.paths\n ):\n self._parameters[(parameter.name, parameter.param_in)] = parameter\n return\n\n pre_existing = self._parameters[(parameter.name, parameter.param_in)]\n if parameter == pre_existing:\n return\n\n raise ImproperlyConfiguredException(\n f\"OpenAPI schema generation for handler `{self.route_handler}` detected multiple parameters named \"\n f\"'{parameter.name}' with different types.\"\n )\n\n def list(self) -> list[Parameter]:\n \"\"\"Return a list of all ``Parameter``'s in the collection.\"\"\"\n return list(self._parameters.values())\n\n\nclass ParameterFactory:\n \"\"\"Factory for creating OpenAPI Parameters for a given route handler.\"\"\"\n\n def __init__(\n self,\n context: OpenAPIContext,\n route_handler: BaseRouteHandler,\n path_parameters: tuple[PathParameterDefinition, ...],\n ) -> None:\n \"\"\"Initialize ParameterFactory.\n\n Args:\n context: The OpenAPI context.\n route_handler: The route handler.\n path_parameters: The path parameters for the route.\n \"\"\"\n self.context = context\n self.schema_creator = SchemaCreator.from_openapi_context(self.context, prefer_alias=True)\n self.route_handler = route_handler\n self.parameters = ParameterCollection(route_handler)\n self.dependency_providers = route_handler.resolve_dependencies()\n self.layered_parameters = route_handler.resolve_layered_parameters()\n self.path_parameters: dict[str, PathParameterDefinition] = {p.name: p for p in path_parameters}\n\n def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:\n \"\"\"Create an OpenAPI Parameter instance for a field definition.\n\n Args:\n field_definition: The field definition.\n parameter_name: The name of the parameter.\n \"\"\"\n\n result: Schema | Reference | None = None\n kwarg_definition = (\n field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None\n )\n\n if parameter_name in self.path_parameters:\n param_in = ParamType.PATH\n is_required = True\n result = self.schema_creator.for_field_definition(field_definition)\n elif kwarg_definition and kwarg_definition.header:\n parameter_name = kwarg_definition.header\n param_in = ParamType.HEADER\n is_required = field_definition.is_required\n elif kwarg_definition and kwarg_definition.cookie:\n parameter_name = kwarg_definition.cookie\n param_in = ParamType.COOKIE\n is_required = field_definition.is_required\n else:\n is_required = field_definition.is_required\n param_in = ParamType.QUERY\n parameter_name = kwarg_definition.query if kwarg_definition and kwarg_definition.query else parameter_name\n\n if not result:\n result = self.schema_creator.for_field_definition(field_definition)\n\n schema = result if isinstance(result, Schema) else self.context.schema_registry.from_reference(result).schema\n\n examples_list = kwarg_definition.examples or [] if kwarg_definition else []\n examples = get_formatted_examples(field_definition, examples_list)\n\n return Parameter(\n description=schema.description,\n name=parameter_name,\n param_in=param_in,\n required=is_required,\n schema=result,\n examples=examples or None,\n )\n\n def get_layered_parameter(self, field_name: str, field_definition: FieldDefinition) -> Parameter:\n \"\"\"Create a parameter for a field definition that has a 
KwargDefinition defined on the layers.\n\n Args:\n field_name: The name of the field.\n field_definition: The field definition.\n \"\"\"\n layer_field = self.layered_parameters[field_name]\n\n field = field_definition if field_definition.is_parameter_field else layer_field\n default = layer_field.default if field_definition.has_default else field_definition.default\n annotation = field_definition.annotation if field_definition is not Empty else layer_field.annotation\n\n parameter_name = field_name\n if isinstance(field.kwarg_definition, ParameterKwarg):\n parameter_name = (\n field.kwarg_definition.query\n or field.kwarg_definition.header\n or field.kwarg_definition.cookie\n or field_name\n )\n\n field_definition = FieldDefinition.from_kwarg(\n inner_types=field.inner_types,\n default=default,\n extra=field.extra,\n annotation=annotation,\n kwarg_definition=field.kwarg_definition,\n name=field_name,\n )\n return self.create_parameter(field_definition=field_definition, parameter_name=parameter_name)\n\n def create_parameters_for_field_definitions(self, fields: dict[str, FieldDefinition]) -> None:\n \"\"\"Add Parameter models to the handler's collection for the given field definitions.\n\n Args:\n fields: The field definitions.\n \"\"\"\n unique_handler_fields = (\n (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k not in self.layered_parameters\n )\n unique_layered_fields = (\n (k, v) for k, v in self.layered_parameters.items() if k not in RESERVED_KWARGS and k not in fields\n )\n intersection_fields = (\n (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k in self.layered_parameters\n )\n\n for field_name, field_definition in unique_handler_fields:\n if (\n isinstance(field_definition.kwarg_definition, DependencyKwarg)\n and field_name not in self.dependency_providers\n ):\n # never document explicit dependencies\n continue\n\n if provider := self.dependency_providers.get(field_name):\n self.create_parameters_for_field_definitions(fields=provider.parsed_fn_signature.parameters)\n else:\n self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))\n\n for field_name, field_definition in unique_layered_fields:\n self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))\n\n for field_name, field_definition in intersection_fields:\n self.parameters.add(self.get_layered_parameter(field_name=field_name, field_definition=field_definition))\n\n def create_parameters_for_handler(self) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given PathHandler.\"\"\"\n handler_fields = self.route_handler.parsed_fn_signature.parameters\n # not all path parameters have to be consumed by the handler. 
Because even not\n # consumed path parameters must still be specified, we create stub parameters\n # for the unconsumed ones so a correct OpenAPI schema can be generated\n params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()\n handler_fields.update(\n {\n param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)\n for param_name in params_not_consumed_by_handler\n }\n )\n\n self.create_parameters_for_field_definitions(handler_fields)\n return self.parameters.list()\n\n\ndef create_parameters_for_handler(\n context: OpenAPIContext,\n route_handler: BaseRouteHandler,\n path_parameters: tuple[PathParameterDefinition, ...],\n) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given PathHandler.\"\"\"\n factory = ParameterFactory(\n context=context,\n route_handler=route_handler,\n path_parameters=path_parameters,\n )\n return factory.create_parameters_for_handler()\n", "path": "litestar/_openapi/parameters.py"}]} | 3,310 | 457 |
gh_patches_debug_1640 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
measure.label is documented under morphology.label
In the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) label is not documented, but it is [documented under the morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is deprecated).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/measure/__init__.py`
Content:
```
1 from ._find_contours import find_contours
2 from ._marching_cubes import (marching_cubes, mesh_surface_area,
3 correct_mesh_orientation)
4 from ._regionprops import regionprops, perimeter
5 from ._structural_similarity import structural_similarity
6 from ._polygon import approximate_polygon, subdivide_polygon
7 from ._pnpoly import points_in_poly, grid_points_in_poly
8 from ._moments import moments, moments_central, moments_normalized, moments_hu
9 from .profile import profile_line
10 from .fit import LineModel, CircleModel, EllipseModel, ransac
11 from .block import block_reduce
12 from ._ccomp import label
13
14
15 __all__ = ['find_contours',
16 'regionprops',
17 'perimeter',
18 'structural_similarity',
19 'approximate_polygon',
20 'subdivide_polygon',
21 'LineModel',
22 'CircleModel',
23 'EllipseModel',
24 'ransac',
25 'block_reduce',
26 'moments',
27 'moments_central',
28 'moments_normalized',
29 'moments_hu',
30 'marching_cubes',
31 'mesh_surface_area',
32 'correct_mesh_orientation',
33 'profile_line',
34 'label',
35 'points_in_poly',
36 'grid_points_in_poly']
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py
--- a/skimage/measure/__init__.py
+++ b/skimage/measure/__init__.py
@@ -9,7 +9,7 @@
from .profile import profile_line
from .fit import LineModel, CircleModel, EllipseModel, ransac
from .block import block_reduce
-from ._ccomp import label
+from ._label import label
__all__ = ['find_contours',
| {"golden_diff": "diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py\n--- a/skimage/measure/__init__.py\n+++ b/skimage/measure/__init__.py\n@@ -9,7 +9,7 @@\n from .profile import profile_line\n from .fit import LineModel, CircleModel, EllipseModel, ransac\n from .block import block_reduce\n-from ._ccomp import label\n+from ._label import label\n \n \n __all__ = ['find_contours',\n", "issue": "measure.label is documented under morphology.label\nIn the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) label is not documented, but it is [documented under morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is depreciated).\n\n", "before_files": [{"content": "from ._find_contours import find_contours\nfrom ._marching_cubes import (marching_cubes, mesh_surface_area,\n correct_mesh_orientation)\nfrom ._regionprops import regionprops, perimeter\nfrom ._structural_similarity import structural_similarity\nfrom ._polygon import approximate_polygon, subdivide_polygon\nfrom ._pnpoly import points_in_poly, grid_points_in_poly\nfrom ._moments import moments, moments_central, moments_normalized, moments_hu\nfrom .profile import profile_line\nfrom .fit import LineModel, CircleModel, EllipseModel, ransac\nfrom .block import block_reduce\nfrom ._ccomp import label\n\n\n__all__ = ['find_contours',\n 'regionprops',\n 'perimeter',\n 'structural_similarity',\n 'approximate_polygon',\n 'subdivide_polygon',\n 'LineModel',\n 'CircleModel',\n 'EllipseModel',\n 'ransac',\n 'block_reduce',\n 'moments',\n 'moments_central',\n 'moments_normalized',\n 'moments_hu',\n 'marching_cubes',\n 'mesh_surface_area',\n 'correct_mesh_orientation',\n 'profile_line',\n 'label',\n 'points_in_poly',\n 'grid_points_in_poly']\n", "path": "skimage/measure/__init__.py"}], "after_files": [{"content": "from ._find_contours import find_contours\nfrom ._marching_cubes import (marching_cubes, mesh_surface_area,\n correct_mesh_orientation)\nfrom ._regionprops import regionprops, perimeter\nfrom ._structural_similarity import structural_similarity\nfrom ._polygon import approximate_polygon, subdivide_polygon\nfrom ._pnpoly import points_in_poly, grid_points_in_poly\nfrom ._moments import moments, moments_central, moments_normalized, moments_hu\nfrom .profile import profile_line\nfrom .fit import LineModel, CircleModel, EllipseModel, ransac\nfrom .block import block_reduce\nfrom ._label import label\n\n\n__all__ = ['find_contours',\n 'regionprops',\n 'perimeter',\n 'structural_similarity',\n 'approximate_polygon',\n 'subdivide_polygon',\n 'LineModel',\n 'CircleModel',\n 'EllipseModel',\n 'ransac',\n 'block_reduce',\n 'moments',\n 'moments_central',\n 'moments_normalized',\n 'moments_hu',\n 'marching_cubes',\n 'mesh_surface_area',\n 'correct_mesh_orientation',\n 'profile_line',\n 'label',\n 'points_in_poly',\n 'grid_points_in_poly']\n", "path": "skimage/measure/__init__.py"}]} | 661 | 114 |
gh_patches_debug_38564 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
service domain name will be changed after relaunch due to PR 1543
ElasticDL workers use service domain names to connect to PS pods.
Service domain names should not be changed after PS pods relaunch.
https://github.com/sql-machine-learning/elasticdl/pull/1543 causes a regression that will change the service domain name after PS relaunch.
```
[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:139:_update_addr] addr list before update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-1.kubemaker.svc:2222']
[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:143:_update_addr] addr list after update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-2.kubemaker.svc:2222']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/master/k8s_instance_manager.py`
Content:
```
1 import copy
2 import itertools
3 import threading
4 from collections import Counter
5
6 from elasticdl.python.common import k8s_client as k8s
7 from elasticdl.python.common.log_utils import default_logger as logger
8
9 _SERVICE_ADDR_SEP = ","
10
11
12 class InstanceManager(object):
13 def __init__(
14 self,
15 task_d,
16 num_workers=1,
17 worker_command=None,
18 worker_args=None,
19 worker_resource_request="cpu=1,memory=4096Mi",
20 worker_resource_limit="cpu=1,memory=4096Mi",
21 worker_pod_priority=None,
22 num_ps=0,
23 ps_command=None,
24 ps_args=None,
25 ps_resource_request="cpu=1,memory=4096Mi",
26 ps_resource_limit="cpu=1,memory=4096Mi",
27 ps_pod_priority=None,
28 volume=None,
29 image_pull_policy=None,
30 restart_policy="Never",
31 envs=None,
32 **kwargs
33 ):
34 self._num_workers = num_workers
35 self._worker_command = worker_command
36 self._worker_args = worker_args
37 self._worker_resource_request = worker_resource_request
38 self._worker_resource_limit = worker_resource_limit
39 self._worker_pod_priority = worker_pod_priority
40
41 self._num_ps = num_ps
42 self._ps_command = ps_command
43 self._ps_args = ps_args
44 self._ps_resource_request = ps_resource_request
45 self._ps_resource_limit = ps_resource_limit
46 self._ps_pod_priority = ps_pod_priority
47
48 self._restart_policy = restart_policy
49 self._volume = volume
50 self._image_pull_policy = image_pull_policy
51 self._envs = envs
52 self._task_d = task_d
53 self._next_worker_id = itertools.count().__next__
54 self._next_ps_id = itertools.count().__next__
55
56 # Protects followed variables, which are accessed from event_cb.
57 self._lock = threading.Lock()
58 # worker id to (pod name, phase) mapping
59 # phase: None/Pending/Running/Succeeded/Failed/Unknown
60 # None: worker was just launched, haven't received event yet.
61 # Pending: worker pod not started yet
62 # Running: worker pod is running
63 # Succeeded: worker pod finishes all tasks and terminates with
64 # no issue.
65 # Failed: worker pod is killed for some reason
66 # Unknown: unknown
67 self._worker_pods_phase = {}
68 # pod name to worker id mapping
69 self._worker_pod_name_to_id = {}
70
71 self._relaunch_deleted_live_worker = True
72
73 self._ps_pods_phase = {}
74 self._ps_pod_name_to_id = {}
75 self._relaunch_deleted_live_ps = True
76
77 self._k8s_client = k8s.Client(event_callback=self._event_cb, **kwargs)
78 self._ps_addrs = self._get_addrs(
79 self._num_ps, self._k8s_client.get_ps_service_address
80 )
81 # TODO: Select a worker address to be used for broadcasting model
82 # parameters under allreduce-strategy.
83 self._worker_addrs = self._get_addrs(
84 self._num_workers, self._k8s_client.get_worker_service_address
85 )
86
87 def _start_worker(self, worker_id):
88 logger.info("Starting worker: %d" % worker_id)
89 with self._lock:
90 pod = self._k8s_client.create_worker(
91 worker_id=worker_id,
92 resource_requests=self._worker_resource_request,
93 resource_limits=self._worker_resource_limit,
94 pod_priority=self._worker_pod_priority,
95 volume=self._volume,
96 image_pull_policy=self._image_pull_policy,
97 command=self._worker_command,
98 args=self._worker_args
99 + ["--worker_id", str(worker_id)]
100 + ["--ps_addrs", self._ps_addrs],
101 restart_policy=self._restart_policy,
102 ps_addrs=self._ps_addrs,
103 envs=copy.deepcopy(self._envs),
104 )
105 name = pod.metadata.name
106 self._worker_pod_name_to_id[name] = worker_id
107 self._worker_pods_phase[worker_id] = (name, None)
108 self._k8s_client.create_worker_service(worker_id)
109
110 def _start_ps(self, ps_id):
111 logger.info("Starting PS: %d" % ps_id)
112 with self._lock:
113 pod = self._k8s_client.create_ps(
114 ps_id=ps_id,
115 resource_requests=self._ps_resource_request,
116 resource_limits=self._ps_resource_limit,
117 pod_priority=self._ps_pod_priority,
118 volume=self._volume,
119 image_pull_policy=self._image_pull_policy,
120 command=self._ps_command,
121 args=self._ps_args + ["--ps_id", str(ps_id)],
122 restart_policy=self._restart_policy,
123 envs=copy.deepcopy(self._envs),
124 )
125 name = pod.metadata.name
126 self._ps_pod_name_to_id[name] = ps_id
127 self._ps_pods_phase[ps_id] = (name, None)
128 self._k8s_client.create_ps_service(ps_id)
129
130 def _get_addrs(self, num_addrs, addr_get_fn):
131 addrs = []
132 for addr_id in range(num_addrs):
133 addrs.append(addr_get_fn(addr_id))
134 return _SERVICE_ADDR_SEP.join(addrs)
135
136 @staticmethod
137 def _update_addr(old_addr, new_addr, addrs, addr_get_fn):
138 addrs_list = addrs.split(_SERVICE_ADDR_SEP)
139 addrs_list[addrs_list.index(addr_get_fn(old_addr))] = addr_get_fn(
140 new_addr
141 )
142 return _SERVICE_ADDR_SEP.join(addrs_list)
143
144 def update_status(self, status):
145 master_name = self._k8s_client.get_master_pod_name()
146 self._k8s_client.patch_labels_to_pod(
147 master_name, labels_dict={"status": status}
148 )
149
150 def start_workers(self):
151 for _ in range(self._num_workers):
152 self._start_worker(self._next_worker_id())
153
154 def start_parameter_servers(self):
155 for _ in range(self._num_ps):
156 self._start_ps(self._next_ps_id())
157
158 def _remove_worker(self, worker_id):
159 logger.info("Removing worker: %d", worker_id)
160 with self._lock:
161 if worker_id not in self._worker_pods_phase:
162 logger.error("Unknown worker id: %s" % worker_id)
163 return
164
165 # TODO: change _k8s_client to accept pod name instead of worker id.
166 self._k8s_client.delete_worker(worker_id)
167
168 def _remove_ps(self, ps_id):
169 logger.info("Removing PS: %d", ps_id)
170 with self._lock:
171 if ps_id not in self._ps_pods_phase:
172 logger.error("Unknown PS id: %s" % ps_id)
173 return
174
175 self._k8s_client.delete_ps(ps_id)
176
177 def stop_relaunch_and_remove_workers(self):
178 with self._lock:
179 self._relaunch_deleted_live_worker = False
180 for worker_id in self._worker_pods_phase:
181 self._k8s_client.delete_worker(worker_id)
182
183 def stop_relaunch_and_remove_all_ps(self):
184 with self._lock:
185 self._relaunch_deleted_live_ps = False
186 for ps_id in self._ps_pods_phase:
187 self._k8s_client.delete_ps(ps_id)
188
189 def get_worker_counter(self):
190 with self._lock:
191 return Counter([v for _, v in self._worker_pods_phase.values()])
192
193 def get_ps_counter(self):
194 with self._lock:
195 return Counter([v for _, v in self._ps_pods_phase.values()])
196
197 def _event_cb(self, event):
198 evt_obj = event.get("object")
199 evt_type = event.get("type")
200 if not evt_obj or not evt_type:
201 logger.error("Event doesn't have object or type: %s" % event)
202 return
203
204 if evt_obj.kind != "Pod":
205 # We only care about pod related events
206 return
207
208 pod_name = evt_obj.metadata.name
209 phase = evt_obj.status.phase
210 logger.info(
211 "Got event %s, phase %s for pod: %s" % (evt_type, phase, pod_name)
212 )
213 if pod_name == self._k8s_client.get_master_pod_name():
214 # No need to care about master pod
215 return
216
217 relaunch_worker = False
218 relaunch_ps = False
219 worker_id = None
220 ps_id = None
221 with self._lock:
222 if pod_name in self._worker_pod_name_to_id:
223 worker_id = self._worker_pod_name_to_id.get(pod_name)
224 self._worker_pods_phase[worker_id] = (pod_name, phase)
225 if evt_type == "DELETED":
226 del self._worker_pods_phase[worker_id]
227 del self._worker_pod_name_to_id[pod_name]
228 self._task_d.recover_tasks(worker_id)
229
230 # If a deleted pod was not "Succeeded", relaunch a worker.
231 relaunch_worker = (
232 self._relaunch_deleted_live_worker
233 and phase != "Succeeded"
234 )
235
236 elif pod_name in self._ps_pod_name_to_id:
237 ps_id = self._ps_pod_name_to_id.get(pod_name)
238 self._ps_pods_phase[ps_id] = (pod_name, phase)
239 if evt_type == "DELETED":
240 del self._ps_pods_phase[ps_id]
241 del self._ps_pod_name_to_id[pod_name]
242 relaunch_ps = self._relaunch_deleted_live_ps
243 else:
244 logger.error("Unknown pod name: %s" % pod_name)
245 return
246
247 if relaunch_worker and worker_id:
248 logger.info("Relaunching worker.")
249 new_worker_id = self._next_worker_id()
250 self._start_worker(new_worker_id)
251 self._update_addr(
252 worker_id,
253 new_worker_id,
254 self._worker_addrs,
255 addr_get_fn=self._k8s_client.get_worker_service_address,
256 )
257 elif relaunch_ps and ps_id:
258 logger.info("Relaunching ps.")
259 new_ps_id = self._next_ps_id()
260 self._start_ps(new_ps_id)
261 self._update_addr(
262 ps_id,
263 new_ps_id,
264 self._ps_addrs,
265 addr_get_fn=self._k8s_client.get_ps_service_address,
266 )
267
268 @property
269 def ps_addrs(self):
270 return self._ps_addrs
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/master/k8s_instance_manager.py b/elasticdl/python/master/k8s_instance_manager.py
--- a/elasticdl/python/master/k8s_instance_manager.py
+++ b/elasticdl/python/master/k8s_instance_manager.py
@@ -51,7 +51,6 @@
self._envs = envs
self._task_d = task_d
self._next_worker_id = itertools.count().__next__
- self._next_ps_id = itertools.count().__next__
# Protects followed variables, which are accessed from event_cb.
self._lock = threading.Lock()
@@ -152,8 +151,8 @@
self._start_worker(self._next_worker_id())
def start_parameter_servers(self):
- for _ in range(self._num_ps):
- self._start_ps(self._next_ps_id())
+ for i in range(self._num_ps):
+ self._start_ps(i)
def _remove_worker(self, worker_id):
logger.info("Removing worker: %d", worker_id)
@@ -216,8 +215,8 @@
relaunch_worker = False
relaunch_ps = False
- worker_id = None
- ps_id = None
+ worker_id = -1
+ ps_id = -1
with self._lock:
if pod_name in self._worker_pod_name_to_id:
worker_id = self._worker_pod_name_to_id.get(pod_name)
@@ -244,7 +243,7 @@
logger.error("Unknown pod name: %s" % pod_name)
return
- if relaunch_worker and worker_id:
+ if relaunch_worker and worker_id >= 0:
logger.info("Relaunching worker.")
new_worker_id = self._next_worker_id()
self._start_worker(new_worker_id)
@@ -254,16 +253,12 @@
self._worker_addrs,
addr_get_fn=self._k8s_client.get_worker_service_address,
)
- elif relaunch_ps and ps_id:
+ elif relaunch_ps:
logger.info("Relaunching ps.")
- new_ps_id = self._next_ps_id()
- self._start_ps(new_ps_id)
- self._update_addr(
- ps_id,
- new_ps_id,
- self._ps_addrs,
- addr_get_fn=self._k8s_client.get_ps_service_address,
- )
+ # Note: the ID and service address for relaunched parameter
+ # server are intentionally left unchanged to support fault
+ # tolerance.
+ self._start_ps(ps_id)
@property
def ps_addrs(self):
| {"golden_diff": "diff --git a/elasticdl/python/master/k8s_instance_manager.py b/elasticdl/python/master/k8s_instance_manager.py\n--- a/elasticdl/python/master/k8s_instance_manager.py\n+++ b/elasticdl/python/master/k8s_instance_manager.py\n@@ -51,7 +51,6 @@\n self._envs = envs\n self._task_d = task_d\n self._next_worker_id = itertools.count().__next__\n- self._next_ps_id = itertools.count().__next__\n \n # Protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n@@ -152,8 +151,8 @@\n self._start_worker(self._next_worker_id())\n \n def start_parameter_servers(self):\n- for _ in range(self._num_ps):\n- self._start_ps(self._next_ps_id())\n+ for i in range(self._num_ps):\n+ self._start_ps(i)\n \n def _remove_worker(self, worker_id):\n logger.info(\"Removing worker: %d\", worker_id)\n@@ -216,8 +215,8 @@\n \n relaunch_worker = False\n relaunch_ps = False\n- worker_id = None\n- ps_id = None\n+ worker_id = -1\n+ ps_id = -1\n with self._lock:\n if pod_name in self._worker_pod_name_to_id:\n worker_id = self._worker_pod_name_to_id.get(pod_name)\n@@ -244,7 +243,7 @@\n logger.error(\"Unknown pod name: %s\" % pod_name)\n return\n \n- if relaunch_worker and worker_id:\n+ if relaunch_worker and worker_id >= 0:\n logger.info(\"Relaunching worker.\")\n new_worker_id = self._next_worker_id()\n self._start_worker(new_worker_id)\n@@ -254,16 +253,12 @@\n self._worker_addrs,\n addr_get_fn=self._k8s_client.get_worker_service_address,\n )\n- elif relaunch_ps and ps_id:\n+ elif relaunch_ps:\n logger.info(\"Relaunching ps.\")\n- new_ps_id = self._next_ps_id()\n- self._start_ps(new_ps_id)\n- self._update_addr(\n- ps_id,\n- new_ps_id,\n- self._ps_addrs,\n- addr_get_fn=self._k8s_client.get_ps_service_address,\n- )\n+ # Note: the ID and service address for relaunched parameter\n+ # server are intentionally left unchanged to support fault\n+ # tolerance.\n+ self._start_ps(ps_id)\n \n @property\n def ps_addrs(self):\n", "issue": "service domain name will be changed after relaunch due to PR 1543\nElasticDL workers use service domain names to connect to PS pods. 
\r\nService domain names should not be changed after PS pods relaunch.\r\nhttps://github.com/sql-machine-learning/elasticdl/pull/1543 causes a regression that will change the service domain name after PS relaunch.\r\n\r\n```\r\n[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:139:_update_addr] addr list before update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-1.kubemaker.svc:2222']\r\n[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:143:_update_addr] addr list after update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-2.kubemaker.svc:2222']\r\n```\n", "before_files": [{"content": "import copy\nimport itertools\nimport threading\nfrom collections import Counter\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\n_SERVICE_ADDR_SEP = \",\"\n\n\nclass InstanceManager(object):\n def __init__(\n self,\n task_d,\n num_workers=1,\n worker_command=None,\n worker_args=None,\n worker_resource_request=\"cpu=1,memory=4096Mi\",\n worker_resource_limit=\"cpu=1,memory=4096Mi\",\n worker_pod_priority=None,\n num_ps=0,\n ps_command=None,\n ps_args=None,\n ps_resource_request=\"cpu=1,memory=4096Mi\",\n ps_resource_limit=\"cpu=1,memory=4096Mi\",\n ps_pod_priority=None,\n volume=None,\n image_pull_policy=None,\n restart_policy=\"Never\",\n envs=None,\n **kwargs\n ):\n self._num_workers = num_workers\n self._worker_command = worker_command\n self._worker_args = worker_args\n self._worker_resource_request = worker_resource_request\n self._worker_resource_limit = worker_resource_limit\n self._worker_pod_priority = worker_pod_priority\n\n self._num_ps = num_ps\n self._ps_command = ps_command\n self._ps_args = ps_args\n self._ps_resource_request = ps_resource_request\n self._ps_resource_limit = ps_resource_limit\n self._ps_pod_priority = ps_pod_priority\n\n self._restart_policy = restart_policy\n self._volume = volume\n self._image_pull_policy = image_pull_policy\n self._envs = envs\n self._task_d = task_d\n self._next_worker_id = itertools.count().__next__\n self._next_ps_id = itertools.count().__next__\n\n # Protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n # worker id to (pod name, phase) mapping\n # phase: None/Pending/Running/Succeeded/Failed/Unknown\n # None: worker was just launched, haven't received event yet.\n # Pending: worker pod not started yet\n # Running: worker pod is running\n # Succeeded: worker pod finishes all tasks and terminates with\n # no issue.\n # Failed: worker pod is killed for some reason\n # Unknown: unknown\n self._worker_pods_phase = {}\n # pod name to worker id mapping\n self._worker_pod_name_to_id = {}\n\n self._relaunch_deleted_live_worker = True\n\n self._ps_pods_phase = {}\n self._ps_pod_name_to_id = {}\n self._relaunch_deleted_live_ps = True\n\n self._k8s_client = k8s.Client(event_callback=self._event_cb, **kwargs)\n self._ps_addrs = self._get_addrs(\n self._num_ps, self._k8s_client.get_ps_service_address\n )\n # TODO: Select a worker address to be used for broadcasting model\n # parameters under allreduce-strategy.\n self._worker_addrs = self._get_addrs(\n self._num_workers, self._k8s_client.get_worker_service_address\n )\n\n def _start_worker(self, worker_id):\n logger.info(\"Starting worker: %d\" % worker_id)\n with self._lock:\n pod = self._k8s_client.create_worker(\n worker_id=worker_id,\n 
resource_requests=self._worker_resource_request,\n resource_limits=self._worker_resource_limit,\n pod_priority=self._worker_pod_priority,\n volume=self._volume,\n image_pull_policy=self._image_pull_policy,\n command=self._worker_command,\n args=self._worker_args\n + [\"--worker_id\", str(worker_id)]\n + [\"--ps_addrs\", self._ps_addrs],\n restart_policy=self._restart_policy,\n ps_addrs=self._ps_addrs,\n envs=copy.deepcopy(self._envs),\n )\n name = pod.metadata.name\n self._worker_pod_name_to_id[name] = worker_id\n self._worker_pods_phase[worker_id] = (name, None)\n self._k8s_client.create_worker_service(worker_id)\n\n def _start_ps(self, ps_id):\n logger.info(\"Starting PS: %d\" % ps_id)\n with self._lock:\n pod = self._k8s_client.create_ps(\n ps_id=ps_id,\n resource_requests=self._ps_resource_request,\n resource_limits=self._ps_resource_limit,\n pod_priority=self._ps_pod_priority,\n volume=self._volume,\n image_pull_policy=self._image_pull_policy,\n command=self._ps_command,\n args=self._ps_args + [\"--ps_id\", str(ps_id)],\n restart_policy=self._restart_policy,\n envs=copy.deepcopy(self._envs),\n )\n name = pod.metadata.name\n self._ps_pod_name_to_id[name] = ps_id\n self._ps_pods_phase[ps_id] = (name, None)\n self._k8s_client.create_ps_service(ps_id)\n\n def _get_addrs(self, num_addrs, addr_get_fn):\n addrs = []\n for addr_id in range(num_addrs):\n addrs.append(addr_get_fn(addr_id))\n return _SERVICE_ADDR_SEP.join(addrs)\n\n @staticmethod\n def _update_addr(old_addr, new_addr, addrs, addr_get_fn):\n addrs_list = addrs.split(_SERVICE_ADDR_SEP)\n addrs_list[addrs_list.index(addr_get_fn(old_addr))] = addr_get_fn(\n new_addr\n )\n return _SERVICE_ADDR_SEP.join(addrs_list)\n\n def update_status(self, status):\n master_name = self._k8s_client.get_master_pod_name()\n self._k8s_client.patch_labels_to_pod(\n master_name, labels_dict={\"status\": status}\n )\n\n def start_workers(self):\n for _ in range(self._num_workers):\n self._start_worker(self._next_worker_id())\n\n def start_parameter_servers(self):\n for _ in range(self._num_ps):\n self._start_ps(self._next_ps_id())\n\n def _remove_worker(self, worker_id):\n logger.info(\"Removing worker: %d\", worker_id)\n with self._lock:\n if worker_id not in self._worker_pods_phase:\n logger.error(\"Unknown worker id: %s\" % worker_id)\n return\n\n # TODO: change _k8s_client to accept pod name instead of worker id.\n self._k8s_client.delete_worker(worker_id)\n\n def _remove_ps(self, ps_id):\n logger.info(\"Removing PS: %d\", ps_id)\n with self._lock:\n if ps_id not in self._ps_pods_phase:\n logger.error(\"Unknown PS id: %s\" % ps_id)\n return\n\n self._k8s_client.delete_ps(ps_id)\n\n def stop_relaunch_and_remove_workers(self):\n with self._lock:\n self._relaunch_deleted_live_worker = False\n for worker_id in self._worker_pods_phase:\n self._k8s_client.delete_worker(worker_id)\n\n def stop_relaunch_and_remove_all_ps(self):\n with self._lock:\n self._relaunch_deleted_live_ps = False\n for ps_id in self._ps_pods_phase:\n self._k8s_client.delete_ps(ps_id)\n\n def get_worker_counter(self):\n with self._lock:\n return Counter([v for _, v in self._worker_pods_phase.values()])\n\n def get_ps_counter(self):\n with self._lock:\n return Counter([v for _, v in self._ps_pods_phase.values()])\n\n def _event_cb(self, event):\n evt_obj = event.get(\"object\")\n evt_type = event.get(\"type\")\n if not evt_obj or not evt_type:\n logger.error(\"Event doesn't have object or type: %s\" % event)\n return\n\n if evt_obj.kind != \"Pod\":\n # We only care about pod related 
events\n return\n\n pod_name = evt_obj.metadata.name\n phase = evt_obj.status.phase\n logger.info(\n \"Got event %s, phase %s for pod: %s\" % (evt_type, phase, pod_name)\n )\n if pod_name == self._k8s_client.get_master_pod_name():\n # No need to care about master pod\n return\n\n relaunch_worker = False\n relaunch_ps = False\n worker_id = None\n ps_id = None\n with self._lock:\n if pod_name in self._worker_pod_name_to_id:\n worker_id = self._worker_pod_name_to_id.get(pod_name)\n self._worker_pods_phase[worker_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._worker_pods_phase[worker_id]\n del self._worker_pod_name_to_id[pod_name]\n self._task_d.recover_tasks(worker_id)\n\n # If a deleted pod was not \"Succeeded\", relaunch a worker.\n relaunch_worker = (\n self._relaunch_deleted_live_worker\n and phase != \"Succeeded\"\n )\n\n elif pod_name in self._ps_pod_name_to_id:\n ps_id = self._ps_pod_name_to_id.get(pod_name)\n self._ps_pods_phase[ps_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._ps_pods_phase[ps_id]\n del self._ps_pod_name_to_id[pod_name]\n relaunch_ps = self._relaunch_deleted_live_ps\n else:\n logger.error(\"Unknown pod name: %s\" % pod_name)\n return\n\n if relaunch_worker and worker_id:\n logger.info(\"Relaunching worker.\")\n new_worker_id = self._next_worker_id()\n self._start_worker(new_worker_id)\n self._update_addr(\n worker_id,\n new_worker_id,\n self._worker_addrs,\n addr_get_fn=self._k8s_client.get_worker_service_address,\n )\n elif relaunch_ps and ps_id:\n logger.info(\"Relaunching ps.\")\n new_ps_id = self._next_ps_id()\n self._start_ps(new_ps_id)\n self._update_addr(\n ps_id,\n new_ps_id,\n self._ps_addrs,\n addr_get_fn=self._k8s_client.get_ps_service_address,\n )\n\n @property\n def ps_addrs(self):\n return self._ps_addrs\n", "path": "elasticdl/python/master/k8s_instance_manager.py"}], "after_files": [{"content": "import copy\nimport itertools\nimport threading\nfrom collections import Counter\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\n_SERVICE_ADDR_SEP = \",\"\n\n\nclass InstanceManager(object):\n def __init__(\n self,\n task_d,\n num_workers=1,\n worker_command=None,\n worker_args=None,\n worker_resource_request=\"cpu=1,memory=4096Mi\",\n worker_resource_limit=\"cpu=1,memory=4096Mi\",\n worker_pod_priority=None,\n num_ps=0,\n ps_command=None,\n ps_args=None,\n ps_resource_request=\"cpu=1,memory=4096Mi\",\n ps_resource_limit=\"cpu=1,memory=4096Mi\",\n ps_pod_priority=None,\n volume=None,\n image_pull_policy=None,\n restart_policy=\"Never\",\n envs=None,\n **kwargs\n ):\n self._num_workers = num_workers\n self._worker_command = worker_command\n self._worker_args = worker_args\n self._worker_resource_request = worker_resource_request\n self._worker_resource_limit = worker_resource_limit\n self._worker_pod_priority = worker_pod_priority\n\n self._num_ps = num_ps\n self._ps_command = ps_command\n self._ps_args = ps_args\n self._ps_resource_request = ps_resource_request\n self._ps_resource_limit = ps_resource_limit\n self._ps_pod_priority = ps_pod_priority\n\n self._restart_policy = restart_policy\n self._volume = volume\n self._image_pull_policy = image_pull_policy\n self._envs = envs\n self._task_d = task_d\n self._next_worker_id = itertools.count().__next__\n\n # Protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n # worker id to (pod name, phase) mapping\n # phase: 
None/Pending/Running/Succeeded/Failed/Unknown\n # None: worker was just launched, haven't received event yet.\n # Pending: worker pod not started yet\n # Running: worker pod is running\n # Succeeded: worker pod finishes all tasks and terminates with\n # no issue.\n # Failed: worker pod is killed for some reason\n # Unknown: unknown\n self._worker_pods_phase = {}\n # pod name to worker id mapping\n self._worker_pod_name_to_id = {}\n\n self._relaunch_deleted_live_worker = True\n\n self._ps_pods_phase = {}\n self._ps_pod_name_to_id = {}\n self._relaunch_deleted_live_ps = True\n\n self._k8s_client = k8s.Client(event_callback=self._event_cb, **kwargs)\n self._ps_addrs = self._get_addrs(\n self._num_ps, self._k8s_client.get_ps_service_address\n )\n # TODO: Select a worker address to be used for broadcasting model\n # parameters under allreduce-strategy.\n self._worker_addrs = self._get_addrs(\n self._num_workers, self._k8s_client.get_worker_service_address\n )\n\n def _start_worker(self, worker_id):\n logger.info(\"Starting worker: %d\" % worker_id)\n with self._lock:\n pod = self._k8s_client.create_worker(\n worker_id=worker_id,\n resource_requests=self._worker_resource_request,\n resource_limits=self._worker_resource_limit,\n pod_priority=self._worker_pod_priority,\n volume=self._volume,\n image_pull_policy=self._image_pull_policy,\n command=self._worker_command,\n args=self._worker_args\n + [\"--worker_id\", str(worker_id)]\n + [\"--ps_addrs\", self._ps_addrs],\n restart_policy=self._restart_policy,\n ps_addrs=self._ps_addrs,\n envs=copy.deepcopy(self._envs),\n )\n name = pod.metadata.name\n self._worker_pod_name_to_id[name] = worker_id\n self._worker_pods_phase[worker_id] = (name, None)\n self._k8s_client.create_worker_service(worker_id)\n\n def _start_ps(self, ps_id):\n logger.info(\"Starting PS: %d\" % ps_id)\n with self._lock:\n pod = self._k8s_client.create_ps(\n ps_id=ps_id,\n resource_requests=self._ps_resource_request,\n resource_limits=self._ps_resource_limit,\n pod_priority=self._ps_pod_priority,\n volume=self._volume,\n image_pull_policy=self._image_pull_policy,\n command=self._ps_command,\n args=self._ps_args + [\"--ps_id\", str(ps_id)],\n restart_policy=self._restart_policy,\n envs=copy.deepcopy(self._envs),\n )\n name = pod.metadata.name\n self._ps_pod_name_to_id[name] = ps_id\n self._ps_pods_phase[ps_id] = (name, None)\n self._k8s_client.create_ps_service(ps_id)\n\n def _get_addrs(self, num_addrs, addr_get_fn):\n addrs = []\n for addr_id in range(num_addrs):\n addrs.append(addr_get_fn(addr_id))\n return _SERVICE_ADDR_SEP.join(addrs)\n\n @staticmethod\n def _update_addr(old_addr, new_addr, addrs, addr_get_fn):\n addrs_list = addrs.split(_SERVICE_ADDR_SEP)\n addrs_list[addrs_list.index(addr_get_fn(old_addr))] = addr_get_fn(\n new_addr\n )\n return _SERVICE_ADDR_SEP.join(addrs_list)\n\n def update_status(self, status):\n master_name = self._k8s_client.get_master_pod_name()\n self._k8s_client.patch_labels_to_pod(\n master_name, labels_dict={\"status\": status}\n )\n\n def start_workers(self):\n for _ in range(self._num_workers):\n self._start_worker(self._next_worker_id())\n\n def start_parameter_servers(self):\n for i in range(self._num_ps):\n self._start_ps(i)\n\n def _remove_worker(self, worker_id):\n logger.info(\"Removing worker: %d\", worker_id)\n with self._lock:\n if worker_id not in self._worker_pods_phase:\n logger.error(\"Unknown worker id: %s\" % worker_id)\n return\n\n # TODO: change _k8s_client to accept pod name instead of worker id.\n 
self._k8s_client.delete_worker(worker_id)\n\n def _remove_ps(self, ps_id):\n logger.info(\"Removing PS: %d\", ps_id)\n with self._lock:\n if ps_id not in self._ps_pods_phase:\n logger.error(\"Unknown PS id: %s\" % ps_id)\n return\n\n self._k8s_client.delete_ps(ps_id)\n\n def stop_relaunch_and_remove_workers(self):\n with self._lock:\n self._relaunch_deleted_live_worker = False\n for worker_id in self._worker_pods_phase:\n self._k8s_client.delete_worker(worker_id)\n\n def stop_relaunch_and_remove_all_ps(self):\n with self._lock:\n self._relaunch_deleted_live_ps = False\n for ps_id in self._ps_pods_phase:\n self._k8s_client.delete_ps(ps_id)\n\n def get_worker_counter(self):\n with self._lock:\n return Counter([v for _, v in self._worker_pods_phase.values()])\n\n def get_ps_counter(self):\n with self._lock:\n return Counter([v for _, v in self._ps_pods_phase.values()])\n\n def _event_cb(self, event):\n evt_obj = event.get(\"object\")\n evt_type = event.get(\"type\")\n if not evt_obj or not evt_type:\n logger.error(\"Event doesn't have object or type: %s\" % event)\n return\n\n if evt_obj.kind != \"Pod\":\n # We only care about pod related events\n return\n\n pod_name = evt_obj.metadata.name\n phase = evt_obj.status.phase\n logger.info(\n \"Got event %s, phase %s for pod: %s\" % (evt_type, phase, pod_name)\n )\n if pod_name == self._k8s_client.get_master_pod_name():\n # No need to care about master pod\n return\n\n relaunch_worker = False\n relaunch_ps = False\n worker_id = -1\n ps_id = -1\n with self._lock:\n if pod_name in self._worker_pod_name_to_id:\n worker_id = self._worker_pod_name_to_id.get(pod_name)\n self._worker_pods_phase[worker_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._worker_pods_phase[worker_id]\n del self._worker_pod_name_to_id[pod_name]\n self._task_d.recover_tasks(worker_id)\n\n # If a deleted pod was not \"Succeeded\", relaunch a worker.\n relaunch_worker = (\n self._relaunch_deleted_live_worker\n and phase != \"Succeeded\"\n )\n\n elif pod_name in self._ps_pod_name_to_id:\n ps_id = self._ps_pod_name_to_id.get(pod_name)\n self._ps_pods_phase[ps_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._ps_pods_phase[ps_id]\n del self._ps_pod_name_to_id[pod_name]\n relaunch_ps = self._relaunch_deleted_live_ps\n else:\n logger.error(\"Unknown pod name: %s\" % pod_name)\n return\n\n if relaunch_worker and worker_id >= 0:\n logger.info(\"Relaunching worker.\")\n new_worker_id = self._next_worker_id()\n self._start_worker(new_worker_id)\n self._update_addr(\n worker_id,\n new_worker_id,\n self._worker_addrs,\n addr_get_fn=self._k8s_client.get_worker_service_address,\n )\n elif relaunch_ps:\n logger.info(\"Relaunching ps.\")\n # Note: the ID and service address for relaunched parameter\n # server are intentionally left unchanged to support fault\n # tolerance.\n self._start_ps(ps_id)\n\n @property\n def ps_addrs(self):\n return self._ps_addrs\n", "path": "elasticdl/python/master/k8s_instance_manager.py"}]} | 3,533 | 598 |
gh_patches_debug_27583 | rasdani/github-patches | git_diff | arviz-devs__arviz-1988 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot_density has no filter_vars argument
I think it should have the argument. It may be a little more complicated given the multiple idata input but it should still be feasible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/densityplot.py`
Content:
```
1 """KDE and histogram plots for multiple variables."""
2 import warnings
3
4 from ..data import convert_to_dataset
5 from ..labels import BaseLabeller
6 from ..sel_utils import (
7 xarray_var_iter,
8 )
9 from ..rcparams import rcParams
10 from ..utils import _var_names
11 from .plot_utils import default_grid, get_plotting_function
12
13
14 # pylint:disable-msg=too-many-function-args
15 def plot_density(
16 data,
17 group="posterior",
18 data_labels=None,
19 var_names=None,
20 transform=None,
21 hdi_prob=None,
22 point_estimate="auto",
23 colors="cycle",
24 outline=True,
25 hdi_markers="",
26 shade=0.0,
27 bw="default",
28 circular=False,
29 grid=None,
30 figsize=None,
31 textsize=None,
32 labeller=None,
33 ax=None,
34 backend=None,
35 backend_kwargs=None,
36 show=None,
37 ):
38 """Generate KDE plots for continuous variables and histograms for discrete ones.
39
40 Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per
41 variable and colors assigned to models.
42
43 Parameters
44 ----------
45 data : Union[Object, Iterator[Object]]
46 Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator
47 returning a sequence of such objects.
48 Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.
49 group: Optional[str]
50 Specifies which :class:`arviz.InferenceData` group should be plotted.
51 Defaults to 'posterior'.
52 Alternative values include 'prior' and any other strings used as dataset keys in the
53 :class:`arviz.InferenceData`.
54 data_labels : Optional[List[str]]
55 List with names for the datasets passed as "data." Useful when plotting more than one
56 dataset. Must be the same shape as the data parameter. Defaults to None.
57 var_names: Optional[List[str]]
58 List of variables to plot. If multiple datasets are supplied and var_names is not None,
59 will print the same set of variables for each dataset. Defaults to None, which results in
60 all the variables being plotted.
61 transform : callable
62 Function to transform data (defaults to None i.e. the identity function)
63 hdi_prob : float
64 Probability for the highest density interval. Should be in the interval (0, 1].
65 Defaults to 0.94.
66 point_estimate : Optional[str]
67 Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
68 Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.
69 colors : Optional[Union[List[str],str]]
70 List with valid matplotlib colors, one color per model. Alternative a string can be passed.
71 If the string is `cycle`, it will automatically choose a color per model from matplotlib's
72 cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
73 models. Defaults to `cycle`.
74 outline : bool
75 Use a line to draw KDEs and histograms. Default to True
76 hdi_markers : str
77 A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density
78 interval. Defaults to empty string (no marker).
79 shade : Optional[float]
80 Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
81 (opaque). Defaults to 0.
82 bw: Optional[float or str]
83 If numeric, indicates the bandwidth and must be positive.
84 If str, indicates the method to estimate the bandwidth and must be
85 one of "scott", "silverman", "isj" or "experimental" when `circular` is False
86 and "taylor" (for now) when `circular` is True.
87 Defaults to "default" which means "experimental" when variable is not circular
88 and "taylor" when it is.
89 circular: Optional[bool]
90 If True, it interprets the values passed are from a circular variable measured in radians
91 and a circular KDE is used. Only valid for 1D KDE. Defaults to False.
92 grid : tuple
93 Number of rows and columns. Defaults to None, the rows and columns are
94 automatically inferred.
95 figsize : Optional[Tuple[int, int]]
96 Figure size. If None it will be defined automatically.
97 textsize: Optional[float]
98 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
99 on ``figsize``.
100 labeller : labeller instance, optional
101 Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
102 Read the :ref:`label_guide` for more details and usage examples.
103 ax: numpy array-like of matplotlib axes or bokeh figures, optional
104 A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
105 its own array of plot areas (and return it).
106 backend: str, optional
107 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
108 backend_kwargs: bool, optional
109 These are kwargs specific to the backend being used, passed to
110 :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
111 For additional documentation check the plotting method of the backend.
112 show : bool, optional
113 Call backend show function.
114
115 Returns
116 -------
117 axes : matplotlib axes or bokeh figures
118
119 See Also
120 --------
121 plot_dist : Plot distribution as histogram or kernel density estimates.
122 plot_posterior : Plot Posterior densities in the style of John K. Kruschke’s book.
123
124 Examples
125 --------
126 Plot default density plot
127
128 .. plot::
129 :context: close-figs
130
131 >>> import arviz as az
132 >>> centered = az.load_arviz_data('centered_eight')
133 >>> non_centered = az.load_arviz_data('non_centered_eight')
134 >>> az.plot_density([centered, non_centered])
135
136 Plot variables in a 4x5 grid
137
138 .. plot::
139 :context: close-figs
140
141 >>> az.plot_density([centered, non_centered], grid=(4, 5))
142
143 Plot subset variables by specifying variable name exactly
144
145 .. plot::
146 :context: close-figs
147
148 >>> az.plot_density([centered, non_centered], var_names=["mu"])
149
150 Plot a specific `az.InferenceData` group
151
152 .. plot::
153 :context: close-figs
154
155 >>> az.plot_density([centered, non_centered], var_names=["mu"], group="prior")
156
157 Specify highest density interval
158
159 .. plot::
160 :context: close-figs
161
162 >>> az.plot_density([centered, non_centered], var_names=["mu"], hdi_prob=.5)
163
164 Shade plots and/or remove outlines
165
166 .. plot::
167 :context: close-figs
168
169 >>> az.plot_density([centered, non_centered], var_names=["mu"], outline=False, shade=.8)
170
171 Specify binwidth for kernel density estimation
172
173 .. plot::
174 :context: close-figs
175
176 >>> az.plot_density([centered, non_centered], var_names=["mu"], bw=.9)
177 """
178 if not isinstance(data, (list, tuple)):
179 datasets = [convert_to_dataset(data, group=group)]
180 else:
181 datasets = [convert_to_dataset(datum, group=group) for datum in data]
182
183 if transform is not None:
184 datasets = [transform(dataset) for dataset in datasets]
185
186 if labeller is None:
187 labeller = BaseLabeller()
188
189 var_names = _var_names(var_names, datasets)
190 n_data = len(datasets)
191
192 if data_labels is None:
193 if n_data > 1:
194 data_labels = [f"{idx}" for idx in range(n_data)]
195 else:
196 data_labels = [""]
197 elif len(data_labels) != n_data:
198 raise ValueError(
199 "The number of names for the models ({}) "
200 "does not match the number of models ({})".format(len(data_labels), n_data)
201 )
202
203 if hdi_prob is None:
204 hdi_prob = rcParams["stats.hdi_prob"]
205 else:
206 if not 1 >= hdi_prob > 0:
207 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
208
209 to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
210 all_labels = []
211 length_plotters = []
212 for plotters in to_plot:
213 length_plotters.append(len(plotters))
214 for var_name, selection, isel, _ in plotters:
215 label = labeller.make_label_vert(var_name, selection, isel)
216 if label not in all_labels:
217 all_labels.append(label)
218 length_plotters = len(all_labels)
219 max_plots = rcParams["plot.max_subplots"]
220 max_plots = length_plotters if max_plots is None else max_plots
221 if length_plotters > max_plots:
222 warnings.warn(
223 "rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
224 "of variables to plot ({len_plotters}) in plot_density, generating only "
225 "{max_plots} plots".format(max_plots=max_plots, len_plotters=length_plotters),
226 UserWarning,
227 )
228 all_labels = all_labels[:max_plots]
229 to_plot = [
230 [
231 (var_name, selection, values)
232 for var_name, selection, isel, values in plotters
233 if labeller.make_label_vert(var_name, selection, isel) in all_labels
234 ]
235 for plotters in to_plot
236 ]
237 length_plotters = max_plots
238 rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)
239
240 if bw == "default":
241 if circular:
242 bw = "taylor"
243 else:
244 bw = "experimental"
245
246 plot_density_kwargs = dict(
247 ax=ax,
248 all_labels=all_labels,
249 to_plot=to_plot,
250 colors=colors,
251 bw=bw,
252 circular=circular,
253 figsize=figsize,
254 length_plotters=length_plotters,
255 rows=rows,
256 cols=cols,
257 textsize=textsize,
258 labeller=labeller,
259 hdi_prob=hdi_prob,
260 point_estimate=point_estimate,
261 hdi_markers=hdi_markers,
262 outline=outline,
263 shade=shade,
264 n_data=n_data,
265 data_labels=data_labels,
266 backend_kwargs=backend_kwargs,
267 show=show,
268 )
269
270 if backend is None:
271 backend = rcParams["plot.backend"]
272 backend = backend.lower()
273
274 # TODO: Add backend kwargs
275 plot = get_plotting_function("plot_density", "densityplot", backend)
276 ax = plot(**plot_density_kwargs)
277 return ax
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py
--- a/arviz/plots/densityplot.py
+++ b/arviz/plots/densityplot.py
@@ -17,6 +17,7 @@
group="posterior",
data_labels=None,
var_names=None,
+ filter_vars=None,
transform=None,
hdi_prob=None,
point_estimate="auto",
@@ -58,6 +59,11 @@
List of variables to plot. If multiple datasets are supplied and var_names is not None,
will print the same set of variables for each dataset. Defaults to None, which results in
all the variables being plotted.
+ filter_vars: {None, "like", "regex"}, optional, default=None
+ If `None` (default), interpret var_names as the real variables names. If "like",
+ interpret var_names as substrings of the real variables names. If "regex",
+ interpret var_names as regular expressions on the real variables names. A la
+ ``pandas.filter``.
transform : callable
Function to transform data (defaults to None i.e. the identity function)
hdi_prob : float
@@ -186,7 +192,7 @@
if labeller is None:
labeller = BaseLabeller()
- var_names = _var_names(var_names, datasets)
+ var_names = _var_names(var_names, datasets, filter_vars)
n_data = len(datasets)
if data_labels is None:
| {"golden_diff": "diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py\n--- a/arviz/plots/densityplot.py\n+++ b/arviz/plots/densityplot.py\n@@ -17,6 +17,7 @@\n group=\"posterior\",\n data_labels=None,\n var_names=None,\n+ filter_vars=None,\n transform=None,\n hdi_prob=None,\n point_estimate=\"auto\",\n@@ -58,6 +59,11 @@\n List of variables to plot. If multiple datasets are supplied and var_names is not None,\n will print the same set of variables for each dataset. Defaults to None, which results in\n all the variables being plotted.\n+ filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n+ If `None` (default), interpret var_names as the real variables names. If \"like\",\n+ interpret var_names as substrings of the real variables names. If \"regex\",\n+ interpret var_names as regular expressions on the real variables names. A la\n+ ``pandas.filter``.\n transform : callable\n Function to transform data (defaults to None i.e. the identity function)\n hdi_prob : float\n@@ -186,7 +192,7 @@\n if labeller is None:\n labeller = BaseLabeller()\n \n- var_names = _var_names(var_names, datasets)\n+ var_names = _var_names(var_names, datasets, filter_vars)\n n_data = len(datasets)\n \n if data_labels is None:\n", "issue": "plot_density has no filter_vars argument\nI think it should have the argument. It may be a little more complicated given the multiple idata input but is should still be feasible.\r\n\n", "before_files": [{"content": "\"\"\"KDE and histogram plots for multiple variables.\"\"\"\nimport warnings\n\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller\nfrom ..sel_utils import (\n xarray_var_iter,\n)\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names\nfrom .plot_utils import default_grid, get_plotting_function\n\n\n# pylint:disable-msg=too-many-function-args\ndef plot_density(\n data,\n group=\"posterior\",\n data_labels=None,\n var_names=None,\n transform=None,\n hdi_prob=None,\n point_estimate=\"auto\",\n colors=\"cycle\",\n outline=True,\n hdi_markers=\"\",\n shade=0.0,\n bw=\"default\",\n circular=False,\n grid=None,\n figsize=None,\n textsize=None,\n labeller=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Generate KDE plots for continuous variables and histograms for discrete ones.\n\n Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per\n variable and colors assigned to models.\n\n Parameters\n ----------\n data : Union[Object, Iterator[Object]]\n Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator\n returning a sequence of such objects.\n Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.\n group: Optional[str]\n Specifies which :class:`arviz.InferenceData` group should be plotted.\n Defaults to 'posterior'.\n Alternative values include 'prior' and any other strings used as dataset keys in the\n :class:`arviz.InferenceData`.\n data_labels : Optional[List[str]]\n List with names for the datasets passed as \"data.\" Useful when plotting more than one\n dataset. Must be the same shape as the data parameter. Defaults to None.\n var_names: Optional[List[str]]\n List of variables to plot. If multiple datasets are supplied and var_names is not None,\n will print the same set of variables for each dataset. Defaults to None, which results in\n all the variables being plotted.\n transform : callable\n Function to transform data (defaults to None i.e. 
the identity function)\n hdi_prob : float\n Probability for the highest density interval. Should be in the interval (0, 1].\n Defaults to 0.94.\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.\n colors : Optional[Union[List[str],str]]\n List with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically choose a color per model from matplotlib's\n cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all\n models. Defaults to `cycle`.\n outline : bool\n Use a line to draw KDEs and histograms. Default to True\n hdi_markers : str\n A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density\n interval. Defaults to empty string (no marker).\n shade : Optional[float]\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). Defaults to 0.\n bw: Optional[float or str]\n If numeric, indicates the bandwidth and must be positive.\n If str, indicates the method to estimate the bandwidth and must be\n one of \"scott\", \"silverman\", \"isj\" or \"experimental\" when `circular` is False\n and \"taylor\" (for now) when `circular` is True.\n Defaults to \"default\" which means \"experimental\" when variable is not circular\n and \"taylor\" when it is.\n circular: Optional[bool]\n If True, it interprets the values passed are from a circular variable measured in radians\n and a circular KDE is used. Only valid for 1D KDE. Defaults to False.\n grid : tuple\n Number of rows and columns. Defaults to None, the rows and columns are\n automatically inferred.\n figsize : Optional[Tuple[int, int]]\n Figure size. If None it will be defined automatically.\n textsize: Optional[float]\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on ``figsize``.\n labeller : labeller instance, optional\n Class providing the method ``make_label_vert`` to generate the labels in the plot titles.\n Read the :ref:`label_guide` for more details and usage examples.\n ax: numpy array-like of matplotlib axes or bokeh figures, optional\n A 2D array of locations into which to plot the densities. If not supplied, Arviz will create\n its own array of plot areas (and return it).\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used, passed to\n :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.\n For additional documentation check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n See Also\n --------\n plot_dist : Plot distribution as histogram or kernel density estimates.\n plot_posterior : Plot Posterior densities in the style of John K. Kruschke\u2019s book.\n\n Examples\n --------\n Plot default density plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> non_centered = az.load_arviz_data('non_centered_eight')\n >>> az.plot_density([centered, non_centered])\n\n Plot variables in a 4x5 grid\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], grid=(4, 5))\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"])\n\n Plot a specific `az.InferenceData` group\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], group=\"prior\")\n\n Specify highest density interval\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], hdi_prob=.5)\n\n Shade plots and/or remove outlines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], outline=False, shade=.8)\n\n Specify binwidth for kernel density estimation\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], bw=.9)\n \"\"\"\n if not isinstance(data, (list, tuple)):\n datasets = [convert_to_dataset(data, group=group)]\n else:\n datasets = [convert_to_dataset(datum, group=group) for datum in data]\n\n if transform is not None:\n datasets = [transform(dataset) for dataset in datasets]\n\n if labeller is None:\n labeller = BaseLabeller()\n\n var_names = _var_names(var_names, datasets)\n n_data = len(datasets)\n\n if data_labels is None:\n if n_data > 1:\n data_labels = [f\"{idx}\" for idx in range(n_data)]\n else:\n data_labels = [\"\"]\n elif len(data_labels) != n_data:\n raise ValueError(\n \"The number of names for the models ({}) \"\n \"does not match the number of models ({})\".format(len(data_labels), n_data)\n )\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]\n all_labels = []\n length_plotters = []\n for plotters in to_plot:\n length_plotters.append(len(plotters))\n for var_name, selection, isel, _ in plotters:\n label = labeller.make_label_vert(var_name, selection, isel)\n if label not in all_labels:\n all_labels.append(label)\n length_plotters = len(all_labels)\n max_plots = rcParams[\"plot.max_subplots\"]\n max_plots = length_plotters if max_plots is None else max_plots\n if length_plotters > max_plots:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of variables to plot ({len_plotters}) in plot_density, generating only \"\n \"{max_plots} plots\".format(max_plots=max_plots, len_plotters=length_plotters),\n UserWarning,\n )\n all_labels = all_labels[:max_plots]\n to_plot = [\n [\n (var_name, selection, values)\n for var_name, selection, isel, values in plotters\n if labeller.make_label_vert(var_name, selection, isel) in all_labels\n ]\n for plotters in to_plot\n ]\n length_plotters = max_plots\n rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)\n\n if bw == \"default\":\n if circular:\n bw = \"taylor\"\n else:\n bw = \"experimental\"\n\n plot_density_kwargs = dict(\n ax=ax,\n all_labels=all_labels,\n to_plot=to_plot,\n colors=colors,\n bw=bw,\n circular=circular,\n figsize=figsize,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n textsize=textsize,\n labeller=labeller,\n hdi_prob=hdi_prob,\n point_estimate=point_estimate,\n hdi_markers=hdi_markers,\n outline=outline,\n shade=shade,\n n_data=n_data,\n data_labels=data_labels,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend 
is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_density\", \"densityplot\", backend)\n ax = plot(**plot_density_kwargs)\n return ax\n", "path": "arviz/plots/densityplot.py"}], "after_files": [{"content": "\"\"\"KDE and histogram plots for multiple variables.\"\"\"\nimport warnings\n\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller\nfrom ..sel_utils import (\n xarray_var_iter,\n)\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names\nfrom .plot_utils import default_grid, get_plotting_function\n\n\n# pylint:disable-msg=too-many-function-args\ndef plot_density(\n data,\n group=\"posterior\",\n data_labels=None,\n var_names=None,\n filter_vars=None,\n transform=None,\n hdi_prob=None,\n point_estimate=\"auto\",\n colors=\"cycle\",\n outline=True,\n hdi_markers=\"\",\n shade=0.0,\n bw=\"default\",\n circular=False,\n grid=None,\n figsize=None,\n textsize=None,\n labeller=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Generate KDE plots for continuous variables and histograms for discrete ones.\n\n Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per\n variable and colors assigned to models.\n\n Parameters\n ----------\n data : Union[Object, Iterator[Object]]\n Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator\n returning a sequence of such objects.\n Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.\n group: Optional[str]\n Specifies which :class:`arviz.InferenceData` group should be plotted.\n Defaults to 'posterior'.\n Alternative values include 'prior' and any other strings used as dataset keys in the\n :class:`arviz.InferenceData`.\n data_labels : Optional[List[str]]\n List with names for the datasets passed as \"data.\" Useful when plotting more than one\n dataset. Must be the same shape as the data parameter. Defaults to None.\n var_names: Optional[List[str]]\n List of variables to plot. If multiple datasets are supplied and var_names is not None,\n will print the same set of variables for each dataset. Defaults to None, which results in\n all the variables being plotted.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n ``pandas.filter``.\n transform : callable\n Function to transform data (defaults to None i.e. the identity function)\n hdi_prob : float\n Probability for the highest density interval. Should be in the interval (0, 1].\n Defaults to 0.94.\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.\n colors : Optional[Union[List[str],str]]\n List with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically choose a color per model from matplotlib's\n cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all\n models. Defaults to `cycle`.\n outline : bool\n Use a line to draw KDEs and histograms. 
Default to True\n hdi_markers : str\n A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density\n interval. Defaults to empty string (no marker).\n shade : Optional[float]\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). Defaults to 0.\n bw: Optional[float or str]\n If numeric, indicates the bandwidth and must be positive.\n If str, indicates the method to estimate the bandwidth and must be\n one of \"scott\", \"silverman\", \"isj\" or \"experimental\" when `circular` is False\n and \"taylor\" (for now) when `circular` is True.\n Defaults to \"default\" which means \"experimental\" when variable is not circular\n and \"taylor\" when it is.\n circular: Optional[bool]\n If True, it interprets the values passed are from a circular variable measured in radians\n and a circular KDE is used. Only valid for 1D KDE. Defaults to False.\n grid : tuple\n Number of rows and columns. Defaults to None, the rows and columns are\n automatically inferred.\n figsize : Optional[Tuple[int, int]]\n Figure size. If None it will be defined automatically.\n textsize: Optional[float]\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on ``figsize``.\n labeller : labeller instance, optional\n Class providing the method ``make_label_vert`` to generate the labels in the plot titles.\n Read the :ref:`label_guide` for more details and usage examples.\n ax: numpy array-like of matplotlib axes or bokeh figures, optional\n A 2D array of locations into which to plot the densities. If not supplied, Arviz will create\n its own array of plot areas (and return it).\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used, passed to\n :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.\n For additional documentation check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n See Also\n --------\n plot_dist : Plot distribution as histogram or kernel density estimates.\n plot_posterior : Plot Posterior densities in the style of John K. Kruschke\u2019s book.\n\n Examples\n --------\n Plot default density plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> non_centered = az.load_arviz_data('non_centered_eight')\n >>> az.plot_density([centered, non_centered])\n\n Plot variables in a 4x5 grid\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], grid=(4, 5))\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"])\n\n Plot a specific `az.InferenceData` group\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], group=\"prior\")\n\n Specify highest density interval\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], hdi_prob=.5)\n\n Shade plots and/or remove outlines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], outline=False, shade=.8)\n\n Specify binwidth for kernel density estimation\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], bw=.9)\n \"\"\"\n if not isinstance(data, (list, tuple)):\n datasets = [convert_to_dataset(data, group=group)]\n else:\n datasets = [convert_to_dataset(datum, group=group) for datum in data]\n\n if transform is not None:\n datasets = [transform(dataset) for dataset in datasets]\n\n if labeller is None:\n labeller = BaseLabeller()\n\n var_names = _var_names(var_names, datasets, filter_vars)\n n_data = len(datasets)\n\n if data_labels is None:\n if n_data > 1:\n data_labels = [f\"{idx}\" for idx in range(n_data)]\n else:\n data_labels = [\"\"]\n elif len(data_labels) != n_data:\n raise ValueError(\n \"The number of names for the models ({}) \"\n \"does not match the number of models ({})\".format(len(data_labels), n_data)\n )\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]\n all_labels = []\n length_plotters = []\n for plotters in to_plot:\n length_plotters.append(len(plotters))\n for var_name, selection, isel, _ in plotters:\n label = labeller.make_label_vert(var_name, selection, isel)\n if label not in all_labels:\n all_labels.append(label)\n length_plotters = len(all_labels)\n max_plots = rcParams[\"plot.max_subplots\"]\n max_plots = length_plotters if max_plots is None else max_plots\n if length_plotters > max_plots:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of variables to plot ({len_plotters}) in plot_density, generating only \"\n \"{max_plots} plots\".format(max_plots=max_plots, len_plotters=length_plotters),\n UserWarning,\n )\n all_labels = all_labels[:max_plots]\n to_plot = [\n [\n (var_name, selection, values)\n for var_name, selection, isel, values in plotters\n if labeller.make_label_vert(var_name, selection, isel) in all_labels\n ]\n for plotters in to_plot\n ]\n length_plotters = max_plots\n rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)\n\n if bw == \"default\":\n if circular:\n bw = \"taylor\"\n else:\n bw = \"experimental\"\n\n plot_density_kwargs = dict(\n ax=ax,\n all_labels=all_labels,\n to_plot=to_plot,\n colors=colors,\n bw=bw,\n circular=circular,\n figsize=figsize,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n textsize=textsize,\n labeller=labeller,\n hdi_prob=hdi_prob,\n point_estimate=point_estimate,\n hdi_markers=hdi_markers,\n outline=outline,\n shade=shade,\n n_data=n_data,\n data_labels=data_labels,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_density\", \"densityplot\", backend)\n ax = plot(**plot_density_kwargs)\n return ax\n", "path": "arviz/plots/densityplot.py"}]} | 3,437 | 342 |
gh_patches_debug_38927 | rasdani/github-patches | git_diff | ansible-collections__community.general-7357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cargo fails to find standard homedir path for cargo binary
### Summary
the cargo module fails with -
`TASK [Install tokei Rust package] ***************************************************************************************************************************
fatal: [hostname]: FAILED! => {"changed": false, "msg": "Failed to find required executable \"cargo\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin"}`
The cargo executable is located in the default rustup install location `/home/username/.cargo/bin/`.
### Issue Type
Bug Report
### Component Name
cargo
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.3]
config file = /home/username/foo/ansible.cfg
configured module search path = ['/home/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
ansible collection location = /home/username/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.10 (main, Feb 22 2022, 13:54:07) [GCC 11.2.0]
jinja version = 3.0.3
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /usr/lib/python3/dist-packages/ansible_collections
Collection Version
----------------- -------
community.general 4.5.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
DEFAULT_HOST_LIST(/home/username/foo/ansible.cfg) = ['/home/username/foo/HOSTS']
```
### OS / Environment
Debian Bookworm targeting Bookworm
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
Run this module against a home directory with cargo installed in the default rustup location.
### Expected Results
cargo installs the package
### Actual Results
```console (paste below)
TASK [Install tokei Rust package] ***************************************************************************************************************************
fatal: [hostname]: FAILED! => {"changed": false, "msg": "Failed to find required executable \"cargo\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin"}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
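For context, a common interim workaround (not taken from the issue; the path and the use of `ansible_env` are illustrative and assume fact gathering is enabled) is to extend PATH for the task so that the module's executable lookup can see a user-local cargo:

```yaml
# Hedged sketch of a workaround: prepend the rustup bin directory to PATH for
# this task only, so get_bin_path can resolve the user-local cargo binary.
- name: Install tokei Rust package
  community.general.cargo:
    name: tokei
  environment:
    PATH: "/home/username/.cargo/bin:{{ ansible_env.PATH }}"
```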
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/cargo.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # Copyright (c) 2021 Radek Sprta <[email protected]>
4 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
5 # SPDX-License-Identifier: GPL-3.0-or-later
6
7 from __future__ import absolute_import, division, print_function
8
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r"""
13 ---
14 module: cargo
15 short_description: Manage Rust packages with cargo
16 version_added: 4.3.0
17 description:
18 - Manage Rust packages with cargo.
19 author: "Radek Sprta (@radek-sprta)"
20 extends_documentation_fragment:
21 - community.general.attributes
22 attributes:
23 check_mode:
24 support: full
25 diff_mode:
26 support: none
27 options:
28 name:
29 description:
30 - The name of a Rust package to install.
31 type: list
32 elements: str
33 required: true
34 path:
35 description:
36 ->
37 The base path where to install the Rust packages. Cargo automatically appends
38 V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).
39 type: path
40 version:
41 description:
42 ->
43 The version to install. If O(name) contains multiple values, the module will
44 try to install all of them in this version.
45 type: str
46 required: false
47 locked:
48 description:
49 - Install with locked dependencies.
50 - This is only used when installing packages.
51 required: false
52 type: bool
53 default: false
54 version_added: 7.5.0
55 state:
56 description:
57 - The state of the Rust package.
58 required: false
59 type: str
60 default: present
61 choices: [ "present", "absent", "latest" ]
62 requirements:
63 - cargo installed in bin path (recommended /usr/local/bin)
64 """
65
66 EXAMPLES = r"""
67 - name: Install "ludusavi" Rust package
68 community.general.cargo:
69 name: ludusavi
70
71 - name: Install "ludusavi" Rust package with locked dependencies
72 community.general.cargo:
73 name: ludusavi
74 locked: true
75
76 - name: Install "ludusavi" Rust package in version 0.10.0
77 community.general.cargo:
78 name: ludusavi
79 version: '0.10.0'
80
81 - name: Install "ludusavi" Rust package to global location
82 community.general.cargo:
83 name: ludusavi
84 path: /usr/local
85
86 - name: Remove "ludusavi" Rust package
87 community.general.cargo:
88 name: ludusavi
89 state: absent
90
91 - name: Update "ludusavi" Rust package its latest version
92 community.general.cargo:
93 name: ludusavi
94 state: latest
95 """
96
97 import os
98 import re
99
100 from ansible.module_utils.basic import AnsibleModule
101
102
103 class Cargo(object):
104 def __init__(self, module, **kwargs):
105 self.module = module
106 self.name = kwargs["name"]
107 self.path = kwargs["path"]
108 self.state = kwargs["state"]
109 self.version = kwargs["version"]
110 self.locked = kwargs["locked"]
111
112 self.executable = [module.get_bin_path("cargo", True)]
113
114 @property
115 def path(self):
116 return self._path
117
118 @path.setter
119 def path(self, path):
120 if path is not None and not os.path.isdir(path):
121 self.module.fail_json(msg="Path %s is not a directory" % path)
122 self._path = path
123
124 def _exec(
125 self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
126 ):
127 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
128 cmd = self.executable + args
129 rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
130 return out, err
131 return "", ""
132
133 def get_installed(self):
134 cmd = ["install", "--list"]
135 data, dummy = self._exec(cmd, True, False, False)
136
137 package_regex = re.compile(r"^([\w\-]+) v(.+):$")
138 installed = {}
139 for line in data.splitlines():
140 package_info = package_regex.match(line)
141 if package_info:
142 installed[package_info.group(1)] = package_info.group(2)
143
144 return installed
145
146 def install(self, packages=None):
147 cmd = ["install"]
148 cmd.extend(packages or self.name)
149 if self.locked:
150 cmd.append("--locked")
151 if self.path:
152 cmd.append("--root")
153 cmd.append(self.path)
154 if self.version:
155 cmd.append("--version")
156 cmd.append(self.version)
157 return self._exec(cmd)
158
159 def is_outdated(self, name):
160 installed_version = self.get_installed().get(name)
161
162 cmd = ["search", name, "--limit", "1"]
163 data, dummy = self._exec(cmd, True, False, False)
164
165 match = re.search(r'"(.+)"', data)
166 if match:
167 latest_version = match.group(1)
168
169 return installed_version != latest_version
170
171 def uninstall(self, packages=None):
172 cmd = ["uninstall"]
173 cmd.extend(packages or self.name)
174 return self._exec(cmd)
175
176
177 def main():
178 arg_spec = dict(
179 name=dict(required=True, type="list", elements="str"),
180 path=dict(default=None, type="path"),
181 state=dict(default="present", choices=["present", "absent", "latest"]),
182 version=dict(default=None, type="str"),
183 locked=dict(default=False, type="bool"),
184 )
185 module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
186
187 name = module.params["name"]
188 path = module.params["path"]
189 state = module.params["state"]
190 version = module.params["version"]
191 locked = module.params["locked"]
192
193 if not name:
194 module.fail_json(msg="Package name must be specified")
195
196 # Set LANG env since we parse stdout
197 module.run_command_environ_update = dict(
198 LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
199 )
200
201 cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)
202 changed, out, err = False, None, None
203 installed_packages = cargo.get_installed()
204 if state == "present":
205 to_install = [
206 n
207 for n in name
208 if (n not in installed_packages)
209 or (version and version != installed_packages[n])
210 ]
211 if to_install:
212 changed = True
213 out, err = cargo.install(to_install)
214 elif state == "latest":
215 to_update = [
216 n for n in name if n not in installed_packages or cargo.is_outdated(n)
217 ]
218 if to_update:
219 changed = True
220 out, err = cargo.install(to_update)
221 else: # absent
222 to_uninstall = [n for n in name if n in installed_packages]
223 if to_uninstall:
224 changed = True
225 out, err = cargo.uninstall(to_uninstall)
226
227 module.exit_json(changed=changed, stdout=out, stderr=err)
228
229
230 if __name__ == "__main__":
231 main()
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py
--- a/plugins/modules/cargo.py
+++ b/plugins/modules/cargo.py
@@ -25,6 +25,12 @@
diff_mode:
support: none
options:
+ executable:
+ description:
+ - Path to the C(cargo) installed in the system.
+ - If not specified, the module will look C(cargo) in E(PATH).
+ type: path
+ version_added: 7.5.0
name:
description:
- The name of a Rust package to install.
@@ -60,7 +66,7 @@
default: present
choices: [ "present", "absent", "latest" ]
requirements:
- - cargo installed in bin path (recommended /usr/local/bin)
+ - cargo installed
"""
EXAMPLES = r"""
@@ -103,14 +109,13 @@
class Cargo(object):
def __init__(self, module, **kwargs):
self.module = module
+ self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
self.name = kwargs["name"]
self.path = kwargs["path"]
self.state = kwargs["state"]
self.version = kwargs["version"]
self.locked = kwargs["locked"]
- self.executable = [module.get_bin_path("cargo", True)]
-
@property
def path(self):
return self._path
@@ -176,6 +181,7 @@
def main():
arg_spec = dict(
+ executable=dict(default=None, type="path"),
name=dict(required=True, type="list", elements="str"),
path=dict(default=None, type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
@@ -185,10 +191,8 @@
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params["name"]
- path = module.params["path"]
state = module.params["state"]
version = module.params["version"]
- locked = module.params["locked"]
if not name:
module.fail_json(msg="Package name must be specified")
@@ -198,7 +202,7 @@
LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
)
- cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)
+ cargo = Cargo(module, **module.params)
changed, out, err = False, None, None
installed_packages = cargo.get_installed()
if state == "present":
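With the patch above applied, a task could point the module at a user-local cargo directly. This usage sketch is an assumption based on the new `executable` option and the rustup path mentioned in the issue, not an excerpt from the collection's documentation:

```yaml
- name: Install tokei Rust package with an explicit cargo binary
  community.general.cargo:
    name: tokei
    executable: /home/username/.cargo/bin/cargo
```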
| {"golden_diff": "diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py\n--- a/plugins/modules/cargo.py\n+++ b/plugins/modules/cargo.py\n@@ -25,6 +25,12 @@\n diff_mode:\n support: none\n options:\n+ executable:\n+ description:\n+ - Path to the C(cargo) installed in the system.\n+ - If not specified, the module will look C(cargo) in E(PATH).\n+ type: path\n+ version_added: 7.5.0\n name:\n description:\n - The name of a Rust package to install.\n@@ -60,7 +66,7 @@\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n requirements:\n- - cargo installed in bin path (recommended /usr/local/bin)\n+ - cargo installed\n \"\"\"\n \n EXAMPLES = r\"\"\"\n@@ -103,14 +109,13 @@\n class Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n+ self.executable = [kwargs[\"executable\"] or module.get_bin_path(\"cargo\", True)]\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n \n- self.executable = [module.get_bin_path(\"cargo\", True)]\n-\n @property\n def path(self):\n return self._path\n@@ -176,6 +181,7 @@\n \n def main():\n arg_spec = dict(\n+ executable=dict(default=None, type=\"path\"),\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", choices=[\"present\", \"absent\", \"latest\"]),\n@@ -185,10 +191,8 @@\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n \n name = module.params[\"name\"]\n- path = module.params[\"path\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n- locked = module.params[\"locked\"]\n \n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n@@ -198,7 +202,7 @@\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n \n- cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)\n+ cargo = Cargo(module, **module.params)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n", "issue": "cargo fails to find standard homedir path for cargo binary\n### Summary\r\n\r\nthe cargo module fails with -\r\n\r\n`TASK [Install tokei Rust package] ***************************************************************************************************************************\r\nfatal: [hostname]: FAILED! 
=> {\"changed\": false, \"msg\": \"Failed to find required executable \\\"cargo\\\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin\"}`\r\n\r\ncargo executable is located in default rustup install location `/home/username/.cargo/bin/` \r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\ncargo\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.3]\r\n config file = /home/username/foo/ansible.cfg\r\n configured module search path = ['/home/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3/dist-packages/ansible\r\n ansible collection location = /home/username/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/bin/ansible\r\n python version = 3.9.10 (main, Feb 22 2022, 13:54:07) [GCC 11.2.0]\r\n jinja version = 3.0.3\r\n libyaml = True\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /usr/lib/python3/dist-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 4.5.0 \r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nDEFAULT_HOST_LIST(/home/username/foo/ansible.cfg) = ['/home/username/foo/HOSTS']\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\nDebian Bookworm targeting Bookworm\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n\r\n```\r\nRun this module against home directory with the default rustup install location.\r\n\r\n### Expected Results\r\n\r\ncargo installs the package\r\n\r\n### Actual Results\r\n\r\n```console (paste below)\r\nTASK [Install tokei Rust package] ***************************************************************************************************************************\r\nfatal: [hostname]: FAILED! => {\"changed\": false, \"msg\": \"Failed to find required executable \\\"cargo\\\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin\"}\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Radek Sprta <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cargo\nshort_description: Manage Rust packages with cargo\nversion_added: 4.3.0\ndescription:\n - Manage Rust packages with cargo.\nauthor: \"Radek Sprta (@radek-sprta)\"\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: none\noptions:\n name:\n description:\n - The name of a Rust package to install.\n type: list\n elements: str\n required: true\n path:\n description:\n ->\n The base path where to install the Rust packages. Cargo automatically appends\n V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).\n type: path\n version:\n description:\n ->\n The version to install. 
If O(name) contains multiple values, the module will\n try to install all of them in this version.\n type: str\n required: false\n locked:\n description:\n - Install with locked dependencies.\n - This is only used when installing packages.\n required: false\n type: bool\n default: false\n version_added: 7.5.0\n state:\n description:\n - The state of the Rust package.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\nrequirements:\n - cargo installed in bin path (recommended /usr/local/bin)\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Install \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n\n- name: Install \"ludusavi\" Rust package with locked dependencies\n community.general.cargo:\n name: ludusavi\n locked: true\n\n- name: Install \"ludusavi\" Rust package in version 0.10.0\n community.general.cargo:\n name: ludusavi\n version: '0.10.0'\n\n- name: Install \"ludusavi\" Rust package to global location\n community.general.cargo:\n name: ludusavi\n path: /usr/local\n\n- name: Remove \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n state: absent\n\n- name: Update \"ludusavi\" Rust package its latest version\n community.general.cargo:\n name: ludusavi\n state: latest\n\"\"\"\n\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n\n self.executable = [module.get_bin_path(\"cargo\", True)]\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n if path is not None and not os.path.isdir(path):\n self.module.fail_json(msg=\"Path %s is not a directory\" % path)\n self._path = path\n\n def _exec(\n self, args, run_in_check_mode=False, check_rc=True, add_package_name=True\n ):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc)\n return out, err\n return \"\", \"\"\n\n def get_installed(self):\n cmd = [\"install\", \"--list\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n package_regex = re.compile(r\"^([\\w\\-]+) v(.+):$\")\n installed = {}\n for line in data.splitlines():\n package_info = package_regex.match(line)\n if package_info:\n installed[package_info.group(1)] = package_info.group(2)\n\n return installed\n\n def install(self, packages=None):\n cmd = [\"install\"]\n cmd.extend(packages or self.name)\n if self.locked:\n cmd.append(\"--locked\")\n if self.path:\n cmd.append(\"--root\")\n cmd.append(self.path)\n if self.version:\n cmd.append(\"--version\")\n cmd.append(self.version)\n return self._exec(cmd)\n\n def is_outdated(self, name):\n installed_version = self.get_installed().get(name)\n\n cmd = [\"search\", name, \"--limit\", \"1\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n match = re.search(r'\"(.+)\"', data)\n if match:\n latest_version = match.group(1)\n\n return installed_version != latest_version\n\n def uninstall(self, packages=None):\n cmd = [\"uninstall\"]\n cmd.extend(packages or self.name)\n return self._exec(cmd)\n\n\ndef main():\n arg_spec = dict(\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", choices=[\"present\", \"absent\", 
\"latest\"]),\n version=dict(default=None, type=\"str\"),\n locked=dict(default=False, type=\"bool\"),\n )\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n\n name = module.params[\"name\"]\n path = module.params[\"path\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n locked = module.params[\"locked\"]\n\n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n\n # Set LANG env since we parse stdout\n module.run_command_environ_update = dict(\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n\n cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n to_install = [\n n\n for n in name\n if (n not in installed_packages)\n or (version and version != installed_packages[n])\n ]\n if to_install:\n changed = True\n out, err = cargo.install(to_install)\n elif state == \"latest\":\n to_update = [\n n for n in name if n not in installed_packages or cargo.is_outdated(n)\n ]\n if to_update:\n changed = True\n out, err = cargo.install(to_update)\n else: # absent\n to_uninstall = [n for n in name if n in installed_packages]\n if to_uninstall:\n changed = True\n out, err = cargo.uninstall(to_uninstall)\n\n module.exit_json(changed=changed, stdout=out, stderr=err)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cargo.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Radek Sprta <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cargo\nshort_description: Manage Rust packages with cargo\nversion_added: 4.3.0\ndescription:\n - Manage Rust packages with cargo.\nauthor: \"Radek Sprta (@radek-sprta)\"\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: none\noptions:\n executable:\n description:\n - Path to the C(cargo) installed in the system.\n - If not specified, the module will look C(cargo) in E(PATH).\n type: path\n version_added: 7.5.0\n name:\n description:\n - The name of a Rust package to install.\n type: list\n elements: str\n required: true\n path:\n description:\n ->\n The base path where to install the Rust packages. Cargo automatically appends\n V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).\n type: path\n version:\n description:\n ->\n The version to install. 
If O(name) contains multiple values, the module will\n try to install all of them in this version.\n type: str\n required: false\n locked:\n description:\n - Install with locked dependencies.\n - This is only used when installing packages.\n required: false\n type: bool\n default: false\n version_added: 7.5.0\n state:\n description:\n - The state of the Rust package.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\nrequirements:\n - cargo installed\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Install \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n\n- name: Install \"ludusavi\" Rust package with locked dependencies\n community.general.cargo:\n name: ludusavi\n locked: true\n\n- name: Install \"ludusavi\" Rust package in version 0.10.0\n community.general.cargo:\n name: ludusavi\n version: '0.10.0'\n\n- name: Install \"ludusavi\" Rust package to global location\n community.general.cargo:\n name: ludusavi\n path: /usr/local\n\n- name: Remove \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n state: absent\n\n- name: Update \"ludusavi\" Rust package its latest version\n community.general.cargo:\n name: ludusavi\n state: latest\n\"\"\"\n\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.executable = [kwargs[\"executable\"] or module.get_bin_path(\"cargo\", True)]\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n if path is not None and not os.path.isdir(path):\n self.module.fail_json(msg=\"Path %s is not a directory\" % path)\n self._path = path\n\n def _exec(\n self, args, run_in_check_mode=False, check_rc=True, add_package_name=True\n ):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc)\n return out, err\n return \"\", \"\"\n\n def get_installed(self):\n cmd = [\"install\", \"--list\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n package_regex = re.compile(r\"^([\\w\\-]+) v(.+):$\")\n installed = {}\n for line in data.splitlines():\n package_info = package_regex.match(line)\n if package_info:\n installed[package_info.group(1)] = package_info.group(2)\n\n return installed\n\n def install(self, packages=None):\n cmd = [\"install\"]\n cmd.extend(packages or self.name)\n if self.locked:\n cmd.append(\"--locked\")\n if self.path:\n cmd.append(\"--root\")\n cmd.append(self.path)\n if self.version:\n cmd.append(\"--version\")\n cmd.append(self.version)\n return self._exec(cmd)\n\n def is_outdated(self, name):\n installed_version = self.get_installed().get(name)\n\n cmd = [\"search\", name, \"--limit\", \"1\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n match = re.search(r'\"(.+)\"', data)\n if match:\n latest_version = match.group(1)\n\n return installed_version != latest_version\n\n def uninstall(self, packages=None):\n cmd = [\"uninstall\"]\n cmd.extend(packages or self.name)\n return self._exec(cmd)\n\n\ndef main():\n arg_spec = dict(\n executable=dict(default=None, type=\"path\"),\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", 
choices=[\"present\", \"absent\", \"latest\"]),\n version=dict(default=None, type=\"str\"),\n locked=dict(default=False, type=\"bool\"),\n )\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n\n name = module.params[\"name\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n\n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n\n # Set LANG env since we parse stdout\n module.run_command_environ_update = dict(\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n\n cargo = Cargo(module, **module.params)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n to_install = [\n n\n for n in name\n if (n not in installed_packages)\n or (version and version != installed_packages[n])\n ]\n if to_install:\n changed = True\n out, err = cargo.install(to_install)\n elif state == \"latest\":\n to_update = [\n n for n in name if n not in installed_packages or cargo.is_outdated(n)\n ]\n if to_update:\n changed = True\n out, err = cargo.install(to_update)\n else: # absent\n to_uninstall = [n for n in name if n in installed_packages]\n if to_uninstall:\n changed = True\n out, err = cargo.uninstall(to_uninstall)\n\n module.exit_json(changed=changed, stdout=out, stderr=err)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cargo.py"}]} | 3,035 | 600 |
gh_patches_debug_5998 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-9763 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Unhandled exception for failing `on-run-*` hooks in `source freshness`
### Is this a new bug in dbt-core?
- [X] I believe this is a new bug in dbt-core
- [X] I have searched the existing issues, and I could not find an existing issue for this bug
### Current Behavior
_follow up to https://github.com/dbt-labs/dbt-core/pull/9366_
Failing hooks in `source freshness` lead to an unhandled exception
### Expected Behavior
The exception is handled, as it is for other commands
### Steps To Reproduce
1. Create a simple project that defines a source with `freshness` + `loaded_at_field`
2. In the project file:
```yml
# dbt_project.yml
flags:
source_freshness_run_project_hooks: true
on-run-start:
"select fake_column from table_does_not_exist"
```
3. `dbt source freshness`
### Relevant log output
```shell
$ dbt source freshness
12:11:06 Running with dbt=1.8.0-a1
12:11:06 Registered adapter: postgres=1.8.0-a1
12:11:06 Unable to do partial parsing because a project config has changed
12:11:07 Found 1 operation, 1 source, 521 macros
12:11:07
12:11:08
12:11:08 Running 1 on-run-start hook
12:11:08 1 of 1 START hook: my_dbt_project.on-run-start.0 ............................... [RUN]
12:11:08 Database error while running on-run-start
12:11:08 Concurrency: 5 threads (target='dev')
12:11:08
12:11:08 1 of 1 START freshness of my_src.my_tbl ........................................ [RUN]
12:11:08 1 of 1 WARN freshness of my_src.my_tbl ......................................... [WARN in 0.04s]
12:11:08
12:11:08 Finished running 1 source in 0 hours 0 minutes and 0.32 seconds (0.32s).
12:11:08 Encountered an error:
'BaseResult' object has no attribute 'node'
12:11:08 Traceback (most recent call last):
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py", line 97, in wrapper
result, success = func(*args, **kwargs)
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py", line 82, in wrapper
return func(*args, **kwargs)
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py", line 175, in wrapper
return func(*args, **kwargs)
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py", line 204, in wrapper
return func(*args, **kwargs)
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py", line 251, in wrapper
return func(*args, **kwargs)
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py", line 280, in wrapper
return func(*args, **kwargs)
File "/Users/jerco/dev/product/dbt-core/core/dbt/cli/main.py", line 789, in freshness
results = task.run()
File "/Users/jerco/dev/product/dbt-core/core/dbt/task/runnable.py", line 524, in run
result.write(self.result_path())
File "/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py", line 67, in write
FreshnessExecutionResultArtifact.from_result(self).write(path)
File "/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py", line 110, in from_result
processed = [process_freshness_result(r) for r in base.results]
File "/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py", line 110, in <listcomp>
processed = [process_freshness_result(r) for r in base.results]
File "/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py", line 119, in process_freshness_result
unique_id = result.node.unique_id
AttributeError: 'BaseResult' object has no attribute 'node'
```
### Environment
```markdown
- OS: 13.4.1
- Python: 3.10.11
- dbt: 1.8.0-a1
```
### Which database adapter are you using with dbt?
_No response_
### Additional Context
_No response_
--- END ISSUE ---
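A minimal, standalone sketch of the type mismatch behind the traceback (the class names mirror the traceback and the file below, but the definitions here are simplified illustrations, not dbt's real classes):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Node:
    unique_id: str


@dataclass
class BaseResult:
    # What a failed on-run-* hook contributes to the result list: no node.
    status: str
    message: Optional[str] = None


@dataclass
class SourceFreshnessResult(BaseResult):
    # Only genuine source freshness checks carry a node with a unique_id.
    node: Optional[Node] = None


results = [
    BaseResult(status="error", message="Database Error"),
    SourceFreshnessResult(status="warn", node=Node("source.my_src.my_tbl")),
]

for r in results:
    try:
        # Mirrors process_freshness_result accessing result.node.unique_id.
        print(r.node.unique_id)
    except AttributeError as exc:
        # The hook's BaseResult has no .node, reproducing the reported error.
        print(f"{type(r).__name__}: {exc}")
```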
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/artifacts/schemas/freshness/v3/freshness.py`
Content:
```
1 from dataclasses import dataclass, field
2 from typing import Dict, Any, Sequence, List, Union, Optional
3 from datetime import datetime
4
5 from dbt.artifacts.resources import FreshnessThreshold
6 from dbt.artifacts.schemas.results import ExecutionResult, FreshnessStatus, NodeResult, TimingInfo
7 from dbt.artifacts.schemas.base import (
8 ArtifactMixin,
9 VersionedSchema,
10 schema_version,
11 BaseArtifactMetadata,
12 )
13 from dbt_common.dataclass_schema import dbtClassMixin, StrEnum
14 from dbt_common.exceptions import DbtInternalError
15
16 from dbt.contracts.graph.nodes import SourceDefinition
17
18
19 @dataclass
20 class SourceFreshnessResult(NodeResult):
21 node: SourceDefinition
22 status: FreshnessStatus
23 max_loaded_at: datetime
24 snapshotted_at: datetime
25 age: float
26
27 @property
28 def skipped(self):
29 return False
30
31
32 @dataclass
33 class PartialSourceFreshnessResult(NodeResult):
34 status: FreshnessStatus
35
36 @property
37 def skipped(self):
38 return False
39
40
41 FreshnessNodeResult = Union[PartialSourceFreshnessResult, SourceFreshnessResult]
42
43
44 @dataclass
45 class FreshnessMetadata(BaseArtifactMetadata):
46 dbt_schema_version: str = field(
47 default_factory=lambda: str(FreshnessExecutionResultArtifact.dbt_schema_version)
48 )
49
50
51 @dataclass
52 class FreshnessResult(ExecutionResult):
53 metadata: FreshnessMetadata
54 results: Sequence[FreshnessNodeResult]
55
56 @classmethod
57 def from_node_results(
58 cls,
59 results: List[FreshnessNodeResult],
60 elapsed_time: float,
61 generated_at: datetime,
62 ):
63 meta = FreshnessMetadata(generated_at=generated_at)
64 return cls(metadata=meta, results=results, elapsed_time=elapsed_time)
65
66 def write(self, path):
67 FreshnessExecutionResultArtifact.from_result(self).write(path)
68
69
70 @dataclass
71 class SourceFreshnessOutput(dbtClassMixin):
72 unique_id: str
73 max_loaded_at: datetime
74 snapshotted_at: datetime
75 max_loaded_at_time_ago_in_s: float
76 status: FreshnessStatus
77 criteria: FreshnessThreshold
78 adapter_response: Dict[str, Any]
79 timing: List[TimingInfo]
80 thread_id: str
81 execution_time: float
82
83
84 class FreshnessErrorEnum(StrEnum):
85 runtime_error = "runtime error"
86
87
88 @dataclass
89 class SourceFreshnessRuntimeError(dbtClassMixin):
90 unique_id: str
91 error: Optional[Union[str, int]]
92 status: FreshnessErrorEnum
93
94
95 FreshnessNodeOutput = Union[SourceFreshnessRuntimeError, SourceFreshnessOutput]
96
97
98 @dataclass
99 @schema_version("sources", 3)
100 class FreshnessExecutionResultArtifact(
101 ArtifactMixin,
102 VersionedSchema,
103 ):
104 metadata: FreshnessMetadata
105 results: Sequence[FreshnessNodeOutput]
106 elapsed_time: float
107
108 @classmethod
109 def from_result(cls, base: FreshnessResult):
110 processed = [process_freshness_result(r) for r in base.results]
111 return cls(
112 metadata=base.metadata,
113 results=processed,
114 elapsed_time=base.elapsed_time,
115 )
116
117
118 def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput:
119 unique_id = result.node.unique_id
120 if result.status == FreshnessStatus.RuntimeErr:
121 return SourceFreshnessRuntimeError(
122 unique_id=unique_id,
123 error=result.message,
124 status=FreshnessErrorEnum.runtime_error,
125 )
126
127 # we know that this must be a SourceFreshnessResult
128 if not isinstance(result, SourceFreshnessResult):
129 raise DbtInternalError(
130 "Got {} instead of a SourceFreshnessResult for a "
131 "non-error result in freshness execution!".format(type(result))
132 )
133 # if we're here, we must have a non-None freshness threshold
134 criteria = result.node.freshness
135 if criteria is None:
136 raise DbtInternalError(
137 "Somehow evaluated a freshness result for a source that has no freshness criteria!"
138 )
139 return SourceFreshnessOutput(
140 unique_id=unique_id,
141 max_loaded_at=result.max_loaded_at,
142 snapshotted_at=result.snapshotted_at,
143 max_loaded_at_time_ago_in_s=result.age,
144 status=result.status,
145 criteria=criteria,
146 adapter_response=result.adapter_response,
147 timing=result.timing,
148 thread_id=result.thread_id,
149 execution_time=result.execution_time,
150 )
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/dbt/artifacts/schemas/freshness/v3/freshness.py b/core/dbt/artifacts/schemas/freshness/v3/freshness.py
--- a/core/dbt/artifacts/schemas/freshness/v3/freshness.py
+++ b/core/dbt/artifacts/schemas/freshness/v3/freshness.py
@@ -107,7 +107,11 @@
@classmethod
def from_result(cls, base: FreshnessResult):
- processed = [process_freshness_result(r) for r in base.results]
+ processed = [
+ process_freshness_result(r)
+ for r in base.results
+ if isinstance(r, SourceFreshnessResult)
+ ]
return cls(
metadata=base.metadata,
results=processed,
| {"golden_diff": "diff --git a/core/dbt/artifacts/schemas/freshness/v3/freshness.py b/core/dbt/artifacts/schemas/freshness/v3/freshness.py\n--- a/core/dbt/artifacts/schemas/freshness/v3/freshness.py\n+++ b/core/dbt/artifacts/schemas/freshness/v3/freshness.py\n@@ -107,7 +107,11 @@\n \n @classmethod\n def from_result(cls, base: FreshnessResult):\n- processed = [process_freshness_result(r) for r in base.results]\n+ processed = [\n+ process_freshness_result(r)\n+ for r in base.results\n+ if isinstance(r, SourceFreshnessResult)\n+ ]\n return cls(\n metadata=base.metadata,\n results=processed,\n", "issue": "[Bug] Unhandled exception for failing `on-run-*` hooks in `source freshness`\n### Is this a new bug in dbt-core?\r\n\r\n- [X] I believe this is a new bug in dbt-core\r\n- [X] I have searched the existing issues, and I could not find an existing issue for this bug\r\n\r\n### Current Behavior\r\n\r\n_follow up to https://github.com/dbt-labs/dbt-core/pull/9366_\r\n\r\nFailing hooks in `source freshness` lead to an unhandled exception\r\n\r\n### Expected Behavior\r\n\r\nThe exception is handled, as it is for other commands\r\n\r\n### Steps To Reproduce\r\n\r\n1. Create a simple project that defines a source with `freshness` + `loaded_at_field`\r\n2. In the project file:\r\n```yml\r\n# dbt_project.yml\r\nflags:\r\n source_freshness_run_project_hooks: true\r\n\r\non-run-start:\r\n \"select fake_column from table_does_not_exist\"\r\n```\r\n3. `dbt source freshness`\r\n\r\n### Relevant log output\r\n\r\n```shell\r\n$ dbt source freshness\r\n12:11:06 Running with dbt=1.8.0-a1\r\n12:11:06 Registered adapter: postgres=1.8.0-a1\r\n12:11:06 Unable to do partial parsing because a project config has changed\r\n12:11:07 Found 1 operation, 1 source, 521 macros\r\n12:11:07\r\n12:11:08\r\n12:11:08 Running 1 on-run-start hook\r\n12:11:08 1 of 1 START hook: my_dbt_project.on-run-start.0 ............................... [RUN]\r\n12:11:08 Database error while running on-run-start\r\n12:11:08 Concurrency: 5 threads (target='dev')\r\n12:11:08\r\n12:11:08 1 of 1 START freshness of my_src.my_tbl ........................................ [RUN]\r\n12:11:08 1 of 1 WARN freshness of my_src.my_tbl ......................................... 
[WARN in 0.04s]\r\n12:11:08\r\n12:11:08 Finished running 1 source in 0 hours 0 minutes and 0.32 seconds (0.32s).\r\n12:11:08 Encountered an error:\r\n'BaseResult' object has no attribute 'node'\r\n12:11:08 Traceback (most recent call last):\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 97, in wrapper\r\n result, success = func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 82, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 175, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 204, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 251, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 280, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/main.py\", line 789, in freshness\r\n results = task.run()\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/task/runnable.py\", line 524, in run\r\n result.write(self.result_path())\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 67, in write\r\n FreshnessExecutionResultArtifact.from_result(self).write(path)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 110, in from_result\r\n processed = [process_freshness_result(r) for r in base.results]\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 110, in <listcomp>\r\n processed = [process_freshness_result(r) for r in base.results]\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 119, in process_freshness_result\r\n unique_id = result.node.unique_id\r\nAttributeError: 'BaseResult' object has no attribute 'node'\r\n```\r\n\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: 13.4.1\r\n- Python: 3.10.11\r\n- dbt: 1.8.0-a1\r\n```\r\n\r\n\r\n### Which database adapter are you using with dbt?\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n[Bug] Unhandled exception for failing `on-run-*` hooks in `source freshness`\n### Is this a new bug in dbt-core?\r\n\r\n- [X] I believe this is a new bug in dbt-core\r\n- [X] I have searched the existing issues, and I could not find an existing issue for this bug\r\n\r\n### Current Behavior\r\n\r\n_follow up to https://github.com/dbt-labs/dbt-core/pull/9366_\r\n\r\nFailing hooks in `source freshness` lead to an unhandled exception\r\n\r\n### Expected Behavior\r\n\r\nThe exception is handled, as it is for other commands\r\n\r\n### Steps To Reproduce\r\n\r\n1. Create a simple project that defines a source with `freshness` + `loaded_at_field`\r\n2. In the project file:\r\n```yml\r\n# dbt_project.yml\r\nflags:\r\n source_freshness_run_project_hooks: true\r\n\r\non-run-start:\r\n \"select fake_column from table_does_not_exist\"\r\n```\r\n3. 
`dbt source freshness`\r\n\r\n### Relevant log output\r\n\r\n```shell\r\n$ dbt source freshness\r\n12:11:06 Running with dbt=1.8.0-a1\r\n12:11:06 Registered adapter: postgres=1.8.0-a1\r\n12:11:06 Unable to do partial parsing because a project config has changed\r\n12:11:07 Found 1 operation, 1 source, 521 macros\r\n12:11:07\r\n12:11:08\r\n12:11:08 Running 1 on-run-start hook\r\n12:11:08 1 of 1 START hook: my_dbt_project.on-run-start.0 ............................... [RUN]\r\n12:11:08 Database error while running on-run-start\r\n12:11:08 Concurrency: 5 threads (target='dev')\r\n12:11:08\r\n12:11:08 1 of 1 START freshness of my_src.my_tbl ........................................ [RUN]\r\n12:11:08 1 of 1 WARN freshness of my_src.my_tbl ......................................... [WARN in 0.04s]\r\n12:11:08\r\n12:11:08 Finished running 1 source in 0 hours 0 minutes and 0.32 seconds (0.32s).\r\n12:11:08 Encountered an error:\r\n'BaseResult' object has no attribute 'node'\r\n12:11:08 Traceback (most recent call last):\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 97, in wrapper\r\n result, success = func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 82, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 175, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 204, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 251, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/requires.py\", line 280, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/cli/main.py\", line 789, in freshness\r\n results = task.run()\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/task/runnable.py\", line 524, in run\r\n result.write(self.result_path())\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 67, in write\r\n FreshnessExecutionResultArtifact.from_result(self).write(path)\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 110, in from_result\r\n processed = [process_freshness_result(r) for r in base.results]\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 110, in <listcomp>\r\n processed = [process_freshness_result(r) for r in base.results]\r\n File \"/Users/jerco/dev/product/dbt-core/core/dbt/artifacts/schemas/freshness/v3/freshness.py\", line 119, in process_freshness_result\r\n unique_id = result.node.unique_id\r\nAttributeError: 'BaseResult' object has no attribute 'node'\r\n```\r\n\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: 13.4.1\r\n- Python: 3.10.11\r\n- dbt: 1.8.0-a1\r\n```\r\n\r\n\r\n### Which database adapter are you using with dbt?\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom typing import Dict, Any, Sequence, List, Union, Optional\nfrom datetime import datetime\n\nfrom dbt.artifacts.resources import FreshnessThreshold\nfrom dbt.artifacts.schemas.results import ExecutionResult, FreshnessStatus, NodeResult, TimingInfo\nfrom dbt.artifacts.schemas.base import (\n ArtifactMixin,\n VersionedSchema,\n schema_version,\n 
BaseArtifactMetadata,\n)\nfrom dbt_common.dataclass_schema import dbtClassMixin, StrEnum\nfrom dbt_common.exceptions import DbtInternalError\n\nfrom dbt.contracts.graph.nodes import SourceDefinition\n\n\n@dataclass\nclass SourceFreshnessResult(NodeResult):\n node: SourceDefinition\n status: FreshnessStatus\n max_loaded_at: datetime\n snapshotted_at: datetime\n age: float\n\n @property\n def skipped(self):\n return False\n\n\n@dataclass\nclass PartialSourceFreshnessResult(NodeResult):\n status: FreshnessStatus\n\n @property\n def skipped(self):\n return False\n\n\nFreshnessNodeResult = Union[PartialSourceFreshnessResult, SourceFreshnessResult]\n\n\n@dataclass\nclass FreshnessMetadata(BaseArtifactMetadata):\n dbt_schema_version: str = field(\n default_factory=lambda: str(FreshnessExecutionResultArtifact.dbt_schema_version)\n )\n\n\n@dataclass\nclass FreshnessResult(ExecutionResult):\n metadata: FreshnessMetadata\n results: Sequence[FreshnessNodeResult]\n\n @classmethod\n def from_node_results(\n cls,\n results: List[FreshnessNodeResult],\n elapsed_time: float,\n generated_at: datetime,\n ):\n meta = FreshnessMetadata(generated_at=generated_at)\n return cls(metadata=meta, results=results, elapsed_time=elapsed_time)\n\n def write(self, path):\n FreshnessExecutionResultArtifact.from_result(self).write(path)\n\n\n@dataclass\nclass SourceFreshnessOutput(dbtClassMixin):\n unique_id: str\n max_loaded_at: datetime\n snapshotted_at: datetime\n max_loaded_at_time_ago_in_s: float\n status: FreshnessStatus\n criteria: FreshnessThreshold\n adapter_response: Dict[str, Any]\n timing: List[TimingInfo]\n thread_id: str\n execution_time: float\n\n\nclass FreshnessErrorEnum(StrEnum):\n runtime_error = \"runtime error\"\n\n\n@dataclass\nclass SourceFreshnessRuntimeError(dbtClassMixin):\n unique_id: str\n error: Optional[Union[str, int]]\n status: FreshnessErrorEnum\n\n\nFreshnessNodeOutput = Union[SourceFreshnessRuntimeError, SourceFreshnessOutput]\n\n\n@dataclass\n@schema_version(\"sources\", 3)\nclass FreshnessExecutionResultArtifact(\n ArtifactMixin,\n VersionedSchema,\n):\n metadata: FreshnessMetadata\n results: Sequence[FreshnessNodeOutput]\n elapsed_time: float\n\n @classmethod\n def from_result(cls, base: FreshnessResult):\n processed = [process_freshness_result(r) for r in base.results]\n return cls(\n metadata=base.metadata,\n results=processed,\n elapsed_time=base.elapsed_time,\n )\n\n\ndef process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput:\n unique_id = result.node.unique_id\n if result.status == FreshnessStatus.RuntimeErr:\n return SourceFreshnessRuntimeError(\n unique_id=unique_id,\n error=result.message,\n status=FreshnessErrorEnum.runtime_error,\n )\n\n # we know that this must be a SourceFreshnessResult\n if not isinstance(result, SourceFreshnessResult):\n raise DbtInternalError(\n \"Got {} instead of a SourceFreshnessResult for a \"\n \"non-error result in freshness execution!\".format(type(result))\n )\n # if we're here, we must have a non-None freshness threshold\n criteria = result.node.freshness\n if criteria is None:\n raise DbtInternalError(\n \"Somehow evaluated a freshness result for a source that has no freshness criteria!\"\n )\n return SourceFreshnessOutput(\n unique_id=unique_id,\n max_loaded_at=result.max_loaded_at,\n snapshotted_at=result.snapshotted_at,\n max_loaded_at_time_ago_in_s=result.age,\n status=result.status,\n criteria=criteria,\n adapter_response=result.adapter_response,\n timing=result.timing,\n thread_id=result.thread_id,\n 
execution_time=result.execution_time,\n )\n", "path": "core/dbt/artifacts/schemas/freshness/v3/freshness.py"}], "after_files": [{"content": "from dataclasses import dataclass, field\nfrom typing import Dict, Any, Sequence, List, Union, Optional\nfrom datetime import datetime\n\nfrom dbt.artifacts.resources import FreshnessThreshold\nfrom dbt.artifacts.schemas.results import ExecutionResult, FreshnessStatus, NodeResult, TimingInfo\nfrom dbt.artifacts.schemas.base import (\n ArtifactMixin,\n VersionedSchema,\n schema_version,\n BaseArtifactMetadata,\n)\nfrom dbt_common.dataclass_schema import dbtClassMixin, StrEnum\nfrom dbt_common.exceptions import DbtInternalError\n\nfrom dbt.contracts.graph.nodes import SourceDefinition\n\n\n@dataclass\nclass SourceFreshnessResult(NodeResult):\n node: SourceDefinition\n status: FreshnessStatus\n max_loaded_at: datetime\n snapshotted_at: datetime\n age: float\n\n @property\n def skipped(self):\n return False\n\n\n@dataclass\nclass PartialSourceFreshnessResult(NodeResult):\n status: FreshnessStatus\n\n @property\n def skipped(self):\n return False\n\n\nFreshnessNodeResult = Union[PartialSourceFreshnessResult, SourceFreshnessResult]\n\n\n@dataclass\nclass FreshnessMetadata(BaseArtifactMetadata):\n dbt_schema_version: str = field(\n default_factory=lambda: str(FreshnessExecutionResultArtifact.dbt_schema_version)\n )\n\n\n@dataclass\nclass FreshnessResult(ExecutionResult):\n metadata: FreshnessMetadata\n results: Sequence[FreshnessNodeResult]\n\n @classmethod\n def from_node_results(\n cls,\n results: List[FreshnessNodeResult],\n elapsed_time: float,\n generated_at: datetime,\n ):\n meta = FreshnessMetadata(generated_at=generated_at)\n return cls(metadata=meta, results=results, elapsed_time=elapsed_time)\n\n def write(self, path):\n FreshnessExecutionResultArtifact.from_result(self).write(path)\n\n\n@dataclass\nclass SourceFreshnessOutput(dbtClassMixin):\n unique_id: str\n max_loaded_at: datetime\n snapshotted_at: datetime\n max_loaded_at_time_ago_in_s: float\n status: FreshnessStatus\n criteria: FreshnessThreshold\n adapter_response: Dict[str, Any]\n timing: List[TimingInfo]\n thread_id: str\n execution_time: float\n\n\nclass FreshnessErrorEnum(StrEnum):\n runtime_error = \"runtime error\"\n\n\n@dataclass\nclass SourceFreshnessRuntimeError(dbtClassMixin):\n unique_id: str\n error: Optional[Union[str, int]]\n status: FreshnessErrorEnum\n\n\nFreshnessNodeOutput = Union[SourceFreshnessRuntimeError, SourceFreshnessOutput]\n\n\n@dataclass\n@schema_version(\"sources\", 3)\nclass FreshnessExecutionResultArtifact(\n ArtifactMixin,\n VersionedSchema,\n):\n metadata: FreshnessMetadata\n results: Sequence[FreshnessNodeOutput]\n elapsed_time: float\n\n @classmethod\n def from_result(cls, base: FreshnessResult):\n processed = [\n process_freshness_result(r)\n for r in base.results\n if isinstance(r, SourceFreshnessResult)\n ]\n return cls(\n metadata=base.metadata,\n results=processed,\n elapsed_time=base.elapsed_time,\n )\n\n\ndef process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput:\n unique_id = result.node.unique_id\n if result.status == FreshnessStatus.RuntimeErr:\n return SourceFreshnessRuntimeError(\n unique_id=unique_id,\n error=result.message,\n status=FreshnessErrorEnum.runtime_error,\n )\n\n # we know that this must be a SourceFreshnessResult\n if not isinstance(result, SourceFreshnessResult):\n raise DbtInternalError(\n \"Got {} instead of a SourceFreshnessResult for a \"\n \"non-error result in freshness 
execution!\".format(type(result))\n )\n # if we're here, we must have a non-None freshness threshold\n criteria = result.node.freshness\n if criteria is None:\n raise DbtInternalError(\n \"Somehow evaluated a freshness result for a source that has no freshness criteria!\"\n )\n return SourceFreshnessOutput(\n unique_id=unique_id,\n max_loaded_at=result.max_loaded_at,\n snapshotted_at=result.snapshotted_at,\n max_loaded_at_time_ago_in_s=result.age,\n status=result.status,\n criteria=criteria,\n adapter_response=result.adapter_response,\n timing=result.timing,\n thread_id=result.thread_id,\n execution_time=result.execution_time,\n )\n", "path": "core/dbt/artifacts/schemas/freshness/v3/freshness.py"}]} | 3,922 | 177 |
gh_patches_debug_3114 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-108 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing zmq and jsonschema
It seems like two additional dependencies are missing.
``` bash
Traceback (most recent call last):
File "/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/utils/zmqrelated.py", line 35, in check_for_zmq
import zmq
ImportError: No module named 'zmq'
```
``` bash
Traceback (most recent call last):
File "/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/nbformat/validator.py", line 10, in <module>
from jsonschema import ValidationError
ImportError: No module named 'jsonschema'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 # Copyright (c) Juptyer Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 #-----------------------------------------------------------------------------
8 # Minimal Python version sanity check (from IPython)
9 #-----------------------------------------------------------------------------
10
11 from __future__ import print_function
12
13 import os
14 import sys
15
16 v = sys.version_info
17 if v[:2] < (3,3):
18 error = "ERROR: Jupyter Hub requires Python version 3.3 or above."
19 print(error, file=sys.stderr)
20 sys.exit(1)
21
22
23 if os.name in ('nt', 'dos'):
24 error = "ERROR: Windows is not supported"
25 print(error, file=sys.stderr)
26
27 # At least we're on the python version we need, move on.
28
29 import os
30
31 from glob import glob
32
33 from distutils.core import setup
34 from subprocess import check_call
35
36 pjoin = os.path.join
37
38 here = os.path.abspath(os.path.dirname(__file__))
39 share_jupyter = pjoin(here, 'share', 'jupyter')
40 static = pjoin(share_jupyter, 'static')
41
42 #---------------------------------------------------------------------------
43 # Build basic package data, etc.
44 #---------------------------------------------------------------------------
45
46 def get_data_files():
47 """Get data files in share/jupyter"""
48
49 data_files = []
50 ntrim = len(here) + 1
51
52 for (d, dirs, filenames) in os.walk(share_jupyter):
53 data_files.append((
54 d[ntrim:],
55 [ pjoin(d, f) for f in filenames ]
56 ))
57 return data_files
58
59
60 ns = {}
61 with open(pjoin(here, 'jupyterhub', 'version.py')) as f:
62 exec(f.read(), {}, ns)
63
64
65 packages = []
66 for d, _, _ in os.walk('jupyterhub'):
67 if os.path.exists(pjoin(d, '__init__.py')):
68 packages.append(d.replace(os.path.sep, '.'))
69
70 setup_args = dict(
71 name = 'jupyterhub',
72 scripts = glob(pjoin('scripts', '*')),
73 packages = packages,
74 # dummy, so that install_data doesn't get skipped
75 # this will be overridden when bower is run anyway
76 data_files = get_data_files() or ['dummy'],
77 version = ns['__version__'],
78 description = """JupyterHub: A multi-user server for Jupyter notebooks""",
79 long_description = "",
80 author = "Jupyter Development Team",
81 author_email = "[email protected]",
82 url = "http://jupyter.org",
83 license = "BSD",
84 platforms = "Linux, Mac OS X",
85 keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
86 classifiers = [
87 'Intended Audience :: Developers',
88 'Intended Audience :: System Administrators',
89 'Intended Audience :: Science/Research',
90 'License :: OSI Approved :: BSD License',
91 'Programming Language :: Python',
92 'Programming Language :: Python :: 3',
93 ],
94 )
95
96 #---------------------------------------------------------------------------
97 # custom distutils commands
98 #---------------------------------------------------------------------------
99
100 # imports here, so they are after setuptools import if there was one
101 from distutils.cmd import Command
102 from distutils.command.install import install
103
104 class BaseCommand(Command):
105 """Dumb empty command because Command needs subclasses to override too much"""
106 user_options = []
107
108 def initialize_options(self):
109 pass
110
111 def finalize_options(self):
112 pass
113
114 def get_inputs(self):
115 return []
116
117 def get_outputs(self):
118 return []
119
120
121 class Bower(BaseCommand):
122 description = "fetch static client-side components with bower"
123
124 user_options = []
125
126 def run(self):
127 try:
128 check_call(['bower', 'install', '--allow-root'])
129 except OSError as e:
130 print("Failed to run bower: %s" % e, file=sys.stderr)
131 print("You can install bower with `npm install -g bower`", file=sys.stderr)
132 raise
133 # update data-files in case this created new files
134 self.distribution.data_files = get_data_files()
135
136 class CSS(BaseCommand):
137 description = "compile CSS from LESS"
138
139 user_options = []
140
141 def initialize_options(self):
142 pass
143
144 def finalize_options(self):
145 pass
146
147 def run(self):
148 style_less = pjoin(static, 'less', 'style.less')
149 style_css = pjoin(static, 'css', 'style.min.css')
150 sourcemap = style_css + '.map'
151 try:
152 check_call([
153 'lessc', '-x', '--verbose',
154 '--source-map-basepath={}'.format(static),
155 '--source-map={}'.format(sourcemap),
156 '--source-map-rootpath=../',
157 style_less, style_css,
158 ])
159 except OSError as e:
160 print("Failed to run lessc: %s" % e, file=sys.stderr)
161 print("You can install less with `npm install -g less`", file=sys.stderr)
162 raise
163 # update data-files in case this created new files
164 self.distribution.data_files = get_data_files()
165
166 # ensure bower is run as part of install
167 install.sub_commands.insert(0, ('js', None))
168 install.sub_commands.insert(1, ('css', None))
169
170 setup_args['cmdclass'] = {
171 'js': Bower,
172 'css': CSS,
173 }
174
175
176 # setuptools requirements
177
178 if 'setuptools' in sys.modules:
179 setup_args['zip_safe'] = False
180 from setuptools.command.develop import develop
181 class develop_js_css(develop):
182 def run(self):
183 if not self.uninstall:
184 self.distribution.run_command('js')
185 self.distribution.run_command('css')
186 develop.run(self)
187 setup_args['cmdclass']['develop'] = develop_js_css
188 setup_args['install_requires'] = install_requires = []
189
190 with open('requirements.txt') as f:
191 for line in f.readlines():
192 req = line.strip()
193 if not req or req.startswith(('-e', '#')):
194 continue
195 install_requires.append(req)
196
197 #---------------------------------------------------------------------------
198 # setup
199 #---------------------------------------------------------------------------
200
201 def main():
202 setup(**setup_args)
203
204 if __name__ == '__main__':
205 main()
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -190,7 +190,7 @@
with open('requirements.txt') as f:
for line in f.readlines():
req = line.strip()
- if not req or req.startswith(('-e', '#')):
+ if not req or req.startswith('#') or '://' in req:
continue
install_requires.append(req)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -190,7 +190,7 @@\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n- if not req or req.startswith(('-e', '#')):\n+ if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n", "issue": "Missing zmq and jsonschema\nIt seems like two additional dependencies are missing.\n\n``` bash\nTraceback (most recent call last):\n File \"/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/utils/zmqrelated.py\", line 35, in check_for_zmq\n import zmq\nImportError: No module named 'zmq'\n```\n\n``` bash\nTraceback (most recent call last):\n File \"/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/nbformat/validator.py\", line 10, in <module>\n from jsonschema import ValidationError\nImportError: No module named 'jsonschema'\n\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: Jupyter Hub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\n\nfrom glob import glob\n\nfrom distutils.core import setup\nfrom subprocess import check_call\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter')\nstatic = pjoin(share_jupyter, 'static')\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here) + 1\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n version = ns['__version__'],\n description = \"\"\"JupyterHub: A multi-user server for Jupyter notebooks\"\"\",\n long_description = \"\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming 
Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.install import install\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n \n def run(self):\n try:\n check_call(['bower', 'install', '--allow-root'])\n except OSError as e:\n print(\"Failed to run bower: %s\" % e, file=sys.stderr)\n print(\"You can install bower with `npm install -g bower`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def run(self):\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n try:\n check_call([\n 'lessc', '-x', '--verbose',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ])\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install less with `npm install -g less`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n# ensure bower is run as part of install\ninstall.sub_commands.insert(0, ('js', None))\ninstall.sub_commands.insert(1, ('css', None))\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n}\n\n\n# setuptools requirements\n\nif 'setuptools' in sys.modules:\n setup_args['zip_safe'] = False\n from setuptools.command.develop import develop\n class develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\n setup_args['cmdclass']['develop'] = develop_js_css\n setup_args['install_requires'] = install_requires = []\n\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith(('-e', '#')):\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: Jupyter Hub requires 
Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\n\nfrom glob import glob\n\nfrom distutils.core import setup\nfrom subprocess import check_call\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter')\nstatic = pjoin(share_jupyter, 'static')\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here) + 1\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n version = ns['__version__'],\n description = \"\"\"JupyterHub: A multi-user server for Jupyter notebooks\"\"\",\n long_description = \"\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.install import install\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n \n def run(self):\n try:\n check_call(['bower', 'install', '--allow-root'])\n except OSError as e:\n print(\"Failed to run bower: %s\" % e, file=sys.stderr)\n print(\"You can install bower with `npm install -g bower`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def run(self):\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n 
sourcemap = style_css + '.map'\n try:\n check_call([\n 'lessc', '-x', '--verbose',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ])\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install less with `npm install -g less`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n# ensure bower is run as part of install\ninstall.sub_commands.insert(0, ('js', None))\ninstall.sub_commands.insert(1, ('css', None))\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n}\n\n\n# setuptools requirements\n\nif 'setuptools' in sys.modules:\n setup_args['zip_safe'] = False\n from setuptools.command.develop import develop\n class develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\n setup_args['cmdclass']['develop'] = develop_js_css\n setup_args['install_requires'] = install_requires = []\n\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]} | 2,254 | 96 |
gh_patches_debug_13945 | rasdani/github-patches | git_diff | optuna__optuna-1007 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Order dependencies in `setup.py` alphabetically.
The libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.
Examples:
https://github.com/optuna/optuna/blob/master/setup.py#L30-L41
```python
def get_install_requires() -> List[str]:
return [
'alembic',
'cliff',
'colorlog',
'numpy',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'tqdm',
'joblib',
]
```
https://github.com/optuna/optuna/blob/master/setup.py#L61-L66
```python
'doctest': [
'pandas',
'cma',
'scikit-learn>=0.19.0',
'plotly>=4.0.0',
],
```
Order dependencies in `setup.py` alphabetically.
The libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.
Examples:
https://github.com/optuna/optuna/blob/master/setup.py#L30-L41
```python
def get_install_requires() -> List[str]:
return [
'alembic',
'cliff',
'colorlog',
'numpy',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'tqdm',
'joblib',
]
```
https://github.com/optuna/optuna/blob/master/setup.py#L61-L66
```python
'doctest': [
'pandas',
'cma',
'scikit-learn>=0.19.0',
'plotly>=4.0.0',
],
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3
4 import pkg_resources
5 from setuptools import find_packages
6 from setuptools import setup
7
8 from typing import Dict
9 from typing import List
10 from typing import Optional
11
12
13 def get_version() -> str:
14
15 version_filepath = os.path.join(os.path.dirname(__file__), 'optuna', 'version.py')
16 with open(version_filepath) as f:
17 for line in f:
18 if line.startswith('__version__'):
19 return line.strip().split()[-1][1:-1]
20 assert False
21
22
23 def get_long_description() -> str:
24
25 readme_filepath = os.path.join(os.path.dirname(__file__), 'README.md')
26 with open(readme_filepath) as f:
27 return f.read()
28
29
30 def get_install_requires() -> List[str]:
31
32 return [
33 'alembic',
34 'cliff',
35 'colorlog',
36 'numpy',
37 'scipy!=1.4.0',
38 'sqlalchemy>=1.1.0',
39 'tqdm',
40 'joblib',
41 ]
42
43
44 def get_tests_require() -> List[str]:
45
46 return get_extras_require()['testing']
47
48
49 def get_extras_require() -> Dict[str, List[str]]:
50
51 requirements = {
52 'checking': [
53 'autopep8',
54 'hacking',
55 'mypy',
56 ],
57 'codecov': [
58 'codecov',
59 'pytest-cov',
60 ],
61 'doctest': [
62 'pandas',
63 'cma',
64 'scikit-learn>=0.19.0',
65 'plotly>=4.0.0',
66 'scikit-optimize',
67 ],
68 'document': [
69 'sphinx',
70 'sphinx_rtd_theme',
71 ],
72 'example': [
73 'catboost',
74 'chainer',
75 'lightgbm',
76 'mlflow',
77 'mxnet',
78 'pytorch-ignite',
79 'scikit-image',
80 'scikit-learn',
81 'torch',
82 'torchvision>=0.5.0',
83 'xgboost',
84 ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
85 + ([
86 'dask[dataframe]',
87 'dask-ml',
88 'keras',
89 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
90 # https://github.com/optuna/optuna/issues/997.
91 'pytorch-lightning<0.7.0',
92 'tensorflow>=2.0.0',
93 ] if sys.version_info[:2] < (3, 8) else []),
94 'testing': [
95 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
96 # https://github.com/optuna/optuna/issues/1000.
97 'bokeh<2.0.0',
98 'chainer>=5.0.0',
99 'cma',
100 'lightgbm',
101 'mock',
102 'mpi4py',
103 'mxnet',
104 'pandas',
105 'plotly>=4.0.0',
106 'pytest',
107 'pytorch-ignite',
108 'scikit-learn>=0.19.0',
109 'scikit-optimize',
110 'torch',
111 'torchvision>=0.5.0',
112 'xgboost',
113 ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
114 + ([
115 'keras',
116 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
117 # https://github.com/optuna/optuna/issues/997.
118 'pytorch-lightning<0.7.0',
119 'tensorflow',
120 'tensorflow-datasets',
121 ] if sys.version_info[:2] < (3, 8) else []),
122 }
123
124 return requirements
125
126
127 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
128
129 for pkg in pkgs:
130 try:
131 return pkg_resources.get_distribution(pkg)
132 except pkg_resources.DistributionNotFound:
133 pass
134 return None
135
136
137 pfnopt_pkg = find_any_distribution(['pfnopt'])
138 if pfnopt_pkg is not None:
139 msg = 'We detected that PFNOpt is installed in your environment.\n' \
140 'PFNOpt has been renamed Optuna. Please uninstall the old\n' \
141 'PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).'
142 print(msg)
143 exit(1)
144
145 setup(
146 name='optuna',
147 version=get_version(),
148 description='A hyperparameter optimization framework',
149 long_description=get_long_description(),
150 long_description_content_type='text/markdown',
151 author='Takuya Akiba',
152 author_email='[email protected]',
153 url='https://optuna.org/',
154 packages=find_packages(),
155 package_data={
156 'optuna': [
157 'storages/rdb/alembic.ini',
158 'storages/rdb/alembic/*.*',
159 'storages/rdb/alembic/versions/*.*'
160 ]
161 },
162 install_requires=get_install_requires(),
163 tests_require=get_tests_require(),
164 extras_require=get_extras_require(),
165 entry_points={'console_scripts': ['optuna = optuna.cli:main']})
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,11 +33,11 @@
'alembic',
'cliff',
'colorlog',
+ 'joblib',
'numpy',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'tqdm',
- 'joblib',
]
@@ -59,10 +59,10 @@
'pytest-cov',
],
'doctest': [
- 'pandas',
'cma',
- 'scikit-learn>=0.19.0',
+ 'pandas',
'plotly>=4.0.0',
+ 'scikit-learn>=0.19.0',
'scikit-optimize',
],
'document': [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,11 +33,11 @@\n 'alembic',\n 'cliff',\n 'colorlog',\n+ 'joblib',\n 'numpy',\n 'scipy!=1.4.0',\n 'sqlalchemy>=1.1.0',\n 'tqdm',\n- 'joblib',\n ]\n \n \n@@ -59,10 +59,10 @@\n 'pytest-cov',\n ],\n 'doctest': [\n- 'pandas',\n 'cma',\n- 'scikit-learn>=0.19.0',\n+ 'pandas',\n 'plotly>=4.0.0',\n+ 'scikit-learn>=0.19.0',\n 'scikit-optimize',\n ],\n 'document': [\n", "issue": "Order dependencies in `setup.py` alphabetically.\nThe libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.\r\n\r\nExamples:\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L30-L41\r\n```python\r\ndef get_install_requires() -> List[str]:\r\n\r\n return [\r\n 'alembic',\r\n 'cliff',\r\n 'colorlog',\r\n 'numpy',\r\n 'scipy!=1.4.0',\r\n 'sqlalchemy>=1.1.0',\r\n 'tqdm',\r\n 'joblib',\r\n ]\r\n```\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L61-L66\r\n```python\r\n 'doctest': [\r\n 'pandas',\r\n 'cma',\r\n 'scikit-learn>=0.19.0',\r\n 'plotly>=4.0.0',\r\n ],\r\n```\r\n\r\n\nOrder dependencies in `setup.py` alphabetically.\nThe libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.\r\n\r\nExamples:\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L30-L41\r\n```python\r\ndef get_install_requires() -> List[str]:\r\n\r\n return [\r\n 'alembic',\r\n 'cliff',\r\n 'colorlog',\r\n 'numpy',\r\n 'scipy!=1.4.0',\r\n 'sqlalchemy>=1.1.0',\r\n 'tqdm',\r\n 'joblib',\r\n ]\r\n```\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L61-L66\r\n```python\r\n 'doctest': [\r\n 'pandas',\r\n 'cma',\r\n 'scikit-learn>=0.19.0',\r\n 'plotly>=4.0.0',\r\n ],\r\n```\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), 'optuna', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), 'README.md')\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n 'alembic',\n 'cliff',\n 'colorlog',\n 'numpy',\n 'scipy!=1.4.0',\n 'sqlalchemy>=1.1.0',\n 'tqdm',\n 'joblib',\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()['testing']\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n 'checking': [\n 'autopep8',\n 'hacking',\n 'mypy',\n ],\n 'codecov': [\n 'codecov',\n 'pytest-cov',\n ],\n 'doctest': [\n 'pandas',\n 'cma',\n 'scikit-learn>=0.19.0',\n 'plotly>=4.0.0',\n 'scikit-optimize',\n ],\n 'document': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'example': [\n 'catboost',\n 'chainer',\n 'lightgbm',\n 'mlflow',\n 'mxnet',\n 'pytorch-ignite',\n 'scikit-image',\n 'scikit-learn',\n 'torch',\n 'torchvision>=0.5.0',\n 'xgboost',\n ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + ([\n 'dask[dataframe]',\n 'dask-ml',\n 'keras',\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 'pytorch-lightning<0.7.0',\n 
'tensorflow>=2.0.0',\n ] if sys.version_info[:2] < (3, 8) else []),\n 'testing': [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n 'bokeh<2.0.0',\n 'chainer>=5.0.0',\n 'cma',\n 'lightgbm',\n 'mock',\n 'mpi4py',\n 'mxnet',\n 'pandas',\n 'plotly>=4.0.0',\n 'pytest',\n 'pytorch-ignite',\n 'scikit-learn>=0.19.0',\n 'scikit-optimize',\n 'torch',\n 'torchvision>=0.5.0',\n 'xgboost',\n ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + ([\n 'keras',\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 'pytorch-lightning<0.7.0',\n 'tensorflow',\n 'tensorflow-datasets',\n ] if sys.version_info[:2] < (3, 8) else []),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution(['pfnopt'])\nif pfnopt_pkg is not None:\n msg = 'We detected that PFNOpt is installed in your environment.\\n' \\\n 'PFNOpt has been renamed Optuna. Please uninstall the old\\n' \\\n 'PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).'\n print(msg)\n exit(1)\n\nsetup(\n name='optuna',\n version=get_version(),\n description='A hyperparameter optimization framework',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Takuya Akiba',\n author_email='[email protected]',\n url='https://optuna.org/',\n packages=find_packages(),\n package_data={\n 'optuna': [\n 'storages/rdb/alembic.ini',\n 'storages/rdb/alembic/*.*',\n 'storages/rdb/alembic/versions/*.*'\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={'console_scripts': ['optuna = optuna.cli:main']})\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), 'optuna', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), 'README.md')\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n 'alembic',\n 'cliff',\n 'colorlog',\n 'joblib',\n 'numpy',\n 'scipy!=1.4.0',\n 'sqlalchemy>=1.1.0',\n 'tqdm',\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()['testing']\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n 'checking': [\n 'autopep8',\n 'hacking',\n 'mypy',\n ],\n 'codecov': [\n 'codecov',\n 'pytest-cov',\n ],\n 'doctest': [\n 'cma',\n 'pandas',\n 'plotly>=4.0.0',\n 'scikit-learn>=0.19.0',\n 'scikit-optimize',\n ],\n 'document': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'example': [\n 'catboost',\n 'chainer',\n 'lightgbm',\n 'mlflow',\n 'mxnet',\n 'pytorch-ignite',\n 'scikit-image',\n 'scikit-learn',\n 'torch',\n 'torchvision>=0.5.0',\n 'xgboost',\n ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else 
[])\n + ([\n 'dask[dataframe]',\n 'dask-ml',\n 'keras',\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 'pytorch-lightning<0.7.0',\n 'tensorflow>=2.0.0',\n ] if sys.version_info[:2] < (3, 8) else []),\n 'testing': [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n 'bokeh<2.0.0',\n 'chainer>=5.0.0',\n 'cma',\n 'lightgbm',\n 'mock',\n 'mpi4py',\n 'mxnet',\n 'pandas',\n 'plotly>=4.0.0',\n 'pytest',\n 'pytorch-ignite',\n 'scikit-learn>=0.19.0',\n 'scikit-optimize',\n 'torch',\n 'torchvision>=0.5.0',\n 'xgboost',\n ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + ([\n 'keras',\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 'pytorch-lightning<0.7.0',\n 'tensorflow',\n 'tensorflow-datasets',\n ] if sys.version_info[:2] < (3, 8) else []),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution(['pfnopt'])\nif pfnopt_pkg is not None:\n msg = 'We detected that PFNOpt is installed in your environment.\\n' \\\n 'PFNOpt has been renamed Optuna. Please uninstall the old\\n' \\\n 'PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).'\n print(msg)\n exit(1)\n\nsetup(\n name='optuna',\n version=get_version(),\n description='A hyperparameter optimization framework',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Takuya Akiba',\n author_email='[email protected]',\n url='https://optuna.org/',\n packages=find_packages(),\n package_data={\n 'optuna': [\n 'storages/rdb/alembic.ini',\n 'storages/rdb/alembic/*.*',\n 'storages/rdb/alembic/versions/*.*'\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={'console_scripts': ['optuna = optuna.cli:main']})\n", "path": "setup.py"}]} | 2,228 | 197 |
gh_patches_debug_12864 | rasdani/github-patches | git_diff | facebookresearch__hydra-792 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] interpolation to hydra.job.override_dirname containing another interpolation is not possible
# 🐛 Bug
Now that #725 is fixed, I post an issue for the behavior I described in (https://github.com/facebookresearch/hydra/issues/725#issuecomment-653442315)
In the following example an interpolation to `hydra.job.override_dirname` containing another interpolation results in a crash:
## To reproduce
test.py:
```
import hydra
from omegaconf import DictConfig
@hydra.main(config_path="conf", config_name="config")
def run(config: DictConfig):
print("c", config.c)
if __name__ == "__main__":
run()
```
conf/config.yaml:
```
a: 10
b: 20
c: override_${hydra:job.override_dirname}
```
call:
`python test.py b=\${a}` (I have to escape $)
** Stack trace/error message **
```
Traceback (most recent call last):
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py", line 197, in run_and_report
return func()
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py", line 271, in <lambda>
lambda: hydra.run(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/hydra.py", line 105, in run
return run_job(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py", line 122, in run_job
ret.return_value = task_function(task_cfg)
File "/home/mtadmin/projects/debug_hydra/interpolation_jobdir/test.py", line 16, in run
print("c", config.c)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py", line 315, in __getattr__
self._format_and_raise(key=key, value=None, cause=e)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 95, in _format_and_raise
format_and_raise(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 555, in format_and_raise
raise_(ex, cause)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 548, in raise_
raise ex
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py", line 313, in __getattr__
return self._get_impl(key=key, default_value=DEFAULT_VALUE_MARKER)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py", line 377, in _get_impl
return self._resolve_with_default(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/basecontainer.py", line 56, in _resolve_with_default
resolved = self._resolve_interpolation(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 370, in _resolve_interpolation
return self._resolve_simple_interpolation(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 344, in _resolve_simple_interpolation
self._format_and_raise(key=inter_key, value=None, cause=e)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 95, in _format_and_raise
format_and_raise(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 555, in format_and_raise
raise_(ex, cause)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 548, in raise_
raise ex
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 335, in _resolve_simple_interpolation
value = resolver(root_node, inter_key)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py", line 335, in caching
cache[key] if key in cache else resolver(*OmegaConf._tokenize_args(key))
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py", line 150, in <lambda>
lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py", line 531, in select
format_and_raise(node=cfg, key=key, value=None, cause=e, msg=str(e))
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 623, in format_and_raise
raise_(ex, cause)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 548, in raise_
raise ex
omegaconf.errors.ConfigKeyError: str interpolation key 'a' not found
full_key: hydra.job.override_dirname
reference_type=Optional[HydraConf]
object_type=HydraConf
```
## Expected Behavior
the `hydra.job.override_dirname` should be able to interpolate `a` via `b`
## System information
- **Hydra Version** : master/1.0.0rc2
- **Python version** : 3.8.3
- **Virtual environment type and version** : miniconda
- **Operating system** : ubuntu 18.04
## Additional context
This can be worked arround with something like this:
```
hdr = HydraConfig.get()
override_dirname= hdr.job.override_dirname
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/core/hydra_config.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from typing import Any, Optional
3
4 from omegaconf import DictConfig, OmegaConf
5
6 from hydra.conf import HydraConf
7 from hydra.core.singleton import Singleton
8
9
10 class HydraConfig(metaclass=Singleton):
11 def __init__(self) -> None:
12 self.cfg: Optional[HydraConf] = None
13
14 def set_config(self, cfg: DictConfig) -> None:
15 assert cfg is not None
16 OmegaConf.set_readonly(cfg.hydra, True)
17 assert OmegaConf.get_type(cfg, "hydra") == HydraConf
18 self.cfg = OmegaConf.masked_copy(cfg, "hydra") # type: ignore
19
20 @staticmethod
21 def get() -> HydraConf:
22 instance = HydraConfig.instance()
23 if instance.cfg is None:
24 raise ValueError("HydraConfig was not set")
25 return instance.cfg.hydra # type: ignore
26
27 @staticmethod
28 def initialized() -> bool:
29 instance = HydraConfig.instance()
30 return instance.cfg is not None
31
32 @staticmethod
33 def instance(*args: Any, **kwargs: Any) -> "HydraConfig":
34 return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/core/hydra_config.py b/hydra/core/hydra_config.py
--- a/hydra/core/hydra_config.py
+++ b/hydra/core/hydra_config.py
@@ -15,7 +15,13 @@
assert cfg is not None
OmegaConf.set_readonly(cfg.hydra, True)
assert OmegaConf.get_type(cfg, "hydra") == HydraConf
+ # THis is emulating a node that is hidden.
+ # It's quiet a hack but it will be much better once
+ # https://github.com/omry/omegaconf/issues/280 is done
+ # The motivation is that this allows for interpolations from the hydra node
+ # into the user's config.
self.cfg = OmegaConf.masked_copy(cfg, "hydra") # type: ignore
+ self.cfg.hydra._set_parent(cfg) # type: ignore
@staticmethod
def get() -> HydraConf:
| {"golden_diff": "diff --git a/hydra/core/hydra_config.py b/hydra/core/hydra_config.py\n--- a/hydra/core/hydra_config.py\n+++ b/hydra/core/hydra_config.py\n@@ -15,7 +15,13 @@\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n+ # THis is emulating a node that is hidden.\n+ # It's quiet a hack but it will be much better once\n+ # https://github.com/omry/omegaconf/issues/280 is done\n+ # The motivation is that this allows for interpolations from the hydra node\n+ # into the user's config.\n self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n+ self.cfg.hydra._set_parent(cfg) # type: ignore\n \n @staticmethod\n def get() -> HydraConf:\n", "issue": "[Bug] interpolation to hydra.job.override_dirname containing another interpolation is not possible\n# \ud83d\udc1b Bug\r\n\r\nNow that #725 is fixed, I post an issue for the behavior I described in (https://github.com/facebookresearch/hydra/issues/725#issuecomment-653442315)\r\nIn the following example an interpolation to `hydra.job.override_dirname` containing another interpolation results in a crash:\r\n\r\n## To reproduce\r\n\r\ntest.py:\r\n```\r\nimport hydra\r\nfrom omegaconf import DictConfig\r\n\r\n\r\[email protected](config_path=\"conf\", config_name=\"config\")\r\ndef run(config: DictConfig):\r\n print(\"c\", config.c)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n\r\n```\r\n\r\nconf/config.yaml:\r\n```\r\na: 10\r\nb: 20\r\n\r\nc: override_${hydra:job.override_dirname}\r\n```\r\n\r\ncall:\r\n`python test.py b=\\${a}` (I have to escape $)\r\n\r\n\r\n** Stack trace/error message **\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py\", line 197, in run_and_report\r\n return func()\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py\", line 271, in <lambda>\r\n lambda: hydra.run(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/hydra.py\", line 105, in run\r\n return run_job(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py\", line 122, in run_job\r\n ret.return_value = task_function(task_cfg)\r\n File \"/home/mtadmin/projects/debug_hydra/interpolation_jobdir/test.py\", line 16, in run\r\n print(\"c\", config.c)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py\", line 315, in __getattr__\r\n self._format_and_raise(key=key, value=None, cause=e)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 95, in _format_and_raise\r\n format_and_raise(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 555, in format_and_raise\r\n raise_(ex, cause)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 548, in raise_\r\n raise ex\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py\", line 313, in __getattr__\r\n return self._get_impl(key=key, default_value=DEFAULT_VALUE_MARKER)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py\", line 377, in _get_impl\r\n return self._resolve_with_default(\r\n File 
\"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/basecontainer.py\", line 56, in _resolve_with_default\r\n resolved = self._resolve_interpolation(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 370, in _resolve_interpolation\r\n return self._resolve_simple_interpolation(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 344, in _resolve_simple_interpolation\r\n self._format_and_raise(key=inter_key, value=None, cause=e)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 95, in _format_and_raise\r\n format_and_raise(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 555, in format_and_raise\r\n raise_(ex, cause)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 548, in raise_\r\n raise ex\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 335, in _resolve_simple_interpolation\r\n value = resolver(root_node, inter_key)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py\", line 335, in caching\r\n cache[key] if key in cache else resolver(*OmegaConf._tokenize_args(key))\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py\", line 150, in <lambda>\r\n lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py\", line 531, in select\r\n format_and_raise(node=cfg, key=key, value=None, cause=e, msg=str(e))\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 623, in format_and_raise\r\n raise_(ex, cause)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 548, in raise_\r\n raise ex\r\nomegaconf.errors.ConfigKeyError: str interpolation key 'a' not found\r\n full_key: hydra.job.override_dirname\r\n reference_type=Optional[HydraConf]\r\n object_type=HydraConf\r\n\r\n```\r\n\r\n## Expected Behavior\r\nthe `hydra.job.override_dirname` should be able to interpolate `a` via `b`\r\n\r\n## System information\r\n- **Hydra Version** : master/1.0.0rc2\r\n- **Python version** : 3.8.3\r\n- **Virtual environment type and version** : miniconda\r\n- **Operating system** : ubuntu 18.04\r\n\r\n## Additional context\r\nThis can be worked arround with something like this:\r\n```\r\nhdr = HydraConfig.get()\r\noverride_dirname= hdr.job.override_dirname\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom typing import Any, Optional\n\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom hydra.conf import HydraConf\nfrom hydra.core.singleton import Singleton\n\n\nclass HydraConfig(metaclass=Singleton):\n def __init__(self) -> None:\n self.cfg: Optional[HydraConf] = None\n\n def set_config(self, cfg: DictConfig) -> None:\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n\n @staticmethod\n def get() -> HydraConf:\n instance = HydraConfig.instance()\n if instance.cfg is None:\n raise ValueError(\"HydraConfig was not set\")\n return instance.cfg.hydra # type: ignore\n\n @staticmethod\n def initialized() -> bool:\n instance = HydraConfig.instance()\n return instance.cfg is not None\n\n @staticmethod\n def instance(*args: Any, **kwargs: Any) -> \"HydraConfig\":\n return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore\n", "path": "hydra/core/hydra_config.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Any, Optional\n\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom hydra.conf import HydraConf\nfrom hydra.core.singleton import Singleton\n\n\nclass HydraConfig(metaclass=Singleton):\n def __init__(self) -> None:\n self.cfg: Optional[HydraConf] = None\n\n def set_config(self, cfg: DictConfig) -> None:\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n # THis is emulating a node that is hidden.\n # It's quiet a hack but it will be much better once\n # https://github.com/omry/omegaconf/issues/280 is done\n # The motivation is that this allows for interpolations from the hydra node\n # into the user's config.\n self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n self.cfg.hydra._set_parent(cfg) # type: ignore\n\n @staticmethod\n def get() -> HydraConf:\n instance = HydraConfig.instance()\n if instance.cfg is None:\n raise ValueError(\"HydraConfig was not set\")\n return instance.cfg.hydra # type: ignore\n\n @staticmethod\n def initialized() -> bool:\n instance = HydraConfig.instance()\n return instance.cfg is not None\n\n @staticmethod\n def instance(*args: Any, **kwargs: Any) -> \"HydraConfig\":\n return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore\n", "path": "hydra/core/hydra_config.py"}]} | 2,166 | 224 |
gh_patches_debug_128 | rasdani/github-patches | git_diff | opsdroid__opsdroid-28 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Regex case sensitive
The regex match is currently case insensitive. It shouldn't be.
https://github.com/opsdroid/opsdroid/blob/master/opsdroid/helper.py#L30
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/helper.py`
Content:
```
1 """Helper functions to use within OpsDroid."""
2
3 import logging
4 import re
5
6
7 def set_logging_level(logging_level):
8 """Set the logger level based on the user configuration."""
9 logger = logging.getLogger()
10 if logging_level == 'critical':
11 logger.setLevel(logging.CRITICAL)
12 elif logging_level == 'error':
13 logger.setLevel(logging.ERROR)
14 elif logging_level == 'warning':
15 logger.setLevel(logging.WARNING)
16 elif logging_level == 'info':
17 logger.setLevel(logging.INFO)
18 elif logging_level == 'debug':
19 logger.setLevel(logging.DEBUG)
20 # No need to log the others as they'll never be seen
21 logging.debug("Set log level to debug")
22 else:
23 logger.setLevel(logging.INFO)
24 logging.warning("Log level '" + logging_level +
25 "' unknown, defaulting to 'info'")
26
27
28 def match(regex, message):
29 """Regex match a string."""
30 return re.match(regex, message, re.M | re.I)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/helper.py b/opsdroid/helper.py
--- a/opsdroid/helper.py
+++ b/opsdroid/helper.py
@@ -27,4 +27,4 @@
def match(regex, message):
"""Regex match a string."""
- return re.match(regex, message, re.M | re.I)
+ return re.match(regex, message)
| {"golden_diff": "diff --git a/opsdroid/helper.py b/opsdroid/helper.py\n--- a/opsdroid/helper.py\n+++ b/opsdroid/helper.py\n@@ -27,4 +27,4 @@\n \n def match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n- return re.match(regex, message, re.M | re.I)\n+ return re.match(regex, message)\n", "issue": "Regex case sensitive\nThe regex match is currently case insensitive. It shouldn't be.\n\nhttps://github.com/opsdroid/opsdroid/blob/master/opsdroid/helper.py#L30\n\n", "before_files": [{"content": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport logging\nimport re\n\n\ndef set_logging_level(logging_level):\n \"\"\"Set the logger level based on the user configuration.\"\"\"\n logger = logging.getLogger()\n if logging_level == 'critical':\n logger.setLevel(logging.CRITICAL)\n elif logging_level == 'error':\n logger.setLevel(logging.ERROR)\n elif logging_level == 'warning':\n logger.setLevel(logging.WARNING)\n elif logging_level == 'info':\n logger.setLevel(logging.INFO)\n elif logging_level == 'debug':\n logger.setLevel(logging.DEBUG)\n # No need to log the others as they'll never be seen\n logging.debug(\"Set log level to debug\")\n else:\n logger.setLevel(logging.INFO)\n logging.warning(\"Log level '\" + logging_level +\n \"' unknown, defaulting to 'info'\")\n\n\ndef match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n return re.match(regex, message, re.M | re.I)\n", "path": "opsdroid/helper.py"}], "after_files": [{"content": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport logging\nimport re\n\n\ndef set_logging_level(logging_level):\n \"\"\"Set the logger level based on the user configuration.\"\"\"\n logger = logging.getLogger()\n if logging_level == 'critical':\n logger.setLevel(logging.CRITICAL)\n elif logging_level == 'error':\n logger.setLevel(logging.ERROR)\n elif logging_level == 'warning':\n logger.setLevel(logging.WARNING)\n elif logging_level == 'info':\n logger.setLevel(logging.INFO)\n elif logging_level == 'debug':\n logger.setLevel(logging.DEBUG)\n # No need to log the others as they'll never be seen\n logging.debug(\"Set log level to debug\")\n else:\n logger.setLevel(logging.INFO)\n logging.warning(\"Log level '\" + logging_level +\n \"' unknown, defaulting to 'info'\")\n\n\ndef match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n return re.match(regex, message)\n", "path": "opsdroid/helper.py"}]} | 553 | 86 |
gh_patches_debug_10099 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CI-fail] HDF5 big data
This test fails due to h5py missing. Probably we can change requisites to include h5py or skip this test if h5py is not found.
```
examples/test_examples.py::test_examples[PyQt4-f17] frontend = PyQt4. f = ('HDF5 big data', 'hdf5.py')
HDF5 big data.....................................FAILED
Python 2.7.16 |Anaconda, Inc.| (default, Mar 14 2019, 21:00:58)
[GCC 7.3.0] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> >>> ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... Traceback (most recent call last):
File "<stdin>", line 6, in <module>
File "hdf5.py", line 19, in <module>
import h5py
ImportError: No module named h5py
>>>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/utils.py`
Content:
```
1 from __future__ import division, print_function, absolute_import
2 import subprocess
3 import time
4 import os
5 import sys
6 import errno
7 from pyqtgraph.pgcollections import OrderedDict
8 from pyqtgraph.python2_3 import basestring
9
10 path = os.path.abspath(os.path.dirname(__file__))
11
12
13 examples = OrderedDict([
14 ('Command-line usage', 'CLIexample.py'),
15 ('Basic Plotting', 'Plotting.py'),
16 ('ImageView', 'ImageView.py'),
17 ('ParameterTree', 'parametertree.py'),
18 ('Crosshair / Mouse interaction', 'crosshair.py'),
19 ('Data Slicing', 'DataSlicing.py'),
20 ('Plot Customization', 'customPlot.py'),
21 ('Image Analysis', 'imageAnalysis.py'),
22 ('ViewBox Features', 'ViewBoxFeatures.py'),
23 ('Dock widgets', 'dockarea.py'),
24 ('Console', 'ConsoleWidget.py'),
25 ('Histograms', 'histogram.py'),
26 ('Beeswarm plot', 'beeswarm.py'),
27 ('Symbols', 'Symbols.py'),
28 ('Auto-range', 'PlotAutoRange.py'),
29 ('Remote Plotting', 'RemoteSpeedTest.py'),
30 ('Scrolling plots', 'scrollingPlots.py'),
31 ('HDF5 big data', 'hdf5.py'),
32 ('Demos', OrderedDict([
33 ('Optics', 'optics_demos.py'),
34 ('Special relativity', 'relativity_demo.py'),
35 ('Verlet chain', 'verlet_chain_demo.py'),
36 ('Koch Fractal', 'fractal.py'),
37 ])),
38 ('GraphicsItems', OrderedDict([
39 ('Scatter Plot', 'ScatterPlot.py'),
40 #('PlotItem', 'PlotItem.py'),
41 ('IsocurveItem', 'isocurve.py'),
42 ('GraphItem', 'GraphItem.py'),
43 ('ErrorBarItem', 'ErrorBarItem.py'),
44 ('FillBetweenItem', 'FillBetweenItem.py'),
45 ('ImageItem - video', 'ImageItem.py'),
46 ('ImageItem - draw', 'Draw.py'),
47 ('Region-of-Interest', 'ROIExamples.py'),
48 ('Bar Graph', 'BarGraphItem.py'),
49 ('GraphicsLayout', 'GraphicsLayout.py'),
50 ('LegendItem', 'Legend.py'),
51 ('Text Item', 'text.py'),
52 ('Linked Views', 'linkedViews.py'),
53 ('Arrow', 'Arrow.py'),
54 ('ViewBox', 'ViewBoxFeatures.py'),
55 ('Custom Graphics', 'customGraphicsItem.py'),
56 ('Labeled Graph', 'CustomGraphItem.py'),
57 ])),
58 ('Benchmarks', OrderedDict([
59 ('Video speed test', 'VideoSpeedTest.py'),
60 ('Line Plot update', 'PlotSpeedTest.py'),
61 ('Scatter Plot update', 'ScatterPlotSpeedTest.py'),
62 ('Multiple plots', 'MultiPlotSpeedTest.py'),
63 ])),
64 ('3D Graphics', OrderedDict([
65 ('Volumetric', 'GLVolumeItem.py'),
66 ('Isosurface', 'GLIsosurface.py'),
67 ('Surface Plot', 'GLSurfacePlot.py'),
68 ('Scatter Plot', 'GLScatterPlotItem.py'),
69 ('Shaders', 'GLshaders.py'),
70 ('Line Plot', 'GLLinePlotItem.py'),
71 ('Mesh', 'GLMeshItem.py'),
72 ('Image', 'GLImageItem.py'),
73 ])),
74 ('Widgets', OrderedDict([
75 ('PlotWidget', 'PlotWidget.py'),
76 ('SpinBox', 'SpinBox.py'),
77 ('ConsoleWidget', 'ConsoleWidget.py'),
78 ('Histogram / lookup table', 'HistogramLUT.py'),
79 ('TreeWidget', 'TreeWidget.py'),
80 ('ScatterPlotWidget', 'ScatterPlotWidget.py'),
81 ('DataTreeWidget', 'DataTreeWidget.py'),
82 ('GradientWidget', 'GradientWidget.py'),
83 ('TableWidget', 'TableWidget.py'),
84 ('ColorButton', 'ColorButton.py'),
85 #('CheckTable', '../widgets/CheckTable.py'),
86 #('VerticalLabel', '../widgets/VerticalLabel.py'),
87 ('JoystickButton', 'JoystickButton.py'),
88 ])),
89
90 ('Flowcharts', 'Flowchart.py'),
91 ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),
92 ])
93
94
95 def buildFileList(examples, files=None):
96 if files == None:
97 files = []
98 for key, val in examples.items():
99 #item = QtGui.QTreeWidgetItem([key])
100 if isinstance(val, basestring):
101 #item.file = val
102 files.append((key,val))
103 else:
104 buildFileList(val, files)
105 return files
106
107 def testFile(name, f, exe, lib, graphicsSystem=None):
108 global path
109 fn = os.path.join(path,f)
110 #print "starting process: ", fn
111 os.chdir(path)
112 sys.stdout.write(name)
113 sys.stdout.flush()
114
115 import1 = "import %s" % lib if lib != '' else ''
116 import2 = os.path.splitext(os.path.split(fn)[1])[0]
117 graphicsSystem = '' if graphicsSystem is None else "pg.QtGui.QApplication.setGraphicsSystem('%s')" % graphicsSystem
118 code = """
119 try:
120 %s
121 import initExample
122 import pyqtgraph as pg
123 %s
124 import %s
125 import sys
126 print("test complete")
127 sys.stdout.flush()
128 import time
129 while True: ## run a little event loop
130 pg.QtGui.QApplication.processEvents()
131 time.sleep(0.01)
132 except:
133 print("test failed")
134 raise
135
136 """ % (import1, graphicsSystem, import2)
137
138 if sys.platform.startswith('win'):
139 process = subprocess.Popen([exe], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
140 process.stdin.write(code.encode('UTF-8'))
141 process.stdin.close()
142 else:
143 process = subprocess.Popen(['exec %s -i' % (exe)], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
144 process.stdin.write(code.encode('UTF-8'))
145 process.stdin.close() ##?
146 output = ''
147 fail = False
148 while True:
149 try:
150 c = process.stdout.read(1).decode()
151 except IOError as err:
152 if err.errno == errno.EINTR:
153 # Interrupted system call; just try again.
154 c = ''
155 else:
156 raise
157 output += c
158 #sys.stdout.write(c)
159 #sys.stdout.flush()
160 if output.endswith('test complete'):
161 break
162 if output.endswith('test failed'):
163 fail = True
164 break
165 time.sleep(1)
166 process.kill()
167 #res = process.communicate()
168 res = (process.stdout.read(), process.stderr.read())
169
170 if fail or 'exception' in res[1].decode().lower() or 'error' in res[1].decode().lower():
171 print('.' * (50-len(name)) + 'FAILED')
172 print(res[0].decode())
173 print(res[1].decode())
174 else:
175 print('.' * (50-len(name)) + 'passed')
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/utils.py b/examples/utils.py
--- a/examples/utils.py
+++ b/examples/utils.py
@@ -4,6 +4,7 @@
import os
import sys
import errno
+import copy
from pyqtgraph.pgcollections import OrderedDict
from pyqtgraph.python2_3 import basestring
@@ -91,6 +92,11 @@
('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),
])
+not_tested = ['HDF5 big data']
+
+tested_examples = copy.deepcopy(examples)
+all(map(tested_examples.pop, not_tested))
+
def buildFileList(examples, files=None):
if files == None:
| {"golden_diff": "diff --git a/examples/utils.py b/examples/utils.py\n--- a/examples/utils.py\n+++ b/examples/utils.py\n@@ -4,6 +4,7 @@\n import os\n import sys\n import errno\n+import copy\n from pyqtgraph.pgcollections import OrderedDict\n from pyqtgraph.python2_3 import basestring\n \n@@ -91,6 +92,11 @@\n ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),\n ])\n \n+not_tested = ['HDF5 big data']\n+\n+tested_examples = copy.deepcopy(examples)\n+all(map(tested_examples.pop, not_tested))\n+\n \n def buildFileList(examples, files=None):\n if files == None:\n", "issue": "[CI-fail] HDF5 big data\nThis test fails due to h5py missing. Probably we can change requisites to include h5py or skip this test if h5py is not found.\r\n```\r\nexamples/test_examples.py::test_examples[PyQt4-f17] frontend = PyQt4. f = ('HDF5 big data', 'hdf5.py')\r\nHDF5 big data.....................................FAILED\r\nPython 2.7.16 |Anaconda, Inc.| (default, Mar 14 2019, 21:00:58) \r\n[GCC 7.3.0] on linux2\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> >>> ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... Traceback (most recent call last):\r\n File \"<stdin>\", line 6, in <module>\r\n File \"hdf5.py\", line 19, in <module>\r\n import h5py\r\nImportError: No module named h5py\r\n>>> \r\n```\n", "before_files": [{"content": "from __future__ import division, print_function, absolute_import\nimport subprocess\nimport time\nimport os\nimport sys\nimport errno\nfrom pyqtgraph.pgcollections import OrderedDict\nfrom pyqtgraph.python2_3 import basestring\n\npath = os.path.abspath(os.path.dirname(__file__))\n\n\nexamples = OrderedDict([\n ('Command-line usage', 'CLIexample.py'),\n ('Basic Plotting', 'Plotting.py'),\n ('ImageView', 'ImageView.py'),\n ('ParameterTree', 'parametertree.py'),\n ('Crosshair / Mouse interaction', 'crosshair.py'),\n ('Data Slicing', 'DataSlicing.py'),\n ('Plot Customization', 'customPlot.py'),\n ('Image Analysis', 'imageAnalysis.py'),\n ('ViewBox Features', 'ViewBoxFeatures.py'),\n ('Dock widgets', 'dockarea.py'),\n ('Console', 'ConsoleWidget.py'),\n ('Histograms', 'histogram.py'),\n ('Beeswarm plot', 'beeswarm.py'),\n ('Symbols', 'Symbols.py'),\n ('Auto-range', 'PlotAutoRange.py'),\n ('Remote Plotting', 'RemoteSpeedTest.py'),\n ('Scrolling plots', 'scrollingPlots.py'),\n ('HDF5 big data', 'hdf5.py'),\n ('Demos', OrderedDict([\n ('Optics', 'optics_demos.py'),\n ('Special relativity', 'relativity_demo.py'),\n ('Verlet chain', 'verlet_chain_demo.py'),\n ('Koch Fractal', 'fractal.py'),\n ])),\n ('GraphicsItems', OrderedDict([\n ('Scatter Plot', 'ScatterPlot.py'),\n #('PlotItem', 'PlotItem.py'),\n ('IsocurveItem', 'isocurve.py'),\n ('GraphItem', 'GraphItem.py'),\n ('ErrorBarItem', 'ErrorBarItem.py'),\n ('FillBetweenItem', 'FillBetweenItem.py'),\n ('ImageItem - video', 'ImageItem.py'),\n ('ImageItem - draw', 'Draw.py'),\n ('Region-of-Interest', 'ROIExamples.py'),\n ('Bar Graph', 'BarGraphItem.py'),\n ('GraphicsLayout', 'GraphicsLayout.py'),\n ('LegendItem', 'Legend.py'),\n ('Text Item', 'text.py'),\n ('Linked Views', 'linkedViews.py'),\n ('Arrow', 'Arrow.py'),\n ('ViewBox', 'ViewBoxFeatures.py'),\n ('Custom Graphics', 'customGraphicsItem.py'),\n ('Labeled Graph', 'CustomGraphItem.py'),\n ])),\n ('Benchmarks', OrderedDict([\n ('Video speed test', 'VideoSpeedTest.py'),\n ('Line Plot update', 'PlotSpeedTest.py'),\n ('Scatter Plot update', 'ScatterPlotSpeedTest.py'),\n ('Multiple plots', 'MultiPlotSpeedTest.py'),\n ])),\n ('3D Graphics', 
OrderedDict([\n ('Volumetric', 'GLVolumeItem.py'),\n ('Isosurface', 'GLIsosurface.py'),\n ('Surface Plot', 'GLSurfacePlot.py'),\n ('Scatter Plot', 'GLScatterPlotItem.py'),\n ('Shaders', 'GLshaders.py'),\n ('Line Plot', 'GLLinePlotItem.py'),\n ('Mesh', 'GLMeshItem.py'),\n ('Image', 'GLImageItem.py'),\n ])),\n ('Widgets', OrderedDict([\n ('PlotWidget', 'PlotWidget.py'),\n ('SpinBox', 'SpinBox.py'),\n ('ConsoleWidget', 'ConsoleWidget.py'),\n ('Histogram / lookup table', 'HistogramLUT.py'),\n ('TreeWidget', 'TreeWidget.py'),\n ('ScatterPlotWidget', 'ScatterPlotWidget.py'),\n ('DataTreeWidget', 'DataTreeWidget.py'),\n ('GradientWidget', 'GradientWidget.py'),\n ('TableWidget', 'TableWidget.py'),\n ('ColorButton', 'ColorButton.py'),\n #('CheckTable', '../widgets/CheckTable.py'),\n #('VerticalLabel', '../widgets/VerticalLabel.py'),\n ('JoystickButton', 'JoystickButton.py'),\n ])),\n\n ('Flowcharts', 'Flowchart.py'),\n ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),\n])\n\n\ndef buildFileList(examples, files=None):\n if files == None:\n files = []\n for key, val in examples.items():\n #item = QtGui.QTreeWidgetItem([key])\n if isinstance(val, basestring):\n #item.file = val\n files.append((key,val))\n else:\n buildFileList(val, files)\n return files\n\ndef testFile(name, f, exe, lib, graphicsSystem=None):\n global path\n fn = os.path.join(path,f)\n #print \"starting process: \", fn\n os.chdir(path)\n sys.stdout.write(name)\n sys.stdout.flush()\n\n import1 = \"import %s\" % lib if lib != '' else ''\n import2 = os.path.splitext(os.path.split(fn)[1])[0]\n graphicsSystem = '' if graphicsSystem is None else \"pg.QtGui.QApplication.setGraphicsSystem('%s')\" % graphicsSystem\n code = \"\"\"\ntry:\n %s\n import initExample\n import pyqtgraph as pg\n %s\n import %s\n import sys\n print(\"test complete\")\n sys.stdout.flush()\n import time\n while True: ## run a little event loop\n pg.QtGui.QApplication.processEvents()\n time.sleep(0.01)\nexcept:\n print(\"test failed\")\n raise\n\n\"\"\" % (import1, graphicsSystem, import2)\n\n if sys.platform.startswith('win'):\n process = subprocess.Popen([exe], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.stdin.write(code.encode('UTF-8'))\n process.stdin.close()\n else:\n process = subprocess.Popen(['exec %s -i' % (exe)], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.stdin.write(code.encode('UTF-8'))\n process.stdin.close() ##?\n output = ''\n fail = False\n while True:\n try:\n c = process.stdout.read(1).decode()\n except IOError as err:\n if err.errno == errno.EINTR:\n # Interrupted system call; just try again.\n c = ''\n else:\n raise\n output += c\n #sys.stdout.write(c)\n #sys.stdout.flush()\n if output.endswith('test complete'):\n break\n if output.endswith('test failed'):\n fail = True\n break\n time.sleep(1)\n process.kill()\n #res = process.communicate()\n res = (process.stdout.read(), process.stderr.read())\n\n if fail or 'exception' in res[1].decode().lower() or 'error' in res[1].decode().lower():\n print('.' * (50-len(name)) + 'FAILED')\n print(res[0].decode())\n print(res[1].decode())\n else:\n print('.' 
* (50-len(name)) + 'passed')\n", "path": "examples/utils.py"}], "after_files": [{"content": "from __future__ import division, print_function, absolute_import\nimport subprocess\nimport time\nimport os\nimport sys\nimport errno\nimport copy\nfrom pyqtgraph.pgcollections import OrderedDict\nfrom pyqtgraph.python2_3 import basestring\n\npath = os.path.abspath(os.path.dirname(__file__))\n\n\nexamples = OrderedDict([\n ('Command-line usage', 'CLIexample.py'),\n ('Basic Plotting', 'Plotting.py'),\n ('ImageView', 'ImageView.py'),\n ('ParameterTree', 'parametertree.py'),\n ('Crosshair / Mouse interaction', 'crosshair.py'),\n ('Data Slicing', 'DataSlicing.py'),\n ('Plot Customization', 'customPlot.py'),\n ('Image Analysis', 'imageAnalysis.py'),\n ('ViewBox Features', 'ViewBoxFeatures.py'),\n ('Dock widgets', 'dockarea.py'),\n ('Console', 'ConsoleWidget.py'),\n ('Histograms', 'histogram.py'),\n ('Beeswarm plot', 'beeswarm.py'),\n ('Symbols', 'Symbols.py'),\n ('Auto-range', 'PlotAutoRange.py'),\n ('Remote Plotting', 'RemoteSpeedTest.py'),\n ('Scrolling plots', 'scrollingPlots.py'),\n ('HDF5 big data', 'hdf5.py'),\n ('Demos', OrderedDict([\n ('Optics', 'optics_demos.py'),\n ('Special relativity', 'relativity_demo.py'),\n ('Verlet chain', 'verlet_chain_demo.py'),\n ('Koch Fractal', 'fractal.py'),\n ])),\n ('GraphicsItems', OrderedDict([\n ('Scatter Plot', 'ScatterPlot.py'),\n #('PlotItem', 'PlotItem.py'),\n ('IsocurveItem', 'isocurve.py'),\n ('GraphItem', 'GraphItem.py'),\n ('ErrorBarItem', 'ErrorBarItem.py'),\n ('FillBetweenItem', 'FillBetweenItem.py'),\n ('ImageItem - video', 'ImageItem.py'),\n ('ImageItem - draw', 'Draw.py'),\n ('Region-of-Interest', 'ROIExamples.py'),\n ('Bar Graph', 'BarGraphItem.py'),\n ('GraphicsLayout', 'GraphicsLayout.py'),\n ('LegendItem', 'Legend.py'),\n ('Text Item', 'text.py'),\n ('Linked Views', 'linkedViews.py'),\n ('Arrow', 'Arrow.py'),\n ('ViewBox', 'ViewBoxFeatures.py'),\n ('Custom Graphics', 'customGraphicsItem.py'),\n ('Labeled Graph', 'CustomGraphItem.py'),\n ])),\n ('Benchmarks', OrderedDict([\n ('Video speed test', 'VideoSpeedTest.py'),\n ('Line Plot update', 'PlotSpeedTest.py'),\n ('Scatter Plot update', 'ScatterPlotSpeedTest.py'),\n ('Multiple plots', 'MultiPlotSpeedTest.py'),\n ])),\n ('3D Graphics', OrderedDict([\n ('Volumetric', 'GLVolumeItem.py'),\n ('Isosurface', 'GLIsosurface.py'),\n ('Surface Plot', 'GLSurfacePlot.py'),\n ('Scatter Plot', 'GLScatterPlotItem.py'),\n ('Shaders', 'GLshaders.py'),\n ('Line Plot', 'GLLinePlotItem.py'),\n ('Mesh', 'GLMeshItem.py'),\n ('Image', 'GLImageItem.py'),\n ])),\n ('Widgets', OrderedDict([\n ('PlotWidget', 'PlotWidget.py'),\n ('SpinBox', 'SpinBox.py'),\n ('ConsoleWidget', 'ConsoleWidget.py'),\n ('Histogram / lookup table', 'HistogramLUT.py'),\n ('TreeWidget', 'TreeWidget.py'),\n ('ScatterPlotWidget', 'ScatterPlotWidget.py'),\n ('DataTreeWidget', 'DataTreeWidget.py'),\n ('GradientWidget', 'GradientWidget.py'),\n ('TableWidget', 'TableWidget.py'),\n ('ColorButton', 'ColorButton.py'),\n #('CheckTable', '../widgets/CheckTable.py'),\n #('VerticalLabel', '../widgets/VerticalLabel.py'),\n ('JoystickButton', 'JoystickButton.py'),\n ])),\n\n ('Flowcharts', 'Flowchart.py'),\n ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),\n])\n\nnot_tested = ['HDF5 big data']\n\ntested_examples = copy.deepcopy(examples)\nall(map(tested_examples.pop, not_tested))\n\n\ndef buildFileList(examples, files=None):\n if files == None:\n files = []\n for key, val in examples.items():\n #item = QtGui.QTreeWidgetItem([key])\n if isinstance(val, 
basestring):\n #item.file = val\n files.append((key,val))\n else:\n buildFileList(val, files)\n return files\n\ndef testFile(name, f, exe, lib, graphicsSystem=None):\n global path\n fn = os.path.join(path,f)\n #print \"starting process: \", fn\n os.chdir(path)\n sys.stdout.write(name)\n sys.stdout.flush()\n\n import1 = \"import %s\" % lib if lib != '' else ''\n import2 = os.path.splitext(os.path.split(fn)[1])[0]\n graphicsSystem = '' if graphicsSystem is None else \"pg.QtGui.QApplication.setGraphicsSystem('%s')\" % graphicsSystem\n code = \"\"\"\ntry:\n %s\n import initExample\n import pyqtgraph as pg\n %s\n import %s\n import sys\n print(\"test complete\")\n sys.stdout.flush()\n import time\n while True: ## run a little event loop\n pg.QtGui.QApplication.processEvents()\n time.sleep(0.01)\nexcept:\n print(\"test failed\")\n raise\n\n\"\"\" % (import1, graphicsSystem, import2)\n\n if sys.platform.startswith('win'):\n process = subprocess.Popen([exe], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.stdin.write(code.encode('UTF-8'))\n process.stdin.close()\n else:\n process = subprocess.Popen(['exec %s -i' % (exe)], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.stdin.write(code.encode('UTF-8'))\n process.stdin.close() ##?\n output = ''\n fail = False\n while True:\n try:\n c = process.stdout.read(1).decode()\n except IOError as err:\n if err.errno == errno.EINTR:\n # Interrupted system call; just try again.\n c = ''\n else:\n raise\n output += c\n #sys.stdout.write(c)\n #sys.stdout.flush()\n if output.endswith('test complete'):\n break\n if output.endswith('test failed'):\n fail = True\n break\n time.sleep(1)\n process.kill()\n #res = process.communicate()\n res = (process.stdout.read(), process.stderr.read())\n\n if fail or 'exception' in res[1].decode().lower() or 'error' in res[1].decode().lower():\n print('.' * (50-len(name)) + 'FAILED')\n print(res[0].decode())\n print(res[1].decode())\n else:\n print('.' * (50-len(name)) + 'passed')\n", "path": "examples/utils.py"}]} | 2,400 | 150 |
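Side note on the pyqtgraph row above: the patch keeps the HDF5 example in the menu but filters it out of the test list with a deep copy plus `pop`. The same idiom can be sketched with a plain `collections.OrderedDict` — the two-entry registry below is illustrative, not pyqtgraph's real one:

```
import copy
from collections import OrderedDict

examples = OrderedDict([("Basic Plotting", "Plotting.py"), ("HDF5 big data", "hdf5.py")])
not_tested = ["HDF5 big data"]  # examples that need optional deps such as h5py

# Deep-copy the full registry, then pop the untestable entries from the copy only.
tested_examples = copy.deepcopy(examples)
all(map(tested_examples.pop, not_tested))  # all() just forces the lazy map; pop() returns truthy paths

print(list(examples))         # ['Basic Plotting', 'HDF5 big data']  -> still shown in the example browser
print(list(tested_examples))  # ['Basic Plotting']                   -> only this one gets auto-tested
```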
gh_patches_debug_38561 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1666 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`download_data` from `flash.core.data.utils` connects to the internet before checking if a file exists
## 🐛 Bug
In many supercomputers, process of running ML codes is to first run the download part on the login nodes (which have access to the internet), and stop the code right before the actual training starts.
Then, when you run on the compute nodes (the ones with the actual gpus and no internet access), you let the code run to the end. In other frameworks, data downloaders detect the files' presence and skip it before ever trying to connect to the internet.
Flash tries first to check file size in [this line](https://github.com/Lightning-Universe/lightning-flash/blob/18ff71e228ea0d68d6564ae454a7053e503dee15/src/flash/core/data/utils.py#L86), which will freeze in a machine without internet.
### To Reproduce
call "download_data" on a machine with no internet access
#### Code sample
```
from flash.core.data.utils import download_data
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
```
### Expected behavior
If the file is already there, skip download
### Environment
- OS (e.g., Linux): Centos 8.6
- Python version: 3.10
- PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7): Pytorch 1.12.1, Lightning 0.8.4, Flash 0.8.1.post0
- GPU models and configuration: 16x A100 40GB
- Any other relevant information:
### Additional context
Fast.ai's fastdownload, for example, does not suffer from this - if the file is there, it doesn't try to download it, even if it's of the wrong size: [fastdownload link](https://github.com/fastai/fastcore/blob/1f6844d44d6e0e26b393cecd37818dbb4d391aca/fastcore/net.py#L180)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/flash/core/data/utils.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os.path
16 import tarfile
17 import zipfile
18 from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Set
19
20 import requests
21 import urllib3
22 from pytorch_lightning.utilities.apply_func import apply_to_collection
23 from torch import nn
24 from tqdm.auto import tqdm as tq
25
26 from flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE
27 from flash.core.utilities.stages import RunningStage
28
29 # Skip doctests if requirements aren't available
30 if not _TOPIC_CORE_AVAILABLE:
31 __doctest_skip__ = ["download_data"]
32
33 _STAGES_PREFIX = {
34 RunningStage.TRAINING: "train",
35 RunningStage.TESTING: "test",
36 RunningStage.VALIDATING: "val",
37 RunningStage.PREDICTING: "predict",
38 RunningStage.SERVING: "serve",
39 RunningStage.SANITY_CHECKING: "val",
40 }
41
42 _INPUT_TRANSFORM_FUNCS: Set[str] = {
43 "per_sample_transform",
44 "per_batch_transform",
45 "per_sample_transform_on_device",
46 "per_batch_transform_on_device",
47 "collate",
48 }
49
50 _CALLBACK_FUNCS: Set[str] = {
51 "load_sample",
52 *_INPUT_TRANSFORM_FUNCS,
53 }
54
55 _OUTPUT_TRANSFORM_FUNCS: Set[str] = {
56 "per_batch_transform",
57 "uncollate",
58 "per_sample_transform",
59 }
60
61
62 def download_data(url: str, path: str = "data/", verbose: bool = False) -> None:
63 """Download file with progressbar.
64
65 # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603
66 # __author__ = "github.com/ruxi"
67 # __license__ = "MIT"
68
69 Examples
70 ________
71
72 .. doctest::
73
74 >>> import os
75 >>> from flash.core.data.utils import download_data
76 >>> download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", "./data")
77 >>> os.listdir("./data") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
78 [...]
79
80 """
81 # Disable warning about making an insecure request
82 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
83
84 if not os.path.exists(path):
85 os.makedirs(path)
86 local_filename = os.path.join(path, url.split("/")[-1])
87 r = requests.get(url, stream=True, verify=False)
88 file_size = int(r.headers["Content-Length"]) if "Content-Length" in r.headers else 0
89 chunk_size = 1024
90 num_bars = int(file_size / chunk_size)
91 if verbose:
92 print({"file_size": file_size})
93 print({"num_bars": num_bars})
94
95 if not os.path.exists(local_filename):
96 with open(local_filename, "wb") as fp:
97 for chunk in tq(
98 r.iter_content(chunk_size=chunk_size),
99 total=num_bars,
100 unit="KB",
101 desc=local_filename,
102 leave=True, # progressbar stays
103 ):
104 fp.write(chunk) # type: ignore
105
106 def extract_tarfile(file_path: str, extract_path: str, mode: str):
107 if os.path.exists(file_path):
108 with tarfile.open(file_path, mode=mode) as tar_ref:
109 for member in tar_ref.getmembers():
110 try:
111 tar_ref.extract(member, path=extract_path, set_attrs=False)
112 except PermissionError:
113 raise PermissionError(f"Could not extract tar file {file_path}")
114
115 if ".zip" in local_filename:
116 if os.path.exists(local_filename):
117 with zipfile.ZipFile(local_filename, "r") as zip_ref:
118 zip_ref.extractall(path)
119 elif local_filename.endswith(".tar.gz") or local_filename.endswith(".tgz"):
120 extract_tarfile(local_filename, path, "r:gz")
121 elif local_filename.endswith(".tar.bz2") or local_filename.endswith(".tbz"):
122 extract_tarfile(local_filename, path, "r:bz2")
123
124
125 class FuncModule(nn.Module):
126 """This class is used to wrap a callable within a nn.Module and apply the wrapped function in `__call__`"""
127
128 def __init__(self, func: Callable) -> None:
129 super().__init__()
130 self.func = func
131
132 def forward(self, *args, **kwargs) -> Any:
133 return self.func(*args, **kwargs)
134
135 def __str__(self) -> str:
136 return f"{self.__class__.__name__}({self.func.__name__})"
137
138 def __repr__(self):
139 return str(self.func)
140
141
142 def convert_to_modules(transforms: Optional[Dict[str, Callable]]):
143 if transforms is None or isinstance(transforms, nn.Module):
144 return transforms
145
146 transforms = apply_to_collection(transforms, Callable, FuncModule, wrong_dtype=nn.Module)
147 transforms = apply_to_collection(transforms, Mapping, nn.ModuleDict, wrong_dtype=nn.ModuleDict)
148 return apply_to_collection(transforms, Iterable, nn.ModuleList, wrong_dtype=(nn.ModuleList, nn.ModuleDict))
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/flash/core/data/utils.py b/src/flash/core/data/utils.py
--- a/src/flash/core/data/utils.py
+++ b/src/flash/core/data/utils.py
@@ -59,7 +59,7 @@
}
-def download_data(url: str, path: str = "data/", verbose: bool = False) -> None:
+def download_data(url: str, path: str = "data/", verbose: bool = False, chunk_size: int = 1024) -> None:
"""Download file with progressbar.
# Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603
@@ -78,39 +78,42 @@
[...]
"""
+ local_filename = os.path.join(path, url.split("/")[-1])
+ if os.path.exists(local_filename):
+ if verbose:
+ print(f"local file already exists: '{local_filename}'")
+ return
+
+ os.makedirs(path, exist_ok=True)
# Disable warning about making an insecure request
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
- if not os.path.exists(path):
- os.makedirs(path)
- local_filename = os.path.join(path, url.split("/")[-1])
r = requests.get(url, stream=True, verify=False)
file_size = int(r.headers["Content-Length"]) if "Content-Length" in r.headers else 0
- chunk_size = 1024
num_bars = int(file_size / chunk_size)
if verbose:
- print({"file_size": file_size})
- print({"num_bars": num_bars})
-
- if not os.path.exists(local_filename):
- with open(local_filename, "wb") as fp:
- for chunk in tq(
- r.iter_content(chunk_size=chunk_size),
- total=num_bars,
- unit="KB",
- desc=local_filename,
- leave=True, # progressbar stays
- ):
- fp.write(chunk) # type: ignore
-
- def extract_tarfile(file_path: str, extract_path: str, mode: str):
- if os.path.exists(file_path):
- with tarfile.open(file_path, mode=mode) as tar_ref:
- for member in tar_ref.getmembers():
- try:
- tar_ref.extract(member, path=extract_path, set_attrs=False)
- except PermissionError:
- raise PermissionError(f"Could not extract tar file {file_path}")
+ print(f"file size: {file_size}")
+ print(f"num bars: {num_bars}")
+
+ with open(local_filename, "wb") as fp:
+ for chunk in tq(
+ r.iter_content(chunk_size=chunk_size),
+ total=num_bars,
+ unit="KB",
+ desc=local_filename,
+ leave=True, # progressbar stays
+ ):
+ fp.write(chunk) # type: ignore
+
+ def extract_tarfile(file_path: str, extract_path: str, mode: str) -> None:
+ if not os.path.exists(file_path):
+ return
+ with tarfile.open(file_path, mode=mode) as tar_ref:
+ for member in tar_ref.getmembers():
+ try:
+ tar_ref.extract(member, path=extract_path, set_attrs=False)
+ except PermissionError:
+ raise PermissionError(f"Could not extract tar file {file_path}")
if ".zip" in local_filename:
if os.path.exists(local_filename):
| {"golden_diff": "diff --git a/src/flash/core/data/utils.py b/src/flash/core/data/utils.py\n--- a/src/flash/core/data/utils.py\n+++ b/src/flash/core/data/utils.py\n@@ -59,7 +59,7 @@\n }\n \n \n-def download_data(url: str, path: str = \"data/\", verbose: bool = False) -> None:\n+def download_data(url: str, path: str = \"data/\", verbose: bool = False, chunk_size: int = 1024) -> None:\n \"\"\"Download file with progressbar.\n \n # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n@@ -78,39 +78,42 @@\n [...]\n \n \"\"\"\n+ local_filename = os.path.join(path, url.split(\"/\")[-1])\n+ if os.path.exists(local_filename):\n+ if verbose:\n+ print(f\"local file already exists: '{local_filename}'\")\n+ return\n+\n+ os.makedirs(path, exist_ok=True)\n # Disable warning about making an insecure request\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n \n- if not os.path.exists(path):\n- os.makedirs(path)\n- local_filename = os.path.join(path, url.split(\"/\")[-1])\n r = requests.get(url, stream=True, verify=False)\n file_size = int(r.headers[\"Content-Length\"]) if \"Content-Length\" in r.headers else 0\n- chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n- print({\"file_size\": file_size})\n- print({\"num_bars\": num_bars})\n-\n- if not os.path.exists(local_filename):\n- with open(local_filename, \"wb\") as fp:\n- for chunk in tq(\n- r.iter_content(chunk_size=chunk_size),\n- total=num_bars,\n- unit=\"KB\",\n- desc=local_filename,\n- leave=True, # progressbar stays\n- ):\n- fp.write(chunk) # type: ignore\n-\n- def extract_tarfile(file_path: str, extract_path: str, mode: str):\n- if os.path.exists(file_path):\n- with tarfile.open(file_path, mode=mode) as tar_ref:\n- for member in tar_ref.getmembers():\n- try:\n- tar_ref.extract(member, path=extract_path, set_attrs=False)\n- except PermissionError:\n- raise PermissionError(f\"Could not extract tar file {file_path}\")\n+ print(f\"file size: {file_size}\")\n+ print(f\"num bars: {num_bars}\")\n+\n+ with open(local_filename, \"wb\") as fp:\n+ for chunk in tq(\n+ r.iter_content(chunk_size=chunk_size),\n+ total=num_bars,\n+ unit=\"KB\",\n+ desc=local_filename,\n+ leave=True, # progressbar stays\n+ ):\n+ fp.write(chunk) # type: ignore\n+\n+ def extract_tarfile(file_path: str, extract_path: str, mode: str) -> None:\n+ if not os.path.exists(file_path):\n+ return\n+ with tarfile.open(file_path, mode=mode) as tar_ref:\n+ for member in tar_ref.getmembers():\n+ try:\n+ tar_ref.extract(member, path=extract_path, set_attrs=False)\n+ except PermissionError:\n+ raise PermissionError(f\"Could not extract tar file {file_path}\")\n \n if \".zip\" in local_filename:\n if os.path.exists(local_filename):\n", "issue": "`download_data` from `flash.core.data.utils` connects to the internet before checking if a file exists\n## \ud83d\udc1b Bug\r\n\r\nIn many supercomputers, process of running ML codes is to first run the download part on the login nodes (which have access to the internet), and stop the code right before the actual training starts. \r\n\r\nThen, when you run on the compute nodes (the ones with the actual gpus and no internet access), you let the code run to the end. 
In other frameworks, data downloaders detect the files' presence and skip it before ever trying to connect to the internet.\r\n\r\nFlash tries first to check file size in [this line](https://github.com/Lightning-Universe/lightning-flash/blob/18ff71e228ea0d68d6564ae454a7053e503dee15/src/flash/core/data/utils.py#L86), which will freeze in a machine without internet.\r\n\r\n### To Reproduce\r\n\r\ncall \"download_data\" on a machine with no internet access\r\n\r\n#### Code sample\r\n\r\n```\r\nfrom flash.core.data.utils import download_data\r\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\", \"data/\")\r\n```\r\n\r\n### Expected behavior\r\n\r\nIf the file is already there, skip download\r\n\r\n### Environment\r\n\r\n - OS (e.g., Linux): Centos 8.6\r\n - Python version: 3.10\r\n - PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7): Pytorch 1.12.1, Lightning 0.8.4, Flash 0.8.1.post0\r\n - GPU models and configuration: 16x A100 40GB\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\nFast.ai's fastdownload, for example, does not suffer from this - if the file is there, it doesn't try to download it, even if it's of the wrong size: [fastdownload link](https://github.com/fastai/fastcore/blob/1f6844d44d6e0e26b393cecd37818dbb4d391aca/fastcore/net.py#L180)\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport tarfile\nimport zipfile\nfrom typing import Any, Callable, Dict, Iterable, Mapping, Optional, Set\n\nimport requests\nimport urllib3\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom torch import nn\nfrom tqdm.auto import tqdm as tq\n\nfrom flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE\nfrom flash.core.utilities.stages import RunningStage\n\n# Skip doctests if requirements aren't available\nif not _TOPIC_CORE_AVAILABLE:\n __doctest_skip__ = [\"download_data\"]\n\n_STAGES_PREFIX = {\n RunningStage.TRAINING: \"train\",\n RunningStage.TESTING: \"test\",\n RunningStage.VALIDATING: \"val\",\n RunningStage.PREDICTING: \"predict\",\n RunningStage.SERVING: \"serve\",\n RunningStage.SANITY_CHECKING: \"val\",\n}\n\n_INPUT_TRANSFORM_FUNCS: Set[str] = {\n \"per_sample_transform\",\n \"per_batch_transform\",\n \"per_sample_transform_on_device\",\n \"per_batch_transform_on_device\",\n \"collate\",\n}\n\n_CALLBACK_FUNCS: Set[str] = {\n \"load_sample\",\n *_INPUT_TRANSFORM_FUNCS,\n}\n\n_OUTPUT_TRANSFORM_FUNCS: Set[str] = {\n \"per_batch_transform\",\n \"uncollate\",\n \"per_sample_transform\",\n}\n\n\ndef download_data(url: str, path: str = \"data/\", verbose: bool = False) -> None:\n \"\"\"Download file with progressbar.\n\n # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n # __author__ = \"github.com/ruxi\"\n # __license__ = \"MIT\"\n\n Examples\n ________\n\n .. 
doctest::\n\n >>> import os\n >>> from flash.core.data.utils import download_data\n >>> download_data(\"https://pl-flash-data.s3.amazonaws.com/titanic.zip\", \"./data\")\n >>> os.listdir(\"./data\") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [...]\n\n \"\"\"\n # Disable warning about making an insecure request\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n if not os.path.exists(path):\n os.makedirs(path)\n local_filename = os.path.join(path, url.split(\"/\")[-1])\n r = requests.get(url, stream=True, verify=False)\n file_size = int(r.headers[\"Content-Length\"]) if \"Content-Length\" in r.headers else 0\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n print({\"file_size\": file_size})\n print({\"num_bars\": num_bars})\n\n if not os.path.exists(local_filename):\n with open(local_filename, \"wb\") as fp:\n for chunk in tq(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit=\"KB\",\n desc=local_filename,\n leave=True, # progressbar stays\n ):\n fp.write(chunk) # type: ignore\n\n def extract_tarfile(file_path: str, extract_path: str, mode: str):\n if os.path.exists(file_path):\n with tarfile.open(file_path, mode=mode) as tar_ref:\n for member in tar_ref.getmembers():\n try:\n tar_ref.extract(member, path=extract_path, set_attrs=False)\n except PermissionError:\n raise PermissionError(f\"Could not extract tar file {file_path}\")\n\n if \".zip\" in local_filename:\n if os.path.exists(local_filename):\n with zipfile.ZipFile(local_filename, \"r\") as zip_ref:\n zip_ref.extractall(path)\n elif local_filename.endswith(\".tar.gz\") or local_filename.endswith(\".tgz\"):\n extract_tarfile(local_filename, path, \"r:gz\")\n elif local_filename.endswith(\".tar.bz2\") or local_filename.endswith(\".tbz\"):\n extract_tarfile(local_filename, path, \"r:bz2\")\n\n\nclass FuncModule(nn.Module):\n \"\"\"This class is used to wrap a callable within a nn.Module and apply the wrapped function in `__call__`\"\"\"\n\n def __init__(self, func: Callable) -> None:\n super().__init__()\n self.func = func\n\n def forward(self, *args, **kwargs) -> Any:\n return self.func(*args, **kwargs)\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({self.func.__name__})\"\n\n def __repr__(self):\n return str(self.func)\n\n\ndef convert_to_modules(transforms: Optional[Dict[str, Callable]]):\n if transforms is None or isinstance(transforms, nn.Module):\n return transforms\n\n transforms = apply_to_collection(transforms, Callable, FuncModule, wrong_dtype=nn.Module)\n transforms = apply_to_collection(transforms, Mapping, nn.ModuleDict, wrong_dtype=nn.ModuleDict)\n return apply_to_collection(transforms, Iterable, nn.ModuleList, wrong_dtype=(nn.ModuleList, nn.ModuleDict))\n", "path": "src/flash/core/data/utils.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport tarfile\nimport zipfile\nfrom typing import Any, Callable, Dict, Iterable, Mapping, Optional, 
Set\n\nimport requests\nimport urllib3\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom torch import nn\nfrom tqdm.auto import tqdm as tq\n\nfrom flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE\nfrom flash.core.utilities.stages import RunningStage\n\n# Skip doctests if requirements aren't available\nif not _TOPIC_CORE_AVAILABLE:\n __doctest_skip__ = [\"download_data\"]\n\n_STAGES_PREFIX = {\n RunningStage.TRAINING: \"train\",\n RunningStage.TESTING: \"test\",\n RunningStage.VALIDATING: \"val\",\n RunningStage.PREDICTING: \"predict\",\n RunningStage.SERVING: \"serve\",\n RunningStage.SANITY_CHECKING: \"val\",\n}\n\n_INPUT_TRANSFORM_FUNCS: Set[str] = {\n \"per_sample_transform\",\n \"per_batch_transform\",\n \"per_sample_transform_on_device\",\n \"per_batch_transform_on_device\",\n \"collate\",\n}\n\n_CALLBACK_FUNCS: Set[str] = {\n \"load_sample\",\n *_INPUT_TRANSFORM_FUNCS,\n}\n\n_OUTPUT_TRANSFORM_FUNCS: Set[str] = {\n \"per_batch_transform\",\n \"uncollate\",\n \"per_sample_transform\",\n}\n\n\ndef download_data(url: str, path: str = \"data/\", verbose: bool = False, chunk_size: int = 1024) -> None:\n \"\"\"Download file with progressbar.\n\n # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n # __author__ = \"github.com/ruxi\"\n # __license__ = \"MIT\"\n\n Examples\n ________\n\n .. doctest::\n\n >>> import os\n >>> from flash.core.data.utils import download_data\n >>> download_data(\"https://pl-flash-data.s3.amazonaws.com/titanic.zip\", \"./data\")\n >>> os.listdir(\"./data\") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [...]\n\n \"\"\"\n local_filename = os.path.join(path, url.split(\"/\")[-1])\n if os.path.exists(local_filename):\n if verbose:\n print(f\"local file already exists: '{local_filename}'\")\n return\n\n os.makedirs(path, exist_ok=True)\n # Disable warning about making an insecure request\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n r = requests.get(url, stream=True, verify=False)\n file_size = int(r.headers[\"Content-Length\"]) if \"Content-Length\" in r.headers else 0\n num_bars = int(file_size / chunk_size)\n if verbose:\n print(f\"file size: {file_size}\")\n print(f\"num bars: {num_bars}\")\n\n with open(local_filename, \"wb\") as fp:\n for chunk in tq(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit=\"KB\",\n desc=local_filename,\n leave=True, # progressbar stays\n ):\n fp.write(chunk) # type: ignore\n\n def extract_tarfile(file_path: str, extract_path: str, mode: str) -> None:\n if not os.path.exists(file_path):\n return\n with tarfile.open(file_path, mode=mode) as tar_ref:\n for member in tar_ref.getmembers():\n try:\n tar_ref.extract(member, path=extract_path, set_attrs=False)\n except PermissionError:\n raise PermissionError(f\"Could not extract tar file {file_path}\")\n\n if \".zip\" in local_filename:\n if os.path.exists(local_filename):\n with zipfile.ZipFile(local_filename, \"r\") as zip_ref:\n zip_ref.extractall(path)\n elif local_filename.endswith(\".tar.gz\") or local_filename.endswith(\".tgz\"):\n extract_tarfile(local_filename, path, \"r:gz\")\n elif local_filename.endswith(\".tar.bz2\") or local_filename.endswith(\".tbz\"):\n extract_tarfile(local_filename, path, \"r:bz2\")\n\n\nclass FuncModule(nn.Module):\n \"\"\"This class is used to wrap a callable within a nn.Module and apply the wrapped function in `__call__`\"\"\"\n\n def __init__(self, func: Callable) -> None:\n super().__init__()\n self.func = func\n\n def forward(self, *args, 
**kwargs) -> Any:\n return self.func(*args, **kwargs)\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({self.func.__name__})\"\n\n def __repr__(self):\n return str(self.func)\n\n\ndef convert_to_modules(transforms: Optional[Dict[str, Callable]]):\n if transforms is None or isinstance(transforms, nn.Module):\n return transforms\n\n transforms = apply_to_collection(transforms, Callable, FuncModule, wrong_dtype=nn.Module)\n transforms = apply_to_collection(transforms, Mapping, nn.ModuleDict, wrong_dtype=nn.ModuleDict)\n return apply_to_collection(transforms, Iterable, nn.ModuleList, wrong_dtype=(nn.ModuleList, nn.ModuleDict))\n", "path": "src/flash/core/data/utils.py"}]} | 2,332 | 803 |
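Side note on the lightning-flash row above: the essence of the fix is to test for the local file before any HTTP request is issued. A minimal sketch of that check-before-connect ordering, using only `os` and `requests` — the helper name is hypothetical and this is not Flash's actual implementation:

```
import os
import requests

def download_if_missing(url: str, path: str = "data/") -> str:
    local_filename = os.path.join(path, url.split("/")[-1])
    if os.path.exists(local_filename):
        # Offline-safe early exit: no socket is ever opened when the file is already on disk.
        return local_filename
    os.makedirs(path, exist_ok=True)
    r = requests.get(url, stream=True)
    with open(local_filename, "wb") as fp:
        for chunk in r.iter_content(chunk_size=1024):
            fp.write(chunk)
    return local_filename
```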